Update basic.py to add class L1Regularizer(Cell); add test_l1_regularizer.py.

pull/9386/head
"dangjiaqi1" 4 years ago
parent ee50f5b6f6
commit 39948670dd

@ -14,9 +14,10 @@
# ============================================================================
"""basic"""
import math
import numpy as np
import mindspore.common.dtype as mstype
from mindspore.ops.composite.multitype_ops import _constexpr_utils as const_utils
from mindspore.common.seed import _get_graph_seed
from mindspore.common.tensor import Tensor
from mindspore.common.initializer import initializer
@ -33,10 +34,59 @@ from mindspore import context
from ..cell import Cell
from .activation import get_activation
__all__ = ['Dropout', 'Flatten', 'Dense', 'ClipByNorm', 'Norm', 'OneHot', 'Pad', 'Unfold',
'Tril', 'Triu', 'Interpolate', 'MatrixDiag', 'MatrixDiagPart', 'MatrixSetDiag']
'Tril', 'Triu', 'Interpolate', 'MatrixDiag', 'MatrixDiagPart', 'MatrixSetDiag', 'L1Regularizer']
class L1Regularizer(Cell):
    r"""
    Apply l1 regularization to weights.

    l1 regularization makes weights sparse.

    .. math::
        \text{output} = \text{scale} * \sum_i \left| w_i \right|

    Note:
        scale (regularization factor) should be a number greater than 0.

    Args:
        scale (Union[int, float]): l1 regularization factor, must be greater than 0.

    Raises:
        TypeError: If `scale` is neither an int nor a float.
        ValueError: If `scale` is not greater than 0, or is math.inf or math.nan.

    Inputs:
        - **weights** (Tensor) - The input tensor. Must have a number dtype
          (bool tensors are rejected in construct).

    Outputs:
        Tensor, whose dtype is float32 and shape is ().

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> scale = 0.5
        >>> net = nn.L1Regularizer(scale)
        >>> weights = Tensor(np.array([[1.0, -2.0], [-3.0, 4.0]]).astype(np.float32))
        >>> output = net(weights)
        >>> print(output.asnumpy())
        5.0
    """

    def __init__(self, scale):
        super(L1Regularizer, self).__init__()
        Validator.check_value_type("scale", scale, [int, float], self.cls_name)
        # Check inf/nan first: otherwise -inf would satisfy `scale <= 0` and
        # raise the wrong (less precise) message.
        if math.isinf(scale) or math.isnan(scale):
            raise ValueError("scale can not be INF or NAN")
        if scale <= 0:
            raise ValueError("scale should be a number greater than 0")
        self.abs = P.Abs()
        self.reduce_sum = P.ReduceSum()
        # Convert once at construction time so construct() stays graph-friendly.
        self.scale = Tensor(scale, dtype=mstype.float32)

    def construct(self, weights):
        # Reject non-number dtypes (e.g. bool) before computing.
        const_utils.check_valid_type(weights.dtype, mstype.number_type, 'weights')
        l1_regularization = self.scale * self.reduce_sum(self.abs(weights))
        return l1_regularization
class Dropout(Cell):
r"""

@ -0,0 +1,60 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" Test L1Regularizer """
import numpy as np
import pytest
import mindspore.nn as nn
import mindspore.context as context
from mindspore import Tensor, ms_function
# Graph mode so the ms_function-decorated construct is compiled; save_graphs
# dumps IR files for debugging.
context.set_context(mode=context.GRAPH_MODE, save_graphs=True)
class Net_l1_regularizer(nn.Cell):
    """Thin Cell wrapper that runs L1Regularizer through a compiled construct."""

    def __init__(self, scale):
        super(Net_l1_regularizer, self).__init__()
        self.regularizer = nn.L1Regularizer(scale)

    @ms_function
    def construct(self, weights):
        return self.regularizer(weights)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_l1_regularizer01():
    """L1Regularizer via a compiled Cell: 0.5 * sum(|w|) over [[1,-2],[-3,4]] == 5.0."""
    scale = 0.5
    weights = Tensor(np.array([[1.0, -2.0], [-3.0, 4.0]]).astype(np.float32))
    l1_regularizer = Net_l1_regularizer(scale)
    output = l1_regularizer(weights)
    expect = 5.0
    # allclose instead of exact == guards against float rounding; debug
    # prints removed — they only add noise in CI logs.
    assert np.allclose(output.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_l1_regularizer08():
    """L1Regularizer called directly (no wrapper Cell): 0.5 * sum(|w|) == 5.0."""
    scale = 0.5
    net = nn.L1Regularizer(scale)
    weights = Tensor(np.array([[1.0, -2.0], [-3.0, 4.0]]).astype(np.float32))
    output = net(weights)
    expect = 5.0
    # allclose instead of exact == guards against float rounding; debug
    # print removed — it only adds noise in CI logs.
    assert np.allclose(output.asnumpy(), expect)

@ -0,0 +1,101 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" Test L1Regularizer """
import math

import numpy as np
import pytest

import mindspore.nn as nn
import mindspore.context as context
from mindspore import Tensor, ms_function
# Graph mode so the ms_function-decorated construct is compiled; save_graphs
# dumps IR files for debugging.
context.set_context(mode=context.GRAPH_MODE, save_graphs=True)
class Net_l1_regularizer(nn.Cell):
    """Thin Cell wrapper that runs L1Regularizer through a compiled construct."""

    def __init__(self, scale):
        super(Net_l1_regularizer, self).__init__()
        self.regularizer = nn.L1Regularizer(scale)

    @ms_function
    def construct(self, weights):
        return self.regularizer(weights)
def test_l1_regularizer02():
    """scale=0.0 must be rejected with ValueError."""
    scale = 0.0
    weights = Tensor(np.array([[1.0, -2.0], [-3.0, 4.0]]).astype(np.float32))
    # pytest.raises fails the test when NO exception is raised; the old
    # `try/except ValueError: assert True` pattern passed silently in that case.
    with pytest.raises(ValueError):
        l1_regularizer = Net_l1_regularizer(scale)
        l1_regularizer(weights)
def test_l1_regularizer03():
    """Negative scale must be rejected with ValueError."""
    scale = -0.5
    weights = Tensor(np.array([[1.0, -2.0], [-3.0, 4.0]]).astype(np.float32))
    # pytest.raises fails the test when NO exception is raised; the old
    # `try/except ValueError: assert True` pattern passed silently in that case.
    with pytest.raises(ValueError):
        l1_regularizer = Net_l1_regularizer(scale)
        l1_regularizer(weights)
def test_l1_regularizer04():
    """scale=inf must be rejected with ValueError."""
    scale = math.inf
    weights = Tensor(np.array([[1.0, -2.0], [-3.0, 4.0]]).astype(np.float32))
    # pytest.raises fails the test when NO exception is raised; the old
    # `try/except ValueError: assert True` pattern passed silently in that case.
    with pytest.raises(ValueError):
        l1_regularizer = Net_l1_regularizer(scale)
        l1_regularizer(weights)
def test_l1_regularizer05():
    """scale=nan must be rejected with ValueError."""
    scale = math.nan
    weights = Tensor(np.array([[1.0, -2.0], [-3.0, 4.0]]).astype(np.float32))
    # pytest.raises fails the test when NO exception is raised; the old
    # `try/except ValueError: assert True` pattern passed silently in that case.
    with pytest.raises(ValueError):
        l1_regularizer = Net_l1_regularizer(scale)
        l1_regularizer(weights)
def test_l1_regularizer06():
    """Non-Tensor weights (a plain str) must be rejected with TypeError."""
    scale = 0.5
    weights = "sss"
    # pytest.raises fails the test when NO exception is raised; the old
    # `try/except TypeError: assert True` pattern passed silently in that case.
    with pytest.raises(TypeError):
        l1_regularizer = Net_l1_regularizer(scale)
        l1_regularizer(weights)
def test_l1_regularizer07():
    """Calling the net without the required weights argument must raise TypeError."""
    scale = 0.5
    # pytest.raises fails the test when NO exception is raised; the old
    # `try/except TypeError: assert True` pattern passed silently in that case.
    with pytest.raises(TypeError):
        l1_regularizer = Net_l1_regularizer(scale)
        l1_regularizer()
def test_l1_regularizer09():
    """A bool tensor has a non-number dtype and must be rejected with TypeError."""
    scale = 0.5
    weights = Tensor([[False, False], [False, False]])
    # pytest.raises fails the test when NO exception is raised; the old
    # `try/except TypeError: assert True` pattern passed silently in that case.
    with pytest.raises(TypeError):
        net = nn.L1Regularizer(scale)
        net(weights)
Loading…
Cancel
Save