!4297 sync code incubator to master
Merge pull request !4297 from guozhijian/code_sync_incubator_f3c32baf_to_master_fcfc75a3_0811
commit 4f75adb11a
@@ -0,0 +1,46 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""FusedSparseAdam op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType

fused_sparse_adam_op_info = AiCPURegOp("FusedSparseAdam") \
    .fusion_type("OPAQUE") \
    .attr("use_locking", "bool") \
    .attr("use_nesterov", "bool") \
    .input(0, "var", "required") \
    .input(1, "m", "required") \
    .input(2, "v", "required") \
    .input(3, "beta1_power", "required") \
    .input(4, "beta2_power", "required") \
    .input(5, "lr", "required") \
    .input(6, "beta1", "required") \
    .input(7, "beta2", "required") \
    .input(8, "epsilon", "required") \
    .input(9, "grad", "required") \
    .input(10, "indices", "required") \
    .output(0, "var", "required") \
    .output(1, "m", "required") \
    .output(2, "v", "required") \
    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
                  DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
                  DataType.F32_Default, DataType.F32_Default, DataType.I32_Default, DataType.F32_Default,
                  DataType.F32_Default, DataType.F32_Default) \
    .get_op_info()

@op_info_register(fused_sparse_adam_op_info)
def _fused_sparse_adam_aicpu():
    """FusedSparseAdam aicpu register"""
    return
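Editor's note (not part of the diff): in these registrations, `dtype_format` lists one `DataType` per declared input in index order, then one per output. The FusedSparseAdam entry above therefore carries 14 slots, with `indices` as the lone int32 in slot 10. A standalone sketch of that positional mapping, in plain Python with no MindSpore dependency:

```python
# Positional mapping assumed by the dtype_format(...) call above:
# one dtype per input in declaration order, then one per output.
inputs = ["var", "m", "v", "beta1_power", "beta2_power", "lr",
          "beta1", "beta2", "epsilon", "grad", "indices"]
outputs = ["var", "m", "v"]
dtypes = ["F32"] * 10 + ["I32"] + ["F32"] * 3   # indices is the only int32 slot
assert len(dtypes) == len(inputs) + len(outputs) == 14
for name, dtype in zip(inputs + outputs, dtypes):
    print(f"{name}: {dtype}")
```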
@@ -0,0 +1,41 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""FusedSparseFtrl op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType

fused_sparse_ftrl_op_info = AiCPURegOp("FusedSparseFtrl") \
    .fusion_type("OPAQUE") \
    .attr("lr", "float") \
    .attr("l1", "float") \
    .attr("l2", "float") \
    .attr("lr_power", "float") \
    .attr("use_locking", "bool") \
    .input(0, "var", "required") \
    .input(1, "accum", "required") \
    .input(2, "linear", "required") \
    .input(3, "grad", "required") \
    .input(4, "indices", "required") \
    .output(0, "var", "required") \
    .output(1, "accum", "required") \
    .output(2, "linear", "required") \
    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
                  DataType.I32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
    .get_op_info()

@op_info_register(fused_sparse_ftrl_op_info)
def _fused_sparse_ftrl_aicpu():
    """FusedSparseFtrl aicpu register"""
    return
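Editor's note: unlike FusedSparseAdam above, where beta1_power, lr and the other hyper-parameters arrive as runtime inputs, this registration declares lr, l1, l2 and lr_power as attributes, so they are fixed when the operator is constructed. A minimal construction sketch, mirroring the test later in this diff:

```python
from mindspore.ops import operations as P

# Hyper-parameters are operator attributes here, bound once at construction.
ftrl = P.FusedSparseFtrl(lr=0.1, l1=0.0, l2=0.0, lr_power=-0.5)
```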
@@ -0,0 +1,46 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""FusedSparseLazyAdam op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType

fused_sparse_lazy_adam_op_info = AiCPURegOp("FusedSparseLazyAdam") \
    .fusion_type("OPAQUE") \
    .attr("use_locking", "bool") \
    .attr("use_nesterov", "bool") \
    .input(0, "var", "required") \
    .input(1, "m", "required") \
    .input(2, "v", "required") \
    .input(3, "beta1_power", "required") \
    .input(4, "beta2_power", "required") \
    .input(5, "lr", "required") \
    .input(6, "beta1", "required") \
    .input(7, "beta2", "required") \
    .input(8, "epsilon", "required") \
    .input(9, "grad", "required") \
    .input(10, "indices", "required") \
    .output(0, "var", "required") \
    .output(1, "m", "required") \
    .output(2, "v", "required") \
    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
                  DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
                  DataType.F32_Default, DataType.F32_Default, DataType.I32_Default, DataType.F32_Default,
                  DataType.F32_Default, DataType.F32_Default) \
    .get_op_info()

@op_info_register(fused_sparse_lazy_adam_op_info)
def _fused_sparse_lazy_adam_aicpu():
    """FusedSparseLazyAdam aicpu register"""
    return
@@ -0,0 +1,39 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""FusedSparseProximalAdagrad op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType

fused_sparse_proximal_adagrad_op_info = AiCPURegOp("FusedSparseProximalAdagrad") \
    .fusion_type("OPAQUE") \
    .attr("use_locking", "bool") \
    .input(0, "var", "required") \
    .input(1, "accum", "required") \
    .input(2, "lr", "required") \
    .input(3, "l1", "required") \
    .input(4, "l2", "required") \
    .input(5, "grad", "required") \
    .input(6, "indices", "required") \
    .output(0, "var", "required") \
    .output(1, "accum", "required") \
    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
                  DataType.F32_Default, DataType.F32_Default, DataType.I32_Default, DataType.F32_Default,
                  DataType.F32_Default) \
    .get_op_info()

@op_info_register(fused_sparse_proximal_adagrad_op_info)
def _fused_sparse_proximal_adagrad_aicpu():
    """FusedSparseProximalAdagrad aicpu register"""
    return
@@ -0,0 +1,41 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""Padding op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType

padding_op_info = AiCPURegOp("Padding") \
    .fusion_type("OPAQUE") \
    .input(0, "x", "required") \
    .output(0, "y", "required") \
    .attr("pad_dim_size", "int") \
    .dtype_format(DataType.I8_Default, DataType.I8_Default) \
    .dtype_format(DataType.I16_Default, DataType.I16_Default) \
    .dtype_format(DataType.I32_Default, DataType.I32_Default) \
    .dtype_format(DataType.I64_Default, DataType.I64_Default) \
    .dtype_format(DataType.U8_Default, DataType.U8_Default) \
    .dtype_format(DataType.U16_Default, DataType.U16_Default) \
    .dtype_format(DataType.U32_Default, DataType.U32_Default) \
    .dtype_format(DataType.U64_Default, DataType.U64_Default) \
    .dtype_format(DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default) \
    .dtype_format(DataType.F64_Default, DataType.F64_Default) \
    .dtype_format(DataType.BOOL_Default, DataType.BOOL_Default) \
    .get_op_info()

@op_info_register(padding_op_info)
def _padding_aicpu():
    """Padding AiCPU register"""
    return
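Editor's note: a hedged numpy sketch of the behaviour this registration supports, assuming Padding follows the MindSpore semantics of the period (a trailing dimension of size 1 is zero-extended to pad_dim_size). The function name below is hypothetical:

```python
import numpy as np

def padding_reference(x: np.ndarray, pad_dim_size: int) -> np.ndarray:
    """Zero-extend a trailing dimension of size 1 to pad_dim_size (assumed semantics)."""
    assert x.shape[-1] == 1, "Padding expects the last dimension to be 1"
    pad_width = [(0, 0)] * (x.ndim - 1) + [(0, pad_dim_size - 1)]
    return np.pad(x, pad_width, mode="constant")

x = np.array([[8], [10]], dtype=np.float32)   # shape (2, 1)
print(padding_reference(x, 4))                # shape (2, 4), zeros appended
```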
@@ -0,0 +1,53 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.nn as nn
import mindspore.common.dtype as mstype
import mindspore.context as context
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

beta1_power = 0.9
beta2_power = 0.999
lr = 0.001
beta1 = 0.9
beta2 = 0.999
epsilon = 1e-8

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.fused_sparse_adam = P.FusedSparseAdam()
        self.var = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="var")
        self.m = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="m")
        self.v = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="v")

    def construct(self, grad, indices):
        return self.fused_sparse_adam(self.var, self.m, self.v, beta1_power, beta2_power, lr, beta1, beta2, epsilon,
                                      grad, indices)

def test_net():
    gradient = Tensor(np.array([0.22948648, 0.14569908, 0.92861906, 0.66870148])
                      .reshape([2, 1, 2]).astype(np.float32))
    indices = Tensor([0, 1], mstype.int32)
    net = Net()
    output = net(gradient, indices)
    print(output)
    print(net.var.default_input)
    print(net.m.default_input)
    print(net.v.default_input)
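Editor's note: for readers checking the values this test prints, a hedged numpy reference of the update FusedSparseAdam is expected to apply. The assumption, consistent with how the lazy variant is distinguished later in this diff, is that the sparse gradient is first densified over `indices` and a standard Adam step then runs on every row; `sparse_adam_reference` is a hypothetical name:

```python
import numpy as np

def sparse_adam_reference(var, m, v, grad, indices,
                          beta1_power=0.9, beta2_power=0.999, lr=0.001,
                          beta1=0.9, beta2=0.999, epsilon=1e-8):
    # Scatter the sparse gradient into a dense buffer (duplicate rows sum).
    dense_grad = np.zeros_like(var)
    np.add.at(dense_grad, indices, grad)
    # Standard Adam moment updates and bias-corrected step on all rows.
    m[:] = beta1 * m + (1 - beta1) * dense_grad
    v[:] = beta2 * v + (1 - beta2) * dense_grad ** 2
    lr_t = lr * np.sqrt(1 - beta2_power) / (1 - beta1_power)
    var[:] -= lr_t * m / (np.sqrt(v) + epsilon)
    return var, m, v
```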
@@ -0,0 +1,50 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.common.dtype as mstype
import mindspore.nn as nn
import mindspore.context as context
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

lr = 0.01
l1 = 0.0
l2 = 0.0
lr_power = -0.5

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.fused_sparse_ftrl = P.FusedSparseFtrl(lr=0.1, l1=0.0, l2=0.0, lr_power=-0.5)
        self.var = Parameter(Tensor(np.ones([3, 3]).astype(np.float32)), name="var")
        self.accum = Parameter(Tensor(np.ones([3, 3]).astype(np.float32)), name="accum")
        self.linear = Parameter(Tensor(np.ones([3, 3]).astype(np.float32)), name="linear")

    def construct(self, grad, indices):
        return self.fused_sparse_ftrl(self.var, self.accum, self.linear, grad, indices)

def test_net():
    gradient = Tensor(np.array([-3, 2, 3, 0, 0, 0, -4, -1, -2])
                      .reshape([3, 3]).astype(np.float32))
    indices = Tensor(np.ones([3]), mstype.int32)
    net = Net()
    output = net(gradient, indices)
    print(output)
    print(net.var.default_input)
    print(net.accum.default_input)
    print(net.linear.default_input)
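Editor's note: the module-level lr, l1, l2 and lr_power constants in this test are unused; the Net hardcodes lr=0.1 in the constructor. For reference, a hedged numpy sketch of the FTRL-proximal row update the operator is expected to perform (assumption: the standard ApplyFtrl-style formula; duplicate indices, as in this test, would be applied cumulatively by the real kernel):

```python
import numpy as np

def ftrl_row_reference(var, accum, linear, g,
                       lr=0.1, l1=0.0, l2=0.0, lr_power=-0.5):
    # Standard FTRL-proximal update for one row, given gradient g.
    accum_new = accum + g * g
    sigma = (accum_new ** -lr_power - accum ** -lr_power) / lr
    linear += g - sigma * var
    quadratic = accum_new ** -lr_power / lr + 2.0 * l2
    var[:] = np.where(np.abs(linear) > l1,
                      (np.sign(linear) * l1 - linear) / quadratic, 0.0)
    accum[:] = accum_new
    return var, accum, linear
```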
@@ -0,0 +1,53 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.common.dtype as mstype
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

beta1_power = 0.9
beta2_power = 0.999
lr = 0.001
beta1 = 0.9
beta2 = 0.999
epsilon = 1e-8

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.fused_sparse_lazy_adam = P.FusedSparseLazyAdam()
        self.var = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="var")
        self.m = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="m")
        self.v = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="v")

    def construct(self, grad, indices):
        return self.fused_sparse_lazy_adam(self.var, self.m, self.v, beta1_power, beta2_power,
                                           lr, beta1, beta2, epsilon, grad, indices)

def test_net():
    gradient = Tensor(np.array([0.22948648, 0.14569908, 0.92861906, 0.66870148])
                      .reshape([2, 1, 2]).astype(np.float32))
    indices = Tensor([0, 1], mstype.int32)
    net = Net()
    output = net(gradient, indices)
    print(output)
    print(net.var.default_input)
    print(net.m.default_input)
    print(net.v.default_input)
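Editor's note: the lazy variant differs from the FusedSparseAdam test above in which rows it touches. A hedged sketch (assumption: LazyAdam updates m, v and var only at the sampled indices and leaves every other row untouched; the function name is hypothetical):

```python
import numpy as np

def lazy_adam_rows(var, m, v, grad, indices,
                   beta1_power=0.9, beta2_power=0.999, lr=0.001,
                   beta1=0.9, beta2=0.999, epsilon=1e-8):
    lr_t = lr * np.sqrt(1 - beta2_power) / (1 - beta1_power)
    for row, g in zip(indices, grad):
        # Only the sampled rows receive moment decay and a parameter step.
        m[row] = beta1 * m[row] + (1 - beta1) * g
        v[row] = beta2 * v[row] + (1 - beta2) * g ** 2
        var[row] -= lr_t * m[row] / (np.sqrt(v[row]) + epsilon)
    return var, m, v
```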
@@ -0,0 +1,47 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.nn as nn
import mindspore.context as context
import mindspore.common.dtype as mstype
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.fused_sparse_proximal_adagrad = P.FusedSparseProximalAdagrad()
        self.var = Parameter(Tensor(np.ones([3, 3]).astype(np.float32)), name="var")
        self.accum = Parameter(Tensor(np.ones([3, 3]).astype(np.float32)), name="accum")
        self.lr = 0.01
        self.l1 = 0.0
        self.l2 = 0.0

    def construct(self, grad, indices):
        return self.fused_sparse_proximal_adagrad(self.var, self.accum, self.lr, self.l1, self.l2,
                                                  grad, indices)

def test_net():
    gradient = Tensor(np.array([-3, 2, 3, 0, 0, 0, -4, -1, -2])
                      .reshape([3, 3]).astype(np.float32))
    indices = Tensor(np.ones([3]), mstype.int32)
    net = Net()
    output = net(gradient, indices)
    print(output)
    print(net.var.default_input)
    print(net.accum.default_input)
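Editor's note: a hedged numpy reference for the per-row proximal-Adagrad update this test exercises (assumption: the documented ApplyProximalAdagrad formula, applied at each sampled row; the function name is hypothetical):

```python
import numpy as np

def proximal_adagrad_row(var, accum, g, row, lr=0.01, l1=0.0, l2=0.0):
    # Accumulate squared gradient, then take a proximal step on this row.
    accum[row] += g * g
    lr_t = lr / np.sqrt(accum[row])
    prox = var[row] - lr_t * g
    if l1 > 0:
        var[row] = np.sign(prox) * np.maximum(np.abs(prox) - lr_t * l1, 0) / (1 + lr_t * l2)
    else:
        var[row] = prox / (1 + lr_t * l2)
    return var, accum
```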
@@ -0,0 +1,56 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np

import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.ops import composite as C

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


class Net(nn.Cell):
    def __init__(self, shape, seed=0):
        super(Net, self).__init__()
        self.shape = shape
        self.seed = seed

    def construct(self, alpha, beta):
        C.set_seed(20)
        return C.gamma(self.shape, alpha, beta, self.seed)


def test_net_1D():
    seed = 10
    shape = (3, 2, 4)
    alpha = 1.0
    beta = 1.0
    net = Net(shape, seed)
    talpha, tbeta = Tensor(alpha, mstype.float32), Tensor(beta, mstype.float32)
    output = net(talpha, tbeta)
    assert output.shape == (3, 2, 4)


def test_net_ND():
    seed = 10
    shape = (3, 1, 2)
    alpha = np.array([[[1], [2]], [[3], [4]], [[5], [6]]]).astype(np.float32)
    beta = np.array([1.0]).astype(np.float32)
    net = Net(shape, seed)
    talpha, tbeta = Tensor(alpha, mstype.float32), Tensor(beta, mstype.float32)
    output = net(talpha, tbeta)
    assert output.shape == (3, 2, 2)
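Editor's note: test_net_ND asserts a (3, 2, 2) result from a requested shape of (3, 1, 2), an alpha of shape (3, 2, 1) and a beta of shape (1,): the sample shape is the broadcast of the three. A numpy check of that shape arithmetic (illustration only, not the MindSpore sampler):

```python
import numpy as np

alpha = np.array([[[1], [2]], [[3], [4]], [[5], [6]]], dtype=np.float32)  # (3, 2, 1)
out_shape = np.broadcast_shapes((3, 1, 2), alpha.shape, (1,))             # beta is (1,)
assert out_shape == (3, 2, 2)
# Sampling with numpy for illustration; beta is treated as a rate, so scale = 1/beta.
sample = np.random.default_rng(10).gamma(np.broadcast_to(alpha, out_shape), scale=1.0)
assert sample.shape == (3, 2, 2)
```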
Some files were not shown because too many files have changed in this diff.