add SquaredDifference operation for cpu

pull/10638/head
"dangjiaqi1" 4 years ago
parent 5653a0a970
commit d86509c1c8

@ -157,6 +157,16 @@ void ArithmeticCPUKernel::NotEqual(const T *input1, const T *input2, bool *out,
}
}
template <typename T>
void ArithmeticCPUKernel::SquaredDifference(const T *input1, const T *input2, T *out, size_t start, size_t end) {
  // Computes out[i] = (input1 - input2)^2 element-wise over the output
  // range [start, end). GenIndex maps the flat output index to the
  // (possibly broadcast) offsets into each input.
  //
  // The index vector is hoisted out of the loop so we pay for at most one
  // heap allocation per thread instead of one per output element; clear()
  // keeps capacity. NOTE(review): assumes GenIndex fills the vector from
  // scratch (appends into an empty vector) — it is always handed an empty
  // vector by the original code, so clearing before each call is equivalent.
  std::vector<size_t> idx;
  for (size_t i = start; i < end; i++) {
    idx.clear();
    GenIndex(i, &idx);
    T diff = input1[idx[0]] - input2[idx[1]];
    out[i] = diff * diff;
  }
}
void ArithmeticCPUKernel::InitKernel(const CNodePtr &kernel_node) {
MS_EXCEPTION_IF_NULL(kernel_node);
std::string kernel_name = AnfAlgo::GetCNodeName(kernel_node);
@ -182,6 +192,8 @@ void ArithmeticCPUKernel::InitKernel(const CNodePtr &kernel_node) {
operate_type_ = NOTEQUAL;
} else if (kernel_name == prim::kPrimAssignAdd->name()) {
operate_type_ = ASSIGNADD;
} else if (kernel_name == prim::kPrimSquaredDifference->name()) {
operate_type_ = SQUAREDDIFFERENCE;
} else {
MS_LOG(EXCEPTION) << "Not support " << kernel_name;
}
@ -343,6 +355,9 @@ void ArithmeticCPUKernel::LaunchKernel(const std::vector<AddressPtr> &inputs, co
threads.emplace_back(std::thread(&ArithmeticCPUKernel::Pow<T>, this, input1, input2, output, start, end));
} else if (operate_type_ == ASSIGNADD) {
threads.emplace_back(std::thread(&ArithmeticCPUKernel::AssignAdd<T>, this, input1, input2, output, start, end));
} else if (operate_type_ == SQUAREDDIFFERENCE) {
threads.emplace_back(
std::thread(&ArithmeticCPUKernel::SquaredDifference<T>, this, input1, input2, output, start, end));
} else {
MS_LOG(EXCEPTION) << "Not support " << operate_type_;
}

@ -61,6 +61,8 @@ class ArithmeticCPUKernel : public CPUKernel {
void Equal(const T *input1, const T *input2, bool *out, size_t start, size_t end);
template <typename T>
void NotEqual(const T *input1, const T *input2, bool *out, size_t start, size_t end);
template <typename T>
void SquaredDifference(const T *input1, const T *input2, T *out, size_t start, size_t end);
std::vector<size_t> input_shape0_;
std::vector<size_t> input_shape1_;
std::vector<size_t> input_element_num0_;
@ -166,7 +168,6 @@ MS_REG_CPU_KERNEL(
MS_REG_CPU_KERNEL(
Equal, KernelAttr().AddInputAttr(kNumberTypeFloat64).AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeBool),
ArithmeticCPUKernel);
MS_REG_CPU_KERNEL(
NotEqual, KernelAttr().AddInputAttr(kNumberTypeBool).AddInputAttr(kNumberTypeBool).AddOutputAttr(kNumberTypeBool),
ArithmeticCPUKernel);
@ -200,6 +201,18 @@ MS_REG_CPU_KERNEL(
NotEqual,
KernelAttr().AddInputAttr(kNumberTypeFloat64).AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeBool),
ArithmeticCPUKernel);
// Register the SquaredDifference CPU kernel for each supported dtype
// (int32, float16, float32); the output dtype always matches the inputs.
// bool is intentionally not registered — bool inputs are rejected at
// kernel-selection time (see the Python test expecting TypeError).
MS_REG_CPU_KERNEL(
SquaredDifference,
KernelAttr().AddInputAttr(kNumberTypeInt32).AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeInt32),
ArithmeticCPUKernel);
MS_REG_CPU_KERNEL(
SquaredDifference,
KernelAttr().AddInputAttr(kNumberTypeFloat16).AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeFloat16),
ArithmeticCPUKernel);
MS_REG_CPU_KERNEL(
SquaredDifference,
KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
ArithmeticCPUKernel);
} // namespace kernel
} // namespace mindspore

@ -79,6 +79,7 @@ enum OperateType {
EQUAL,
NOTEQUAL,
FLOOR,
SQUAREDDIFFERENCE
};
class CPUKernel : public kernel::KernelMod {

@ -268,6 +268,7 @@ inline const PrimitivePtr kPrimRsqrt = std::make_shared<Primitive>("Rsqrt");
inline const PrimitivePtr kPrimSplitV = std::make_shared<Primitive>("SplitV");
inline const PrimitivePtr kPrimLinSpace = std::make_shared<Primitive>("LinSpace");
inline const PrimitivePtr kPrimSign = std::make_shared<Primitive>("Sign");
inline const PrimitivePtr kPrimSquaredDifference = std::make_shared<Primitive>("SquaredDifference");
// Statements
inline const PrimitivePtr kPrimReturn = std::make_shared<Primitive>("return");

@ -0,0 +1,109 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.common.dtype as mstype
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
class Net(nn.Cell):
    """Minimal cell wrapping the SquaredDifference primitive for testing."""

    def __init__(self):
        super(Net, self).__init__()
        self.squared_diff = P.SquaredDifference()

    def construct(self, x, y):
        """Return the element-wise squared difference (x - y) ** 2."""
        return self.squared_diff(x, y)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu_training
@pytest.mark.env_onecard
def test_net01():
    """SquaredDifference on same-shape int32/float32 inputs; bool inputs are rejected.

    Compares the kernel output against a NumPy reference (diff * diff) and
    checks that the unregistered bool dtype raises TypeError.
    """
    net = Net()
    np.random.seed(1)

    # int32 inputs, same shape.
    x1 = np.random.randn(2, 3).astype(np.int32)
    y1 = np.random.randn(2, 3).astype(np.int32)
    output1 = net(Tensor(x1), Tensor(y1)).asnumpy()
    diff = x1 - y1
    expect1 = diff * diff
    assert np.all(expect1 == output1)
    assert output1.shape == expect1.shape

    # float32 inputs, same shape.
    x2 = np.random.randn(2, 3).astype(np.float32)
    y2 = np.random.randn(2, 3).astype(np.float32)
    output2 = net(Tensor(x2), Tensor(y2)).asnumpy()
    diff = x2 - y2
    expect2 = diff * diff
    assert np.all(expect2 == output2)
    assert output2.shape == expect2.shape

    # bool is not a registered kernel dtype, so this must raise TypeError.
    # The original try/except silently passed even when NO exception was
    # raised; pytest.raises actually enforces the failure. np.bool_ replaces
    # the np.bool alias removed in NumPy >= 1.24.
    x3 = np.random.randn(2, 3).astype(np.bool_)
    y3 = np.random.randn(2, 3).astype(np.bool_)
    with pytest.raises(TypeError):
        net(Tensor(x3), Tensor(y3))
@pytest.mark.level0
@pytest.mark.platform_x86_cpu_training
@pytest.mark.env_onecard
def test_net02():
    """SquaredDifference broadcasting cases.

    Covers scalar-vs-tensor broadcast, prefix broadcast (2,3)->(2,2,3),
    cross broadcast (1,2)x(3,1), an incompatible-shape failure, and a
    high-rank (8-D) same-shape case.
    """
    net = Net()

    # Scalar broadcast against a 2x2 tensor: (1 - 3)^2 == 4 everywhere.
    x1 = Tensor(1, mstype.float32)
    y1 = Tensor(np.array([[3, 3], [3, 3]]).astype(np.float32))
    expect1 = np.array([[4, 4], [4, 4]]).astype(np.float32)
    output1 = net(x1, y1).asnumpy()
    assert np.all(expect1 == output1)
    assert output1.shape == expect1.shape

    np.random.seed(1)

    # (2,3) broadcast into (2,2,3).
    x2 = np.random.randn(2, 3).astype(np.float32)
    y2 = np.random.randn(2, 2, 3).astype(np.float32)
    output2 = net(Tensor(x2), Tensor(y2)).asnumpy()
    diff = x2 - y2
    expect2 = diff * diff
    assert np.all(expect2 == output2)
    assert output2.shape == expect2.shape

    # Cross broadcast: (1,2) x (3,1) -> (3,2).
    x3 = np.random.randn(1, 2).astype(np.float32)
    y3 = np.random.randn(3, 1).astype(np.float32)
    output3 = net(Tensor(x3), Tensor(y3)).asnumpy()
    diff = x3 - y3
    expect3 = diff * diff
    assert np.all(expect3 == output3)
    assert output3.shape == expect3.shape

    # Incompatible shapes (2,3) x (1,2) must raise ValueError. The original
    # try/except silently passed when no exception was raised; pytest.raises
    # enforces it.
    x4 = np.random.randn(2, 3).astype(np.float32)
    y4 = np.random.randn(1, 2).astype(np.float32)
    with pytest.raises(ValueError):
        net(Tensor(x4), Tensor(y4))

    # High-rank (8-D) same-shape inputs.
    x5 = np.random.randn(2, 3, 2, 3, 4, 5, 6, 7).astype(np.float32)
    y5 = np.random.randn(2, 3, 2, 3, 4, 5, 6, 7).astype(np.float32)
    output5 = net(Tensor(x5), Tensor(y5)).asnumpy()
    diff = x5 - y5
    expect5 = diff * diff
    assert np.all(expect5 == output5)
    assert output5.shape == expect5.shape
Loading…
Cancel
Save