parent 724ded7ad2
commit d043eeb834
@@ -0,0 +1,105 @@
/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "backend/kernel_compiler/cpu/argmin_with_value_cpu_kernel.h"
#include "runtime/device/cpu/cpu_device_address.h"

namespace mindspore {
namespace kernel {
namespace {
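// Returns the total number of elements described by shape.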
size_t get_element_num(const std::vector<size_t> &shape) {
  size_t size = 1;
  for (size_t i = 0; i < shape.size(); i++) {
    size *= shape[i];
  }
  return size;
}
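
// Validates that there is exactly one input and two outputs, and that each
// buffer size matches the element count implied by shape and the axis split.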
template <typename T>
bool check_validation(const std::vector<size_t> &shape, const size_t num_before_axis, const size_t num_after_axis,
                      const std::vector<kernel::AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &outputs) {
  if (inputs.size() != 1 || outputs.size() != 2) {
    MS_LOG(EXCEPTION) << "Wrong number of inputs or outputs!";
    return false;
  }
  size_t data_size = sizeof(T);
  size_t input_size = get_element_num(shape) * data_size;
  size_t output_num = num_before_axis * num_after_axis;
  size_t out0_size = output_num * sizeof(int32_t);
  size_t out1_size = output_num * data_size;
  if (inputs[0]->size != input_size || outputs[0]->size != out0_size || outputs[1]->size != out1_size) {
    MS_LOG(EXCEPTION) << "Invalid input or output data size!";
    return false;
  }
  return true;
}
}  // namespace

template <typename T>
void ArgMinWithValueCPUKernel<T>::InitKernel(const CNodePtr &kernel_node) {
  MS_EXCEPTION_IF_NULL(kernel_node);
  shape_ = AnfAlgo::GetInputDeviceShape(kernel_node, 0);
  size_t shape_len = shape_.size();
  int64_t axis = AnfAlgo::GetNodeAttr<int64_t>(kernel_node, AXIS);
  axis += shape_len;
  if (axis < 0) {
    MS_LOG(EXCEPTION) << "Invalid axis: " << (axis - static_cast<int64_t>(shape_len)) << ", should be in range [-"
                      << shape_len << ", " << shape_len - 1 << "]";
  }
  axis = axis % static_cast<int64_t>(shape_len);
  num_before_axis_ = 1;
  num_after_axis_ = 1;
  for (size_t i = 0; i < shape_len; i++) {
    if (static_cast<int64_t>(i) < axis) {
      num_before_axis_ *= shape_[i];
    } else if (static_cast<int64_t>(i) > axis) {
      num_after_axis_ *= shape_[i];
    }
  }
  dim_axis_ = shape_[axis];
}

template <typename T>
bool ArgMinWithValueCPUKernel<T>::Launch(const std::vector<kernel::AddressPtr> &inputs,
                                         const std::vector<kernel::AddressPtr> & /*workspaces*/,
                                         const std::vector<kernel::AddressPtr> &outputs) {
  if (!check_validation<T>(shape_, num_before_axis_, num_after_axis_, inputs, outputs)) {
    return false;
  }

  auto input = reinterpret_cast<T *>(inputs[0]->addr);
  auto output0 = reinterpret_cast<int32_t *>(outputs[0]->addr);
  auto output1 = reinterpret_cast<T *>(outputs[1]->addr);

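  // The input is processed as a [num_before_axis_, dim_axis_, num_after_axis_] view: for each
  // (i, j) pair, the dim_axis_ candidates are strided by num_after_axis_. Values are collected
  // into a float vector, so non-float element types are compared after a float conversion.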
  for (size_t i = 0; i < num_before_axis_; i++) {
    size_t src_index_i = i * dim_axis_ * num_after_axis_;
    for (size_t j = 0; j < num_after_axis_; j++) {
      std::vector<float> array_axis;
      size_t src_index_j = src_index_i + j;
      for (size_t k = 0; k < dim_axis_; k++) {
        size_t src_index_k = k * num_after_axis_ + src_index_j;
        array_axis.push_back(static_cast<float>(input[src_index_k]));
      }
      auto min_ops = std::min_element(array_axis.begin(), array_axis.end());
      auto min_index = static_cast<int32_t>(std::distance(array_axis.begin(), min_ops));
      auto dst_index = i * num_after_axis_ + j;
      output0[dst_index] = min_index;
      auto src_index = IntToSize(min_index) * num_after_axis_ + src_index_j;
      output1[dst_index] = input[src_index];
    }
  }
  return true;
}
}  // namespace kernel
}  // namespace mindspore
@@ -0,0 +1,56 @@
/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_ARGMINWITHVALUE_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_ARGMINWITHVALUE_CPU_KERNEL_H_
#include <vector>
#include <map>
#include <memory>
#include <algorithm>
#include "backend/kernel_compiler/cpu/cpu_kernel.h"
#include "backend/kernel_compiler/cpu/cpu_kernel_factory.h"

namespace mindspore {
namespace kernel {
template <typename T>
class ArgMinWithValueCPUKernel : public CPUKernel {
 public:
  ArgMinWithValueCPUKernel() = default;
  ~ArgMinWithValueCPUKernel() override = default;

  void InitKernel(const CNodePtr &kernel_node) override;

  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
              const std::vector<AddressPtr> &outputs) override;

 private:
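  // Input shape and its factorization around the reduction axis: product of
  // dims before the axis, product of dims after it, and the axis length.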
  std::vector<size_t> shape_;
  size_t num_before_axis_;
  size_t num_after_axis_;
  size_t dim_axis_;
};

MS_REG_CPU_KERNEL_T(
  ArgMinWithValue,
  KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeFloat32),
  ArgMinWithValueCPUKernel, float);
MS_REG_CPU_KERNEL_T(
  ArgMinWithValue,
  KernelAttr().AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeFloat16),
  ArgMinWithValueCPUKernel, float16);
}  // namespace kernel
}  // namespace mindspore

#endif  // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_ARGMINWITHVALUE_CPU_KERNEL_H_
@@ -0,0 +1,221 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "backend/kernel_compiler/cpu/minimum_cpu_kernel.h"
#include "runtime/device/cpu/cpu_device_address.h"

namespace mindspore {
namespace kernel {

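// Initializes shapes and dtypes, then decides between the tensor/scalar path and the
// tensor/tensor path (with optional broadcast).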
template <typename T>
void MinimumCPUKernel<T>::InitKernel(const CNodePtr &kernel_node) {
  CheckParam(kernel_node);
  input_x_shape_ = AnfAlgo::GetInputDeviceShape(kernel_node, 0);
  input_y_shape_ = AnfAlgo::GetInputDeviceShape(kernel_node, 1);
  output_shape_ = AnfAlgo::GetOutputDeviceShape(kernel_node, 0);
  TypeId input_x_dtype = AnfAlgo::GetInputDeviceDataType(kernel_node, 0);
  TypeId input_y_dtype = AnfAlgo::GetInputDeviceDataType(kernel_node, 1);
  size_t max_input_shape_size =
    input_x_shape_.size() > input_y_shape_.size() ? input_x_shape_.size() : input_y_shape_.size();
  for (size_t i = 0; i < output_shape_.size(); i++) {
    output_num_ *= output_shape_[i];
  }
  if ((input_x_shape_.size() == 0 && input_y_shape_.size() != 0) ||
      (input_x_shape_.size() != 0 && input_y_shape_.size() == 0)) {
    InitInputTensorAndScalar(max_input_shape_size);
  } else if (max_input_shape_size == output_shape_.size() && output_shape_.size() != 0) {
    InitInputTensors(input_x_dtype, input_y_dtype);
  } else {
    MS_LOG(EXCEPTION) << "Only supports two input tensors, or one tensor and one scalar";
  }
}

template <typename T>
void MinimumCPUKernel<T>::CheckParam(const CNodePtr &kernel_node) {
  size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node);
  if (input_num != 2) {
    MS_LOG(EXCEPTION) << "Input number is " << input_num << ", but MinimumCPUKernel needs 2 inputs.";
  }
  size_t output_num = AnfAlgo::GetOutputTensorNum(kernel_node);
  if (output_num != 1) {
    MS_LOG(EXCEPTION) << "Output number is " << output_num << ", but MinimumCPUKernel needs 1 output.";
  }
}

template <typename T>
void MinimumCPUKernel<T>::InitInputTensorAndScalar(size_t max_input_shape_size) {
  if (max_input_shape_size != output_shape_.size()) {
    MS_LOG(EXCEPTION) << "Output tensor rank must be equal to the max rank of the inputs";
  }
  need_broadcast_ = false;
}

template <typename T>
void MinimumCPUKernel<T>::InitInputTensors(TypeId input_x_dtype, TypeId input_y_dtype) {
  if (input_x_dtype == kNumberTypeBool && input_y_dtype == kNumberTypeBool) {
    MS_LOG(EXCEPTION) << "Input tensor types cannot both be bool";
  }
  // Check if the shapes need to be broadcast
  need_broadcast_ = IsBroadcast();
  if (need_broadcast_) {
    InitTensorBroadcastShape();
  }
}

template <typename T>
bool MinimumCPUKernel<T>::Launch(const std::vector<kernel::AddressPtr> &inputs,
                                 const std::vector<kernel::AddressPtr> & /*workspace*/,
                                 const std::vector<kernel::AddressPtr> &outputs) {
  T *input_x_ = reinterpret_cast<T *>(inputs[0]->addr);
  T *input_y_ = reinterpret_cast<T *>(inputs[1]->addr);
  T *output_ = reinterpret_cast<T *>(outputs[0]->addr);
  BroadcastArith(input_x_, input_y_, output_);
  return true;
}

template <typename T>
void MinimumCPUKernel<T>::BroadcastArith(const T *input_x, const T *input_y, T *output) {
  MS_EXCEPTION_IF_NULL(input_x);
  MS_EXCEPTION_IF_NULL(input_y);
  MS_EXCEPTION_IF_NULL(output);
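  // All broadcast shapes were padded to max_dims entries in InitTensorBroadcastShape(),
  // so the 21 dimension arguments below are well defined.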
  if (need_broadcast_) {
    BroadcastArithKernel(broadcast_input_x_shape_[0], broadcast_input_x_shape_[1], broadcast_input_x_shape_[2],
                         broadcast_input_x_shape_[3], broadcast_input_x_shape_[4], broadcast_input_x_shape_[5],
                         broadcast_input_x_shape_[6], broadcast_input_y_shape_[0], broadcast_input_y_shape_[1],
                         broadcast_input_y_shape_[2], broadcast_input_y_shape_[3], broadcast_input_y_shape_[4],
                         broadcast_input_y_shape_[5], broadcast_input_y_shape_[6], broadcast_output_shape_[0],
                         broadcast_output_shape_[1], broadcast_output_shape_[2], broadcast_output_shape_[3],
                         broadcast_output_shape_[4], broadcast_output_shape_[5], broadcast_output_shape_[6], input_x,
                         input_y, output);
  } else {
    if (input_x_shape_.size() == 0 || input_y_shape_.size() == 0) {
      BroadcastArithOneScalarOneTensor(input_x, input_y, output);
    } else {
      BroadcastArithTensors(input_x, input_y, output);
    }
  }
}

template <typename T>
bool MinimumCPUKernel<T>::IsBroadcast() {
  if (input_x_shape_.size() != input_y_shape_.size()) {
    return true;
  }
  for (size_t i = 0; i < input_x_shape_.size(); i++) {
    if (input_x_shape_[i] != input_y_shape_[i]) {
      return true;
    }
  }
  return false;
}

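// Right-aligns every shape into a fixed max_dims (7) frame, padding missing leading dims with 1.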
template <typename T>
void MinimumCPUKernel<T>::InitTensorBroadcastShape() {
  if (output_shape_.size() > max_dims) {
    MS_LOG(EXCEPTION) << "Broadcast operation does not support shapes with more than 7 dimensions";
  }
  broadcast_input_x_shape_.resize(max_dims, 1);
  broadcast_input_y_shape_.resize(max_dims, 1);
  broadcast_output_shape_.resize(max_dims, 1);
  for (size_t i = 0; i < output_shape_.size(); i++) {
    broadcast_output_shape_[i] = output_shape_[i];
  }
  int input_x_dim_offset = output_shape_.size() - input_x_shape_.size();
  for (size_t j = 0; j < input_x_shape_.size(); j++) {
    broadcast_input_x_shape_[j + input_x_dim_offset] = input_x_shape_[j];
    input_x_num_ *= input_x_shape_[j];
  }
  int input_y_dim_offset = output_shape_.size() - input_y_shape_.size();
  for (size_t k = 0; k < input_y_shape_.size(); k++) {
    if (need_broadcast_) {
      broadcast_input_y_shape_[k + input_y_dim_offset] = input_y_shape_[k];
      input_y_num_ *= input_y_shape_[k];
    }
  }
}

// Broadcast comparison: a broadcast (size-1) dimension collapses to index 0.
template <typename T>
size_t MinimumCPUKernel<T>::Index(const size_t &index, const size_t &dim) {
  return dim == 1 ? 0 : index;
}

// Broadcast Arithmetic
template <typename T>
void MinimumCPUKernel<T>::BroadcastArithKernel(const size_t l0, const size_t l1, const size_t l2, const size_t l3,
                                               const size_t l4, const size_t l5, const size_t l6, const size_t r0,
                                               const size_t r1, const size_t r2, const size_t r3, const size_t r4,
                                               const size_t r5, const size_t r6, const size_t d0, const size_t d1,
                                               const size_t d2, const size_t d3, const size_t d4, const size_t d5,
                                               const size_t d6, const T *input_x, const T *input_y, T *output) {
  MS_EXCEPTION_IF_NULL(input_x);
  MS_EXCEPTION_IF_NULL(input_y);
  MS_EXCEPTION_IF_NULL(output);
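  // Decompose the flat output position into 7-D coordinates (i..o), then fold them back into
  // flat offsets for each input, collapsing broadcast (size-1) dimensions via Index().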
  for (size_t pos = 0; pos < output_num_; pos++) {
    size_t i = pos / (d1 * d2 * d3 * d4 * d5 * d6) % d0;
    size_t j = pos / (d2 * d3 * d4 * d5 * d6) % d1;
    size_t k = pos / (d3 * d4 * d5 * d6) % d2;
    size_t l = pos / (d4 * d5 * d6) % d3;
    size_t m = pos / (d5 * d6) % d4;
    size_t n = pos / d6 % d5;
    size_t o = pos % d6;

    size_t l_index = Index(i, l0) * l1 * l2 * l3 * l4 * l5 * l6;
    l_index += Index(j, l1) * l2 * l3 * l4 * l5 * l6;
    l_index += Index(k, l2) * l3 * l4 * l5 * l6;
    l_index += Index(l, l3) * l4 * l5 * l6;
    l_index += Index(m, l4) * l5 * l6;
    l_index += Index(n, l5) * l6;
    l_index += Index(o, l6);
    size_t r_index = Index(i, r0) * r1 * r2 * r3 * r4 * r5 * r6;
    r_index += Index(j, r1) * r2 * r3 * r4 * r5 * r6;
    r_index += Index(k, r2) * r3 * r4 * r5 * r6;
    r_index += Index(l, r3) * r4 * r5 * r6;
    r_index += Index(m, r4) * r5 * r6;
    r_index += Index(n, r5) * r6;
    r_index += Index(o, r6);
    output[pos] = MinimumFunc(input_x[l_index], input_y[r_index]);
  }
}

template <typename T>
void MinimumCPUKernel<T>::BroadcastArithOneScalarOneTensor(const T *input_x, const T *input_y, T *output) {
  MS_EXCEPTION_IF_NULL(input_x);
  MS_EXCEPTION_IF_NULL(input_y);
  MS_EXCEPTION_IF_NULL(output);
  if (input_x_shape_.size() == 0) {
    for (size_t i = 0; i < output_num_; ++i) {
      output[i] = MinimumFunc(input_x[0], input_y[i]);
    }
  } else {
    for (size_t i = 0; i < output_num_; ++i) {
      output[i] = MinimumFunc(input_x[i], input_y[0]);
    }
  }
}

template <typename T>
void MinimumCPUKernel<T>::BroadcastArithTensors(const T *input_x, const T *input_y, T *output) {
  MS_EXCEPTION_IF_NULL(input_x);
  MS_EXCEPTION_IF_NULL(input_y);
  MS_EXCEPTION_IF_NULL(output);
  for (size_t i = 0; i < output_num_; ++i) {
    output[i] = MinimumFunc(input_x[i], input_y[i]);
  }
}

}  // namespace kernel
}  // namespace mindspore
@@ -0,0 +1,108 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_MINIMUM_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_MINIMUM_CPU_KERNEL_H_

#include <vector>
#include "backend/kernel_compiler/cpu/cpu_kernel.h"
#include "backend/kernel_compiler/cpu/cpu_kernel_factory.h"

namespace mindspore {
namespace kernel {
template <typename T>
class MinimumCPUKernel : public CPUKernel {
 public:
  MinimumCPUKernel() = default;
  ~MinimumCPUKernel() override = default;

  void InitKernel(const CNodePtr &kernel_node) override;

  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
              const std::vector<AddressPtr> &outputs) override;

 private:
  void CheckParam(const CNodePtr &kernel_node);

  bool IsBroadcast();

  size_t Index(const size_t &index, const size_t &dim);

  void InitTensorBroadcastShape();

  void InitInputTensorAndScalar(size_t max_input_shape_size);

  void InitInputTensors(TypeId input_x_dtype, TypeId input_y_dtype);

  // Broadcast Arithmetic
  void BroadcastArithKernel(const size_t l0, const size_t l1, const size_t l2, const size_t l3, const size_t l4,
                            const size_t l5, const size_t l6, const size_t r0, const size_t r1, const size_t r2,
                            const size_t r3, const size_t r4, const size_t r5, const size_t r6, const size_t d0,
                            const size_t d1, const size_t d2, const size_t d3, const size_t d4, const size_t d5,
                            const size_t d6, const T *input_x, const T *input_y, T *output);

  T MinimumFunc(const T &lhs, const T &rhs) { return lhs < rhs ? lhs : rhs; }

  void BroadcastArithOneScalarOneTensor(const T *input_x, const T *input_y, T *output);

  void BroadcastArithTensors(const T *input_x, const T *input_y, T *output);

  void BroadcastArith(const T *input_x, const T *input_y, T *output);

 private:
  bool need_broadcast_{false};
  size_t input_x_num_{1};
  size_t input_y_num_{1};
  size_t output_num_{1};
  std::vector<size_t> input_x_shape_;
  std::vector<size_t> input_y_shape_;
  std::vector<size_t> output_shape_;
  std::vector<size_t> broadcast_input_x_shape_;
  std::vector<size_t> broadcast_input_y_shape_;
  std::vector<size_t> broadcast_output_shape_;
  const size_t max_dims{7};
};

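// Registered dtype combinations; both inputs and the output share the same type.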
MS_REG_CPU_KERNEL_T(
  Minimum, KernelAttr().AddInputAttr(kNumberTypeInt32).AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeInt32),
  MinimumCPUKernel, int32_t);

MS_REG_CPU_KERNEL_T(
  Minimum,
  KernelAttr().AddInputAttr(kNumberTypeUInt32).AddInputAttr(kNumberTypeUInt32).AddOutputAttr(kNumberTypeUInt32),
  MinimumCPUKernel, uint32_t);

MS_REG_CPU_KERNEL_T(
  Minimum,
  KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
  MinimumCPUKernel, float);

MS_REG_CPU_KERNEL_T(
  Minimum, KernelAttr().AddInputAttr(kNumberTypeInt64).AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeInt64),
  MinimumCPUKernel, int64_t);

MS_REG_CPU_KERNEL_T(
  Minimum,
  KernelAttr().AddInputAttr(kNumberTypeUInt64).AddInputAttr(kNumberTypeUInt64).AddOutputAttr(kNumberTypeUInt64),
  MinimumCPUKernel, uint64_t);

MS_REG_CPU_KERNEL_T(
  Minimum,
  KernelAttr().AddInputAttr(kNumberTypeFloat64).AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeFloat64),
  MinimumCPUKernel, double);
}  // namespace kernel
}  // namespace mindspore

#endif  // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_MINIMUM_CPU_KERNEL_H_
@@ -0,0 +1,139 @@
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

import numpy as np
import pytest

import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P

context.set_context(mode=context.GRAPH_MODE, device_target="CPU")


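# Thin wrapper around P.ArgMinWithValue; returns an (indices, values) pair along `axis`.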
class NetArgminWithValue(nn.Cell):
    def __init__(self, axis=0, keep_dims=False):
        super(NetArgminWithValue, self).__init__()
        self.argmin = P.ArgMinWithValue(axis=axis, keep_dims=keep_dims)

    def construct(self, x):
        return self.argmin(x)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_argminwithvalue_fp32():
    x = np.array([[1., 20., 5.],
                  [67., 8., 9.],
                  [130., 24., 15.],
                  [-0.5, 25, 100]]).astype(np.float32)
    argmin_a0 = NetArgminWithValue(axis=0, keep_dims=False)

    output0, output1 = argmin_a0(Tensor(x))
    expect0 = np.array([3, 1, 0]).astype(np.int32)
    expect1 = np.array([-0.5, 8., 5.]).astype(np.float32)
    error = np.ones(shape=expect1.shape) * 1.0e-6
    assert np.all(output0.asnumpy() == expect0)
    assert np.all(np.abs(output1.asnumpy() - expect1) < error)

    argmin_a0k = NetArgminWithValue(axis=0, keep_dims=True)

    output0, output1 = argmin_a0k(Tensor(x))
    expect0 = np.array([[3, 1, 0]]).astype(np.int32)
    expect1 = np.array([[-0.5, 8., 5.]]).astype(np.float32)
    error = np.ones(shape=expect1.shape) * 1.0e-6
    assert np.all(output0.asnumpy() == expect0)
    assert np.all(np.abs(output1.asnumpy() - expect1) < error)

    argmin_a1 = NetArgminWithValue(axis=1, keep_dims=False)

    output0, output1 = argmin_a1(Tensor(x))
    expect0 = np.array([0, 1, 2, 0]).astype(np.int32)
    expect1 = np.array([1., 8., 15., -0.5]).astype(np.float32)
    error = np.ones(shape=expect1.shape) * 1.0e-6
    assert np.all(output0.asnumpy() == expect0)
    assert np.all(np.abs(output1.asnumpy() - expect1) < error)

    argmin_a1k = NetArgminWithValue(axis=-1, keep_dims=True)

    output0, output1 = argmin_a1k(Tensor(x))
    expect0 = np.array([[0], [1], [2], [0]]).astype(np.int32)
    expect1 = np.array([[1.], [8.], [15.], [-0.5]]).astype(np.float32)
    error = np.ones(shape=expect1.shape) * 1.0e-6
    assert np.all(output0.asnumpy() == expect0)
    assert np.all(np.abs(output1.asnumpy() - expect1) < error)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_argminwithvalue_fp16():
    x = np.array([[1., 20., 5.],
                  [67., 8., 9.],
                  [130., 24., 15.],
                  [-0.5, 25, 100]]).astype(np.float16)
    argmin_a0 = NetArgminWithValue(axis=0, keep_dims=False)

    output0, output1 = argmin_a0(Tensor(x))
    expect0 = np.array([3, 1, 0]).astype(np.int32)
    expect1 = np.array([-0.5, 8., 5.]).astype(np.float16)
    error = np.ones(shape=expect1.shape) * 1.0e-6
    assert np.all(output0.asnumpy() == expect0)
    assert np.all(np.abs(output1.asnumpy() - expect1) < error)

    argmin_a0k = NetArgminWithValue(axis=0, keep_dims=True)

    output0, output1 = argmin_a0k(Tensor(x))
    expect0 = np.array([[3, 1, 0]]).astype(np.int32)
    expect1 = np.array([[-0.5, 8., 5.]]).astype(np.float16)
    error = np.ones(shape=expect1.shape) * 1.0e-6
    assert np.all(output0.asnumpy() == expect0)
    assert np.all(np.abs(output1.asnumpy() - expect1) < error)

    argmin_a1 = NetArgminWithValue(axis=1, keep_dims=False)

    output0, output1 = argmin_a1(Tensor(x))
    expect0 = np.array([0, 1, 2, 0]).astype(np.int32)
    expect1 = np.array([1., 8., 15., -0.5]).astype(np.float16)
    error = np.ones(shape=expect1.shape) * 1.0e-6
    assert np.all(output0.asnumpy() == expect0)
    assert np.all(np.abs(output1.asnumpy() - expect1) < error)

    argmin_a1k = NetArgminWithValue(axis=-1, keep_dims=True)

    output0, output1 = argmin_a1k(Tensor(x))
    expect0 = np.array([[0], [1], [2], [0]]).astype(np.int32)
    expect1 = np.array([[1.], [8.], [15.], [-0.5]]).astype(np.float16)
    error = np.ones(shape=expect1.shape) * 1.0e-6
    assert np.all(output0.asnumpy() == expect0)
    assert np.all(np.abs(output1.asnumpy() - expect1) < error)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_argminwithvalue_tensor():
    prop = 100 if np.random.random() > 0.5 else -100
    x = np.random.randn(3, 4, 5, 6).astype(np.float16) * prop
    argmin_a0 = NetArgminWithValue(axis=-2, keep_dims=False)

    output0, output1 = argmin_a0(Tensor(x))
    expect0 = np.argmin(x, axis=-2)
    expect1 = np.min(x, axis=-2).astype(np.float16)
    error = np.ones(shape=expect1.shape) * 1.0e-6
    assert np.all(output0.asnumpy() == expect0)
    assert np.all(np.abs(output1.asnumpy() - expect1) < error)
@@ -0,0 +1,185 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

import numpy as np
import pytest

import mindspore.context as context
from mindspore.common.tensor import Tensor
from mindspore.nn import Cell
from mindspore.ops import operations as P


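# Applies P.Minimum with one operand fixed to the Python scalar 20 at construction time.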
class ConstScalarAndTensorMinimum(Cell):
    def __init__(self):
        super(ConstScalarAndTensorMinimum, self).__init__()
        self.min = P.Minimum()
        self.x = 20

    def construct(self, y):
        return self.min(self.x, y)


class TwoTensorsMinimum(Cell):
    def __init__(self):
        super(TwoTensorsMinimum, self).__init__()
        self.min = P.Minimum()

    def construct(self, x, y):
        return self.min(x, y)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu_training
@pytest.mark.env_onecard
def test_minimum_constScalar_tensor_int():
    x = Tensor(np.array([[2, 3, 4], [100, 200, 300]]).astype(np.int32))
    expect = [[2, 3, 4], [20, 20, 20]]

    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    min_op = ConstScalarAndTensorMinimum()
    output = min_op(x)
    assert np.all(output.asnumpy() == expect)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu_training
@pytest.mark.env_onecard
def test_minimum_two_tensors_Not_Broadcast_int():
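    # Randomize the overall sign so both positive and negative values are exercised.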
    prop = 100 if np.random.random() > 0.5 else -100
    x = np.random.randn(3, 4, 5).astype(np.int32) * prop
    y = np.random.randn(3, 4, 5).astype(np.int32) * prop
    expect = np.minimum(x, y).astype(np.int32)

    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    min_op = TwoTensorsMinimum()
    output = min_op(Tensor(x), Tensor(y))
    assert np.all(output.asnumpy() == expect)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu_training
@pytest.mark.env_onecard
def test_minimum_two_tensors_Broadcast_int():
    prop = 100 if np.random.random() > 0.5 else -100
    x = np.random.randn(3, 4, 5).astype(np.int32) * prop
    y = np.random.randn(3, 1, 1).astype(np.int32) * prop
    expect = np.minimum(x, y).astype(np.int32)

    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    min_op = TwoTensorsMinimum()
    output = min_op(Tensor(x), Tensor(y))
    assert np.all(output.asnumpy() == expect)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu_training
@pytest.mark.env_onecard
def test_minimum_two_tensors_notBroadcast_oneDimension_int():
    prop = 100 if np.random.random() > 0.5 else -100
    x = np.random.randn(3).astype(np.int32) * prop
    y = np.random.randn(3).astype(np.int32) * prop
    expect = np.minimum(x, y).astype(np.int32)

    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    min_op = TwoTensorsMinimum()
    output = min_op(Tensor(x), Tensor(y))
    assert np.all(output.asnumpy() == expect)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu_training
@pytest.mark.env_onecard
def test_minimum_two_tensors_notBroadcast_all_oneDimension_int():
    x = Tensor(np.array([[2]]).astype(np.int32))
    y = Tensor(np.array([[100]]).astype(np.int32))
    expect = [[2]]

    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    min_op = TwoTensorsMinimum()
    output = min_op(x, y)
    assert np.all(output.asnumpy() == expect)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu_training
@pytest.mark.env_onecard
def test_minimum_two_tensors_notBroadcast_float32():
    prop = 100 if np.random.random() > 0.5 else -100
    x = np.random.randn(3, 4, 5).astype(np.float32) * prop
    y = np.random.randn(3, 4, 5).astype(np.float32) * prop
    expect = np.minimum(x, y).astype(np.float32)
    error = np.ones(shape=expect.shape) * 1.0e-5

    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    min_op = TwoTensorsMinimum()
    output = min_op(Tensor(x), Tensor(y))
    diff = output.asnumpy() - expect
    assert np.all(np.abs(diff) < error)
    assert output.shape == expect.shape


@pytest.mark.level0
@pytest.mark.platform_x86_cpu_training
@pytest.mark.env_onecard
def test_minimum_two_tensors_notBroadcast_float16():
    prop = 100 if np.random.random() > 0.5 else -100
    x = np.random.randn(3, 4, 5).astype(np.float16) * prop
    y = np.random.randn(3, 4, 5).astype(np.float16) * prop
    expect = np.minimum(x, y).astype(np.float16)
    error = np.ones(shape=expect.shape) * 1.0e-5

    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    min_op = TwoTensorsMinimum()
    output = min_op(Tensor(x), Tensor(y))
    diff = output.asnumpy() - expect
    assert np.all(np.abs(diff) < error)
    assert output.shape == expect.shape


@pytest.mark.level0
@pytest.mark.platform_x86_cpu_training
@pytest.mark.env_onecard
def test_minimum_two_tensors_Broadcast_float16():
    prop = 100 if np.random.random() > 0.5 else -100
    x = np.random.randn(3, 4, 5).astype(np.float16) * prop
    y = np.random.randn(3, 4, 1).astype(np.float16) * prop
    expect = np.minimum(x, y).astype(np.float16)
    error = np.ones(shape=expect.shape) * 1.0e-5

    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    min_op = TwoTensorsMinimum()
    output = min_op(Tensor(x), Tensor(y))
    diff = output.asnumpy() - expect
    assert np.all(np.abs(diff) < error)
    assert output.shape == expect.shape


@pytest.mark.level0
@pytest.mark.platform_x86_cpu_training
@pytest.mark.env_onecard
def test_minimum_two_tensors_Broadcast_float64():
    prop = 100 if np.random.random() > 0.5 else -100
    x = np.random.randn(3, 4, 1).astype(np.float64) * prop
    y = np.random.randn(3, 4, 5).astype(np.float64) * prop
    expect = np.minimum(x, y).astype(np.float64)
    error = np.ones(shape=expect.shape) * 1.0e-5

    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    min_op = TwoTensorsMinimum()
    output = min_op(Tensor(x), Tensor(y))
    diff = output.asnumpy() - expect
    assert np.all(np.abs(diff) < error)
    assert output.shape == expect.shape