!9559 add DynamicAssign, PadAndShift cpu ops
From: @fangzehua
Reviewed-by:
Signed-off-by:
pull/9559/MERGE
commit 6b4abe8514
@@ -0,0 +1,89 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <algorithm>
#include "backend/kernel_compiler/cpu/dynamic_assign_cpu_kernel.h"
#include "runtime/device/cpu/cpu_device_address.h"

namespace mindspore {
namespace kernel {
void DynamicAssignCPUKernel::InitKernel(const CNodePtr &kernel_node) {
  node_ = kernel_node;
  input_x_dtype_ = AnfAlgo::GetPrevNodeOutputInferDataType(kernel_node, 0);
  input_x_dtype_size_ = GetTypeByte(TypeIdToType(input_x_dtype_));
}

bool DynamicAssignCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs,
                                    const std::vector<kernel::AddressPtr> & /*workspace*/,
                                    const std::vector<kernel::AddressPtr> &outputs) {
  if (input_x_dtype_ == kNumberTypeInt32) {
    LaunchKernel<int>(inputs, outputs);
  } else if (input_x_dtype_ == kNumberTypeInt64) {
    LaunchKernel<int64_t>(inputs, outputs);
  } else if (input_x_dtype_ == kNumberTypeFloat32) {
    LaunchKernel<float>(inputs, outputs);
  } else if (input_x_dtype_ == kNumberTypeFloat64) {
    LaunchKernel<double>(inputs, outputs);
  } else {
    MS_LOG(ERROR) << "Dtype of input_x only supports float32, float64, int32, int64";
    return false;
  }
  return true;
}

template <typename T>
void DynamicAssignCPUKernel::LaunchKernel(const std::vector<AddressPtr> &inputs,
                                          const std::vector<kernel::AddressPtr> &outputs) {
  auto input_x_shape = AnfAlgo::GetPrevNodeOutputInferShape(node_, 0);
  auto input_y_shape = AnfAlgo::GetPrevNodeOutputInferShape(node_, 1);
  batch_size_ = 1;
  for (size_t i = 0; i < input_x_shape.size(); ++i) {
    batch_size_ *= input_x_shape[i];
  }

  if (input_x_shape.size() != input_y_shape.size()) MS_LOG(EXCEPTION) << "x and y must have the same shape";
  for (size_t i = 0; i < input_x_shape.size(); ++i) {
    if (input_x_shape[i] != input_y_shape[i]) {
      MS_LOG(EXCEPTION) << "x and y must have the same shape";
    }
  }
  T *input_x = reinterpret_cast<T *>(inputs[0]->addr);
  T *input_y = reinterpret_cast<T *>(inputs[1]->addr);
  auto max_size = inputs[0]->size;
  size_t total_size = input_x_dtype_size_ * batch_size_;
  if (total_size > max_size) {
    MS_LOG(EXCEPTION) << "Memcpy size must be <= max_size, but got memcpy size: " << total_size
                      << ", max size: " << max_size;
  }
  int ret = memcpy_s(input_x, total_size, input_y, total_size);
  if (ret != 0) {
    MS_LOG(EXCEPTION) << "memcpy_s error, error no: " << ret;
  }

  auto node_with_idx = AnfAlgo::GetPrevNodeOutput(node_, 0);
  auto node = node_with_idx.first;
  if (node->isa<Parameter>()) {
    auto node_ptr = node->cast<ParameterPtr>();
    auto value = node_ptr->default_param();
    auto tensor = value->cast<std::shared_ptr<tensor::Tensor>>();
    ShapeVector shape_tmp;
    (void)std::transform(input_x_shape.begin(), input_x_shape.end(), std::back_inserter(shape_tmp), SizeToLong);
    tensor->set_shape(shape_tmp);
  } else {
    MS_LOG(EXCEPTION) << "Input x must be a Parameter.";
  }
}
}  // namespace kernel
}  // namespace mindspore
@@ -0,0 +1,69 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_DYNAMIC_ASSIGN_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_DYNAMIC_ASSIGN_CPU_KERNEL_H_

#include <vector>
#include <memory>
#include <unordered_map>
#include "backend/kernel_compiler/cpu/cpu_kernel.h"
#include "backend/kernel_compiler/cpu/cpu_kernel_factory.h"

namespace mindspore {
namespace kernel {
class DynamicAssignCPUKernel : public CPUKernel {
 public:
  DynamicAssignCPUKernel() = default;
  ~DynamicAssignCPUKernel() override = default;

  void InitKernel(const CNodePtr &kernel_node) override;

  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
              const std::vector<AddressPtr> &outputs) override;

  template <typename T>
  void LaunchKernel(const std::vector<AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &outputs);

 private:
  size_t batch_size_{1};
  TypeId input_x_dtype_{kTypeUnknown};
  size_t input_x_dtype_size_ = 4;
  CNodePtr node_ = nullptr;
};

MS_REG_CPU_KERNEL(
  DynamicAssign,
  KernelAttr().AddInputAttr(kNumberTypeInt32).AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeInt32),
  DynamicAssignCPUKernel);

MS_REG_CPU_KERNEL(
  DynamicAssign,
  KernelAttr().AddInputAttr(kNumberTypeInt64).AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeInt64),
  DynamicAssignCPUKernel);

MS_REG_CPU_KERNEL(
  DynamicAssign,
  KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
  DynamicAssignCPUKernel);

MS_REG_CPU_KERNEL(
  DynamicAssign,
  KernelAttr().AddInputAttr(kNumberTypeFloat64).AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeFloat64),
  DynamicAssignCPUKernel);
}  // namespace kernel
}  // namespace mindspore

#endif  // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_DYNAMIC_ASSIGN_CPU_KERNEL_H_
@@ -0,0 +1,87 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "backend/kernel_compiler/cpu/pad_and_shift_cpu_kernel.h"
#include <string>
#include "runtime/device/cpu/cpu_device_address.h"

namespace mindspore {
namespace kernel {
void PadAndShiftCPUKernel::InitKernel(const CNodePtr &kernel_node) {
  MS_EXCEPTION_IF_NULL(kernel_node);
  node_ = kernel_node;
  input_x_dtype_ = AnfAlgo::GetPrevNodeOutputInferDataType(kernel_node, 0);
  type_size_ = GetTypeByte(TypeIdToType(input_x_dtype_));
  auto indices_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
  batch_size_ = 1;
  for (size_t i = 0; i < indices_shape.size(); ++i) {
    batch_size_ *= indices_shape[i];
  }
  MS_LOG(INFO) << "PadAndShift batch_size:" << batch_size_;
  auto cum_sum_arr_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
  if (cum_sum_arr_shape.size() != 1) {
    MS_LOG(ERROR) << "The rank of cum_sum_arr must be 1.";
  }
  cum_sum_size_ = cum_sum_arr_shape[0];
}

bool PadAndShiftCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs,
                                  const std::vector<kernel::AddressPtr> & /*workspace*/,
                                  const std::vector<kernel::AddressPtr> &outputs) {
  if (input_x_dtype_ == kNumberTypeInt32) {
    LaunchKernel<int>(inputs, outputs);
  } else if (input_x_dtype_ == kNumberTypeInt64) {
    LaunchKernel<int64_t>(inputs, outputs);
  } else {
    MS_LOG(ERROR) << "Dtype of input_x only supports int32, int64";
    return false;
  }
  return true;
}

template <typename T>
void PadAndShiftCPUKernel::LaunchKernel(const std::vector<AddressPtr> &inputs,
                                        const std::vector<kernel::AddressPtr> &outputs) {
  T *input_x = reinterpret_cast<T *>(inputs[0]->addr);
  T *cum_sum_arr = reinterpret_cast<T *>(inputs[1]->addr);
  T shift_idx = *reinterpret_cast<T *>(inputs[2]->addr);
  T *output = reinterpret_cast<T *>(outputs[0]->addr);

  if (shift_idx >= static_cast<T>(cum_sum_size_)) {
    MS_LOG(EXCEPTION) << "Shift index must be smaller than the size of cum_sum_arr.";
  }
  size_t output_size = cum_sum_arr[cum_sum_size_ - 1];
  T shift_size = cum_sum_arr[shift_idx];
  T valid_size = cum_sum_arr[shift_idx + 1] - shift_size;
  int ret = memset_s(output, outputs[0]->size, -1, type_size_ * output_size);
  if (ret != 0) {
    MS_LOG(EXCEPTION) << "memset_s error, error no: " << ret;
  }
  ret = memcpy_s(output + shift_size, valid_size * type_size_, input_x, valid_size * type_size_);
  if (ret != 0) {
    MS_LOG(EXCEPTION) << "memcpy_s error, error no: " << ret;
  }
  std::vector<size_t> out_shape;
  out_shape.emplace_back(output_size);
  std::vector<TypeId> dtypes;
  auto output_nums = AnfAlgo::GetOutputTensorNum(node_);
  for (size_t i = 0; i < output_nums; i++) {
    dtypes.push_back(AnfAlgo::GetOutputInferDataType(node_, i));
  }
  AnfAlgo::SetOutputInferTypeAndShape(dtypes, {out_shape}, node_.get());
}
}  // namespace kernel
}  // namespace mindspore
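For reference, a minimal NumPy sketch of the computation in PadAndShiftCPUKernel::LaunchKernel above (the helper name and the use of NumPy are illustrative only, not part of this change): the output has cum_sum_arr[-1] elements, is filled with -1, and receives the valid prefix of x at offset cum_sum_arr[shift_idx].

import numpy as np

def pad_and_shift_reference(x, cum_sum_arr, shift_idx):
    # Output length comes from the last cumulative-sum entry.
    output_size = cum_sum_arr[-1]
    # Offset and number of valid elements for this shift index.
    shift_size = cum_sum_arr[shift_idx]
    valid_size = cum_sum_arr[shift_idx + 1] - shift_size
    # Mirrors memset_s(-1) followed by memcpy_s of the valid prefix of x.
    output = np.full(output_size, -1, dtype=x.dtype)
    output[shift_size:shift_size + valid_size] = x[:valid_size]
    return output

# Mirrors the CPU test further below: prints [-1 -1 -1  9 13].
print(pad_and_shift_reference(np.array([9, 13, -1, -1, -1, -1, -1, -1], np.int32),
                              np.array([0, 3, 5], np.int32), 1))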
@@ -0,0 +1,66 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_PAD_AND_SHIFT_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_PAD_AND_SHIFT_CPU_KERNEL_H_

#include <vector>
#include <memory>
#include <unordered_map>
#include "backend/kernel_compiler/cpu/cpu_kernel.h"
#include "backend/kernel_compiler/cpu/cpu_kernel_factory.h"

namespace mindspore {
namespace kernel {
class PadAndShiftCPUKernel : public CPUKernel {
 public:
  PadAndShiftCPUKernel() = default;
  ~PadAndShiftCPUKernel() override = default;

  void InitKernel(const CNodePtr &kernel_node) override;

  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
              const std::vector<AddressPtr> &outputs) override;

  template <typename T>
  void LaunchKernel(const std::vector<AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &outputs);

 private:
  size_t batch_size_{1};
  size_t cum_sum_size_{1};
  size_t type_size_{4};
  TypeId input_x_dtype_{kTypeUnknown};
  CNodePtr node_ = nullptr;
};

MS_REG_CPU_KERNEL(PadAndShift,
                  KernelAttr()
                    .AddInputAttr(kNumberTypeInt32)
                    .AddInputAttr(kNumberTypeInt32)
                    .AddInputAttr(kNumberTypeInt32)
                    .AddOutputAttr(kNumberTypeInt32),
                  PadAndShiftCPUKernel);

MS_REG_CPU_KERNEL(PadAndShift,
                  KernelAttr()
                    .AddInputAttr(kNumberTypeInt64)
                    .AddInputAttr(kNumberTypeInt64)
                    .AddInputAttr(kNumberTypeInt64)
                    .AddOutputAttr(kNumberTypeInt64),
                  PadAndShiftCPUKernel);
}  // namespace kernel
}  // namespace mindspore

#endif  // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_PAD_AND_SHIFT_CPU_KERNEL_H_
@@ -0,0 +1,33 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""PadAndShift op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType

pad_and_shift_op_info = AiCPURegOp("PadAndShift") \
    .fusion_type("OPAQUE") \
    .input(0, "x", "required") \
    .input(1, "cum_sum_arr", "required") \
    .input(2, "shift_idx", "required") \
    .output(0, "output", "required") \
    .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \
    .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.I64_Default, DataType.I64_Default) \
    .get_op_info()


@op_info_register(pad_and_shift_op_info)
def _pad_and_shift_aicpu():
    """PadAndShift AiCPU register"""
    return
@@ -0,0 +1,36 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""SubAndFilter op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType

sub_and_filter_op_info = AiCPURegOp("SubAndFilter") \
    .fusion_type("OPAQUE") \
    .input(0, "x", "required") \
    .input(1, "max_num", "required") \
    .input(2, "offset", "required") \
    .output(0, "filter_res", "required") \
    .output(1, "filter_idx", "required") \
    .dtype_format(DataType.I32_Default, DataType.I32_Default,
                  DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \
    .dtype_format(DataType.I64_Default, DataType.I64_Default,
                  DataType.I64_Default, DataType.I64_Default, DataType.I64_Default) \
    .get_op_info()


@op_info_register(sub_and_filter_op_info)
def _sub_and_filter_aicpu():
    """SubAndFilter AiCPU register"""
    return
@@ -0,0 +1,49 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor, Parameter
import mindspore.common.dtype as mstype
from mindspore.ops import operations as P

context.set_context(mode=context.GRAPH_MODE, device_target="CPU")


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.unique = P.Unique()
        self.dynamic_assign = P.DynamicAssign()
        self.param = Parameter(
            Tensor(np.zeros((5,), np.int32)), name="assign_x")

    def construct(self, y):
        y, _ = self.unique(y)
        return self.dynamic_assign(self.param, y)


@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_dynamic_assign():
    y = Tensor(np.array([2, 2, 3, 3, 4]), mstype.int32)
    dynamic_assign = Net()
    _ = dynamic_assign(y)
    expect1 = np.array([2, 3, 4])
    param_np = dynamic_assign.param.data.asnumpy()
    assert (param_np == expect1).all()
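As a reading aid for the assertion above (a hedged reading of dynamic_assign_cpu_kernel.cc earlier in this change; the NumPy lines below are illustrative only and not part of the diff): DynamicAssign copies the unique values into the parameter's buffer and, because the destination is a Parameter, resets the stored tensor shape to the runtime shape, so the (5,) zero parameter reads back as the (3,) array [2, 3, 4].

import numpy as np

# What the test relies on, modelled in NumPy:
y_unique = np.unique(np.array([2, 2, 3, 3, 4], np.int32))  # -> [2, 3, 4]
param = np.zeros((5,), np.int32)                           # Parameter "assign_x"
param = y_unique.copy()  # data copied in and shape shrunk to y_unique.shape
assert (param == np.array([2, 3, 4], np.int32)).all()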
@@ -0,0 +1,46 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
import mindspore.common.dtype as mstype
from mindspore.ops import operations as P

context.set_context(mode=context.GRAPH_MODE, device_target="CPU")


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.pad_and_shift = P.PadAndShift()
        self.shift_idx = 1

    def construct(self, x, y):
        return self.pad_and_shift(x, y, self.shift_idx)


@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_pad_and_shift_cpu():
    x = Tensor(np.array([9, 13, -1, -1, -1, -1, -1, -1]), mstype.int32)
    y = Tensor(np.array([0, 3, 5]), mstype.int32)
    net = Net()
    output = net(x, y)
    expect = np.array([-1, -1, -1, 9, 13])
    assert (output.asnumpy() == expect).all()