update Pool's attr kernel_size, pad_mode

pull/11332/head
yuchaojie 5 years ago
parent dfa6daaa57
commit b51b3a6764
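For context, a minimal before/after sketch of the renamed attributes at a primitive call site, following the updated call pattern in nn/layer/pooling.py below; the concrete values here are hypothetical:

    from mindspore.ops import operations as P

    # old spelling: P.MaxPool(ksize=2, strides=2, padding="VALID")
    max_pool = P.MaxPool(kernel_size=2, strides=2, pad_mode="VALID")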

File diff suppressed because one or more lines are too long

@ -30,14 +30,13 @@ using mindspore::kernel::AddressPtr;
using CTask = std::function<void(size_t, size_t)>;
namespace mindspore {
namespace kernel {
const char KSIZE[] = "ksize";
const char KERNEL_SIZE[] = "kernel_size";
const char STRIDE[] = "stride";
const char STRIDES[] = "strides";
const char DILATION[] = "dilation";
const char PAD[] = "pad";
const char PAD_LIST[] = "pad_list";
const char PAD_MODE[] = "pad_mode";
const char PADDING[] = "padding";
const char PAD_MODE_LOWER_SAME[] = "same";
const char PAD_MODE_LOWER_VALID[] = "valid";
const char PAD_MODE_UPPER_SAME[] = "SAME";

@ -31,7 +31,7 @@ void AvgPoolingGradCPUKernel::InitKernel(const CNodePtr &kernel_node) {
dnnl::memory::desc dst_desc = GetDefaultMemDesc(dst_shape);
std::vector<int> origin_kernel_sizes;
std::vector<int> strides;
std::vector<int64_t> kernel_sizes_me = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(kernel_node, KSIZE);
std::vector<int64_t> kernel_sizes_me = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(kernel_node, KERNEL_SIZE);
std::vector<int64_t> strides_me = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(kernel_node, STRIDES);
(void)std::transform(kernel_sizes_me.begin(), kernel_sizes_me.end(), std::back_inserter(origin_kernel_sizes),
[](const int64_t &value) { return static_cast<int>(value); });
@ -42,7 +42,7 @@ void AvgPoolingGradCPUKernel::InitKernel(const CNodePtr &kernel_node) {
}
dnnl::memory::dims strides_dims{strides[2], strides[3]};
dnnl::memory::dims kernels_dims{origin_kernel_sizes[2], origin_kernel_sizes[3]};
const std::string pad_mode = AnfAlgo::GetNodeAttr<std::string>(kernel_node, PADDING);
const std::string pad_mode = AnfAlgo::GetNodeAttr<std::string>(kernel_node, PAD_MODE);
std::vector<int> int_padding_l;
std::vector<int> int_padding_r;
std::vector<size_t> kernel_size({IntToSize(origin_kernel_sizes[2]), IntToSize(origin_kernel_sizes[3])});

@ -30,7 +30,7 @@ void PoolingCPUKernel::InitKernel(const CNodePtr &kernel_node) {
dnnl::memory::desc dst_desc = GetDefaultMemDesc(dst_shape);
std::vector<int> origin_kernel_sizes;
std::vector<int> strides;
std::vector<int64_t> kernel_sizes_me = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(kernel_node, KSIZE);
std::vector<int64_t> kernel_sizes_me = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(kernel_node, KERNEL_SIZE);
std::vector<int64_t> strides_me = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(kernel_node, STRIDES);
(void)std::transform(kernel_sizes_me.begin(), kernel_sizes_me.end(), std::back_inserter(origin_kernel_sizes),
[](const int64_t &value) { return static_cast<int>(value); });
@ -41,7 +41,7 @@ void PoolingCPUKernel::InitKernel(const CNodePtr &kernel_node) {
}
dnnl::memory::dims strides_dims{strides[2], strides[3]};
dnnl::memory::dims kernels_dims{origin_kernel_sizes[2], origin_kernel_sizes[3]};
const std::string pad_mode = AnfAlgo::GetNodeAttr<std::string>(kernel_node, PADDING);
const std::string pad_mode = AnfAlgo::GetNodeAttr<std::string>(kernel_node, PAD_MODE);
std::vector<int> int_padding_l;
std::vector<int> int_padding_r;
std::vector<size_t> kernel_size({IntToSize(origin_kernel_sizes[2]), IntToSize(origin_kernel_sizes[3])});

@ -29,7 +29,7 @@ void MaxPoolingGradCPUKernel::InitKernel(const CNodePtr &kernel_node) {
dst_shape_ = AnfAlgo::GetInputDeviceShape(kernel_node, 1);
std::vector<int> kernel_sizes;
std::vector<int> strides;
auto kernel_sizes_me = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(kernel_node, KSIZE);
auto kernel_sizes_me = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(kernel_node, KERNEL_SIZE);
auto strides_me = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(kernel_node, STRIDES);
(void)std::transform(kernel_sizes_me.begin(), kernel_sizes_me.end(), std::back_inserter(kernel_sizes),
[](const int64_t &value) { return static_cast<int>(value); });
@ -39,7 +39,7 @@ void MaxPoolingGradCPUKernel::InitKernel(const CNodePtr &kernel_node) {
MS_LOG(EXCEPTION) << "pooling grad invalid input size";
}
std::vector<int> padding_r;
const std::string pad_mode = AnfAlgo::GetNodeAttr<std::string>(kernel_node, PADDING);
const std::string pad_mode = AnfAlgo::GetNodeAttr<std::string>(kernel_node, PAD_MODE);
kernel_size_ = {IntToSize(kernel_sizes[2]), IntToSize(kernel_sizes[3])};
stride_ = strides[3];
GetPadding(kernel_node, pad_mode, src_shape_, kernel_size_, stride_, &padding_l_, &padding_r);

@ -92,7 +92,7 @@ class MaxPoolWithArgmaxGpuFwdKernel : public GpuKernel {
output_width_ = SizeToInt(output_shape[3]);
std::vector<int> window;
std::vector<int64_t> window_me =
GetValue<std::vector<int64_t>>(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("ksize"));
GetValue<std::vector<int64_t>>(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("kernel_size"));
(void)std::transform(window_me.begin(), window_me.end(), std::back_inserter(window),
[](const int64_t &value) { return static_cast<int>(value); });
window_height_ = window[1];
@ -104,7 +104,7 @@ class MaxPoolWithArgmaxGpuFwdKernel : public GpuKernel {
[](const int64_t &value) { return static_cast<int>(value); });
stride_height_ = stride[1];
stride_width_ = stride[2];
pad_mode_ = GetValue<std::string>(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("padding"));
pad_mode_ = GetValue<std::string>(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("pad_mode"));
pad_top_ = 0;
pad_left_ = 0;
if (pad_mode_ == kSamePadModeUpperCase || pad_mode_ == kSamePadModeLowerCase) {

@ -169,10 +169,10 @@ class PoolingGpuFwdKernel : public GpuKernel {
}
}
void SetPad(const CNodePtr &kernel_node) {
pad_mode_ = GetValue<std::string>(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("padding"));
pad_mode_ = GetValue<std::string>(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("pad_mode"));
std::vector<int> window;
std::vector<int64_t> window_me =
GetValue<std::vector<int64_t>>(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("ksize"));
GetValue<std::vector<int64_t>>(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("kernel_size"));
(void)std::transform(window_me.begin(), window_me.end(), std::back_inserter(window),
[](const int64_t &value) { return static_cast<int>(value); });
int window_height = window[2];

@ -204,10 +204,10 @@ class PoolingGradGpuKernel : public GpuKernel {
return true;
}
void SetPad(const CNodePtr &kernel_node) {
pad_mode_ = GetAttr<std::string>(kernel_node, "padding");
pad_mode_ = GetAttr<std::string>(kernel_node, "pad_mode");
std::vector<int64_t> stride_me = GetAttr<std::vector<int64_t>>(kernel_node, "strides");
std::vector<int> window;
std::vector<int64_t> window_me = GetAttr<std::vector<int64_t>>(kernel_node, "ksize");
std::vector<int64_t> window_me = GetAttr<std::vector<int64_t>>(kernel_node, "kernel_size");
(void)std::transform(stride_me.begin(), stride_me.end(), std::back_inserter(stride_),
[](const int64_t &value) { return static_cast<int>(value); });
(void)std::transform(window_me.begin(), window_me.end(), std::back_inserter(window),

@ -89,7 +89,7 @@ CNodePtr CreateMaxPoolGradWithArgmax(const FuncGraphPtr &graph, const CNodePtr &
void SetNodeAttrs(const CNodePtr &maxpool, const CNodePtr &maxpool_grad, const CNodePtr &maxpool_argmax,
const CNodePtr &maxpool_grad_argmax) {
auto strides = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(maxpool, kAttrStrides);
auto ksize = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(maxpool, kAttrKsize);
auto ksize = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(maxpool, kAttrKernelSize);
if (strides.size() != kMaxPoolAttrAxisNum) {
MS_LOG(EXCEPTION) << "MaxPool's attr strides has wrong axis number, should be " << kMaxPoolAttrAxisNum
<< ", but got " << strides.size();
@ -110,8 +110,8 @@ void SetNodeAttrs(const CNodePtr &maxpool, const CNodePtr &maxpool_grad, const C
AnfAlgo::CopyNodeAttrs(maxpool_grad, maxpool_grad_argmax);
AnfAlgo::SetNodeAttr(kAttrStrides, MakeValue(strides), maxpool_argmax);
AnfAlgo::SetNodeAttr(kAttrStrides, MakeValue(strides), maxpool_grad_argmax);
AnfAlgo::SetNodeAttr(kAttrKsize, MakeValue(ksize), maxpool_argmax);
AnfAlgo::SetNodeAttr(kAttrKsize, MakeValue(ksize), maxpool_grad_argmax);
AnfAlgo::SetNodeAttr(kAttrKernelSize, MakeValue(ksize), maxpool_argmax);
AnfAlgo::SetNodeAttr(kAttrKernelSize, MakeValue(ksize), maxpool_grad_argmax);
}
} // namespace

@ -61,7 +61,7 @@ const AnfNodePtr MaxPoolWithArgmaxUnifyMindIR::Process(const FuncGraphPtr &graph
MS_EXCEPTION_IF_NULL(maxpool_with_argmax);
TypeId argmax_dtype = kNumberTypeUInt16;
auto ksize = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(maxpool_with_argmax, kAttrKsize);
auto ksize = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(maxpool_with_argmax, kAttrKernelSize);
auto output_shape = AnfAlgo::GetOutputInferShape(maxpool_with_argmax, 0);
auto argmax_shape = output_shape;
if (argmax_shape.size() != 4) {
@ -96,7 +96,7 @@ const AnfNodePtr MaxPoolGradWithArgmaxUnifyMindIR::Process(const FuncGraphPtr &g
MS_EXCEPTION_IF_NULL(tuple_getitem0_anf);
TypeId argmax_dtype = kNumberTypeUInt16;
auto ksize = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(maxpool_grad_with_argmax, kAttrKsize);
auto ksize = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(maxpool_grad_with_argmax, kAttrKernelSize);
auto argmax_shape = AnfAlgo::GetOutputInferShape(tuple_getitem0_anf, 0);
if (argmax_shape.size() != 4) {
MS_LOG(DEBUG) << "argmax's infer shape size not equal 4";

@ -229,22 +229,22 @@ OPERATOR_ONNX_CONVERT_DEFINE(SimpleMean, AveragePool, OpNameInfo())
OPERATOR_ONNX_CONVERT_DEFINE(
MaxPool, MaxPool,
OpNameInfo()
.Attr("ksize", "kernel_shape", onnx::AttributeProto_AttributeType_INTS, SetAttrTupleValueToProto<2>)
.Attr("padding", "auto_pad", onnx::AttributeProto_AttributeType_STRING, SetPoolingPadMode)
.Attr("kernel_size", "kernel_shape", onnx::AttributeProto_AttributeType_INTS, SetAttrTupleValueToProto<2>)
.Attr("pad_mode", "auto_pad", onnx::AttributeProto_AttributeType_STRING, SetPoolingPadMode)
.Attr("strides", "strides", onnx::AttributeProto_AttributeType_INTS, SetAttrTupleValueToProto<2>))
OPERATOR_ONNX_CONVERT_DEFINE(
MaxPoolWithArgmax, MaxPool,
OpNameInfo()
.Attr("ksize", "kernel_shape", onnx::AttributeProto_AttributeType_INTS, SetAttrTupleValueToProto<2>)
.Attr("padding", "auto_pad", onnx::AttributeProto_AttributeType_STRING, SetPoolingPadMode)
.Attr("kernel_size", "kernel_shape", onnx::AttributeProto_AttributeType_INTS, SetAttrTupleValueToProto<2>)
.Attr("pad_mode", "auto_pad", onnx::AttributeProto_AttributeType_STRING, SetPoolingPadMode)
.Attr("strides", "strides", onnx::AttributeProto_AttributeType_INTS, SetAttrTupleValueToProto<2>))
OPERATOR_ONNX_CONVERT_DEFINE(
AvgPool, AveragePool,
OpNameInfo()
.Attr("ksize", "kernel_shape", onnx::AttributeProto_AttributeType_INTS, SetAttrTupleValueToProto<2>)
.Attr("padding", "auto_pad", onnx::AttributeProto_AttributeType_STRING, SetPoolingPadMode)
.Attr("kernel_size", "kernel_shape", onnx::AttributeProto_AttributeType_INTS, SetAttrTupleValueToProto<2>)
.Attr("pad_mode", "auto_pad", onnx::AttributeProto_AttributeType_STRING, SetPoolingPadMode)
.Attr("strides", "strides", onnx::AttributeProto_AttributeType_INTS, SetAttrTupleValueToProto<2>))
OPERATOR_ONNX_CONVERT_DEFINE(GatherV2, Gather, OpNameInfo())
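These converters map the renamed attributes onto ONNX's kernel_shape and auto_pad. A hedged sketch of how they are exercised, assuming mindspore's serialization export accepts the ONNX format for a plain pooling network (the network and input values are placeholders):

    import numpy as np
    import mindspore.nn as nn
    from mindspore import Tensor
    from mindspore.train.serialization import export

    net = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode="valid")
    x = Tensor(np.ones((1, 3, 8, 8), dtype=np.float32))
    export(net, x, file_name="max_pool", file_format="ONNX")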

@ -20,54 +20,55 @@
namespace mindspore::transform {
// MaxPool
INPUT_MAP(MaxPool) = {{1, INPUT_DESC(x)}};
ATTR_MAP(MaxPool) = {{"ksize", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
ATTR_MAP(MaxPool) = {{"kernel_size", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
{"strides", ATTR_DESC(strides, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
{"padding", ATTR_DESC(padding, AnyTraits<std::string>())},
{"pad_mode", ATTR_DESC(padding, AnyTraits<std::string>())},
{"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())}};
OUTPUT_MAP(MaxPool) = {{0, OUTPUT_DESC(y)}};
REG_ADPT_DESC(MaxPool, kNameMaxPool, ADPT_DESC(MaxPool))
// AvgPool
INPUT_MAP(AvgPool) = {{1, INPUT_DESC(x)}};
ATTR_MAP(AvgPool) = {{"ksize", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
ATTR_MAP(AvgPool) = {{"kernel_size", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
{"strides", ATTR_DESC(strides, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
{"padding", ATTR_DESC(padding, AnyTraits<std::string>())},
{"pad_mode", ATTR_DESC(padding, AnyTraits<std::string>())},
{"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())}};
OUTPUT_MAP(AvgPool) = {{0, OUTPUT_DESC(y)}};
REG_ADPT_DESC(AvgPool, kNameAvgPool, ADPT_DESC(AvgPool))
// MaxPoolGrad
INPUT_MAP(MaxPoolGrad) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}, {3, INPUT_DESC(grad)}};
ATTR_MAP(MaxPoolGrad) = {{"ksize", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
ATTR_MAP(MaxPoolGrad) = {{"kernel_size", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
{"strides", ATTR_DESC(strides, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
{"padding", ATTR_DESC(padding, AnyTraits<std::string>())},
{"pad_mode", ATTR_DESC(padding, AnyTraits<std::string>())},
{"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())}};
OUTPUT_MAP(MaxPoolGrad) = {{0, OUTPUT_DESC(y)}};
REG_ADPT_DESC(MaxPoolGrad, kNameMaxPoolGrad, ADPT_DESC(MaxPoolGrad))
// avgpoolgrad
INPUT_MAP(AvgPoolGrad) = {{1, INPUT_DESC(orig_input_shape)}, {2, INPUT_DESC(input_grad)}};
ATTR_MAP(AvgPoolGrad) = {{"ksize", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
ATTR_MAP(AvgPoolGrad) = {{"kernel_size", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
{"strides", ATTR_DESC(strides, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
{"padding", ATTR_DESC(padding, AnyTraits<std::string>())},
{"pad_mode", ATTR_DESC(padding, AnyTraits<std::string>())},
{"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())}};
OUTPUT_MAP(AvgPoolGrad) = {{0, OUTPUT_DESC(out_grad)}};
REG_ADPT_DESC(AvgPoolGrad, kNameAvgPoolGrad, ADPT_DESC(AvgPoolGrad))
// MaxPoolWithArgmax
INPUT_MAP(MaxPoolWithArgmax) = {{1, INPUT_DESC(x)}};
ATTR_MAP(MaxPoolWithArgmax) = {{"ksize", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
{"strides", ATTR_DESC(strides, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
{"padding", ATTR_DESC(padding, AnyTraits<std::string>())}};
ATTR_MAP(MaxPoolWithArgmax) = {
{"kernel_size", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
{"strides", ATTR_DESC(strides, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
{"pad_mode", ATTR_DESC(padding, AnyTraits<std::string>())}};
OUTPUT_MAP(MaxPoolWithArgmax) = {{0, OUTPUT_DESC(y)}, {1, OUTPUT_DESC(argmax)}};
REG_ADPT_DESC(MaxPoolWithArgmax, kNameMaxPoolWithArgmax, ADPT_DESC(MaxPoolWithArgmax))
// MaxPoolGradWithArgmax
INPUT_MAP(MaxPoolGradWithArgmax) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(grad)}, {3, INPUT_DESC(argmax)}};
ATTR_MAP(MaxPoolGradWithArgmax) = {
{"ksize", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
{"kernel_size", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
{"strides", ATTR_DESC(strides, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
{"padding", ATTR_DESC(padding, AnyTraits<std::string>())}};
{"pad_mode", ATTR_DESC(padding, AnyTraits<std::string>())}};
OUTPUT_MAP(MaxPoolGradWithArgmax) = {{0, OUTPUT_DESC(y)}};
REG_ADPT_DESC(MaxPoolGradWithArgmax, kNameMaxPoolGradWithArgmax, ADPT_DESC(MaxPoolGradWithArgmax))
} // namespace mindspore::transform

@ -372,7 +372,6 @@ constexpr auto kAttrCompileInfo = "compile_info";
constexpr auto kAttrFusionType = "fusion_type";
constexpr auto kAttrStride = "stride";
constexpr auto kAttrStrides = "strides";
constexpr auto kAttrKsize = "ksize";
constexpr auto kAttrKernelSize = "kernel_size";
constexpr auto kAttrDilation = "dilation";
constexpr auto kAttrPadMode = "pad_mode";

@ -117,9 +117,9 @@ class MaxPool2d(_PoolNd):
def __init__(self, kernel_size=1, stride=1, pad_mode="valid", data_format="NCHW"):
super(MaxPool2d, self).__init__(kernel_size, stride, pad_mode, data_format)
self.max_pool = P.MaxPool(ksize=self.kernel_size,
self.max_pool = P.MaxPool(kernel_size=self.kernel_size,
strides=self.stride,
padding=self.pad_mode,
pad_mode=self.pad_mode,
data_format=self.format)
def construct(self, x):
@ -185,9 +185,9 @@ class MaxPool1d(_PoolNd):
validator.check_int(stride, 1, Rel.GE, "stride", self.cls_name)
self.kernel_size = (1, kernel_size)
self.stride = (1, stride)
self.max_pool = P.MaxPool(ksize=self.kernel_size,
self.max_pool = P.MaxPool(kernel_size=self.kernel_size,
strides=self.stride,
padding=self.pad_mode)
pad_mode=self.pad_mode)
self.shape = F.shape
self.reduce_mean = P.ReduceMean(keep_dims=True)
self.expand = P.ExpandDims()
@ -263,9 +263,9 @@ class AvgPool2d(_PoolNd):
pad_mode="valid",
data_format="NCHW"):
super(AvgPool2d, self).__init__(kernel_size, stride, pad_mode, data_format)
self.avg_pool = P.AvgPool(ksize=self.kernel_size,
self.avg_pool = P.AvgPool(kernel_size=self.kernel_size,
strides=self.stride,
padding=self.pad_mode,
pad_mode=self.pad_mode,
data_format=self.format)
def construct(self, x):
@ -335,9 +335,9 @@ class AvgPool1d(_PoolNd):
super(AvgPool1d, self).__init__(kernel_size, stride, pad_mode)
self.kernel_size = (1, kernel_size)
self.stride = (1, stride)
self.avg_pool = P.AvgPool(ksize=self.kernel_size,
self.avg_pool = P.AvgPool(kernel_size=self.kernel_size,
strides=self.stride,
padding=self.pad_mode)
pad_mode=self.pad_mode)
self.shape = F.shape
self.reduce_mean = P.ReduceMean(keep_dims=True)
self.slice = P.Slice()
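A brief usage sketch of the updated layers: only the underlying primitive attributes were renamed (ksize to kernel_size, padding to pad_mode), so the public constructor arguments are unchanged. Shapes and values here are illustrative:

    import numpy as np
    import mindspore.nn as nn
    from mindspore import Tensor

    pool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")
    x = Tensor(np.random.rand(1, 2, 8, 8).astype(np.float32))
    y = pool(x)  # pad_mode="same" with stride 2 keeps ceil(8 / 2) = 4: (1, 2, 4, 4)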

@ -189,9 +189,9 @@ def get_bprop_depthwise_conv2d_native(self):
def get_bprop_max_pool_with_argmax(self):
"""Grad definition for `MaxPoolWithArgmax` operation."""
maxpool_grad = G.MaxPoolGradWithArgmax(
ksize=self.ksize,
kernel_size=self.kernel_size,
strides=self.strides,
padding=self.padding)
pad_mode=self.pad_mode)
def bprop(x, out, dout):
dx = maxpool_grad(x, dout[0], out[1])
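These bprop definitions are what autodiff dispatches to; a hedged end-to-end sketch, assuming the composite GradOperation API (the network and input values are illustrative):

    import numpy as np
    import mindspore.nn as nn
    from mindspore import Tensor
    from mindspore.ops import composite as C

    net = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode="valid")
    x = Tensor(np.random.rand(1, 1, 4, 4).astype(np.float32))
    dx = C.GradOperation()(net)(x)  # routes through the renamed grad primitives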
@ -204,9 +204,9 @@ def get_bprop_max_pool_with_argmax(self):
def get_bprop_max_pool_grad_grad(self):
"""Grad definition for `MaxPoolGrad` operation."""
maxpool_grad_grad = G.MaxPoolGradGrad(
ksize=self.ksize,
kernel_size=self.kernel_size,
strides=self.strides,
padding=self.padding)
pad_mode=self.pad_mode)
def bprop(x1, x2, grad, out, dout):
dx1 = zeros_like(x1)
@ -221,9 +221,9 @@ def get_bprop_max_pool_grad_grad(self):
def get_bprop_max_pool_grad_grad_grad(self):
"""Grad definition for `MaxPoolGradGrad` operation."""
maxpool_grad = G.MaxPoolGrad(
ksize=self.ksize,
kernel_size=self.kernel_size,
strides=self.strides,
padding=self.padding)
pad_mode=self.pad_mode)
def bprop(x1, x2, grad, out, dout):
dx1 = zeros_like(x1)
@ -238,9 +238,9 @@ def get_bprop_max_pool_grad_grad_grad(self):
def get_bprop_max_pool_grad(self):
"""Grad definition for `MaxPool` operation."""
maxpool_grad = G.MaxPoolGrad(
ksize=self.ksize,
kernel_size=self.kernel_size,
strides=self.strides,
padding=self.padding,
pad_mode=self.pad_mode,
data_format=self.format)
def bprop(x, out, dout):
@ -250,7 +250,7 @@ def get_bprop_max_pool_grad(self):
return bprop
def _windowed_output_size(input_size, ksize, stride, padding):
def _windowed_output_size(input_size, ksize, stride, pad_mode):
"""
helper func for AvgPoolGrad
"""
@ -259,11 +259,11 @@ def _windowed_output_size(input_size, ksize, stride, padding):
tmp_pad_need = 0
tmp_pad_before = 0
tmp_pad_after = 0
if padding == 'VALID':
if pad_mode == 'VALID':
tmp_output = (input_size - ksize + stride) // stride
tmp_pad_before = 0
tmp_pad_after = 0
elif padding == 'SAME':
elif pad_mode == 'SAME':
tmp_output = (input_size + stride - 1) // stride
tmp_pad_need = max(0, (tmp_output - 1) * stride + ksize - input_size)
tmp_pad_before = tmp_pad_need // 2
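Restated as a self-contained sketch, the SAME/VALID arithmetic this helper implements; the final tmp_pad_after assignment is truncated in the hunk above, so the remainder-after split here is an assumption:

    def windowed_output_size(input_size, ksize, stride, pad_mode):
        """SAME/VALID output size and padding, mirroring the helper above."""
        if pad_mode == 'VALID':
            output = (input_size - ksize + stride) // stride
            pad_before, pad_after = 0, 0
        else:  # 'SAME'
            output = (input_size + stride - 1) // stride
            pad_need = max(0, (output - 1) * stride + ksize - input_size)
            pad_before = pad_need // 2
            pad_after = pad_need - pad_before  # assumed: remainder padded after
        return output, pad_before, pad_after

    assert windowed_output_size(7, 3, 2, 'VALID') == (3, 0, 0)
    assert windowed_output_size(7, 3, 2, 'SAME') == (4, 1, 1)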
@ -272,7 +272,7 @@ def _windowed_output_size(input_size, ksize, stride, padding):
@constexpr
def _get_mean_matrix(x_shape, ksize, stride, padding, x_dtype):
def _get_mean_matrix(x_shape, ksize, stride, pad_mode, x_dtype):
"""
helper func for AvgPoolGrad.
@ -291,9 +291,9 @@ def _get_mean_matrix(x_shape, ksize, stride, padding, x_dtype):
h_output, w_output = 0, 0
pad_top, pad_bottom, pad_left, pad_right = 0, 0, 0, 0
h_output, pad_top, pad_bottom = _windowed_output_size(h_input, h_ksize,
h_stride, padding)
h_stride, pad_mode)
w_output, pad_left, pad_right = _windowed_output_size(w_input, w_ksize,
w_stride, padding)
w_stride, pad_mode)
output_size = n_output * c_output * h_output * w_output
output_shape = (n_output, c_output, h_output, w_output)
@ -321,7 +321,7 @@ def _get_mean_matrix(x_shape, ksize, stride, padding, x_dtype):
@constexpr
def _get_kernel_matrix(x_shape_nchw, kernel_matrix_shape, padding, x_dtype):
def _get_kernel_matrix(x_shape_nchw, kernel_matrix_shape, pad_mode, x_dtype):
kernel_matrix = np.ones(kernel_matrix_shape)
return Tensor(kernel_matrix, x_dtype)
@ -333,9 +333,9 @@ def get_bprop_avg_pool_grad(self):
# the parameter of AvgPoolGrad in GPU and TBE/CPU is not same
if self.target == "GPU":
avgpool_grad_gpu = G.AvgPoolGradGpu(
ksize=self.ksize,
kernel_size=self.kernel_size,
strides=self.strides,
padding=self.padding,
pad_mode=self.pad_mode,
data_format=self.format)
def bprop_gpu(x, out, dout):
@ -346,9 +346,9 @@ def get_bprop_avg_pool_grad(self):
elif self.target == "CPU":
avgpool_grad_cpu = G.AvgPoolGradCpu(
ksize=self.ksize,
kernel_size=self.kernel_size,
strides=self.strides,
padding=self.padding,
pad_mode=self.pad_mode,
data_format=self.format)
def bprop_cpu(x, out, dout):
@ -359,9 +359,9 @@ def get_bprop_avg_pool_grad(self):
elif self.target == "GE":
avgpool_grad_ge = G.AvgPoolGrad(
ksize=self.ksize,
kernel_size=self.kernel_size,
strides=self.strides,
padding=self.padding)
pad_mode=self.pad_mode)
shape_op = P.Shape()
def bprop_ge(x, out, dout):
@ -372,12 +372,12 @@ def get_bprop_avg_pool_grad(self):
else:
avgpool_grad_vm = G.AvgPoolGradVm(
ksize=self.ksize,
kernel_size=self.kernel_size,
strides=self.strides,
padding=self.padding)
k_size_nchw = avgpool_grad_vm.ksize
pad_mode=self.pad_mode)
k_size_nchw = avgpool_grad_vm.kernel_size
stride_nchw = avgpool_grad_vm.strides
padding = self.padding
pad_mode = self.pad_mode
def bprop_vm(x, out, dout):
x_shape_nchw = F.shape(x)
@ -385,8 +385,8 @@ def get_bprop_avg_pool_grad(self):
kernel_matrix_shape = (1, x_shape_nchw[1],
k_size_nchw[2],
k_size_nchw[3])
mean_matrix = _get_mean_matrix(x_shape_nchw, k_size_nchw, stride_nchw, padding, x_dtype)
kernel_matrix = _get_kernel_matrix(x_shape_nchw, kernel_matrix_shape, padding, x_dtype)
mean_matrix = _get_mean_matrix(x_shape_nchw, k_size_nchw, stride_nchw, pad_mode, x_dtype)
kernel_matrix = _get_kernel_matrix(x_shape_nchw, kernel_matrix_shape, pad_mode, x_dtype)
dx = avgpool_grad_vm(x_shape_nchw, dout, mean_matrix, kernel_matrix)
return (dx,)

@ -23,9 +23,9 @@ avg_pool_op_info = TBERegOp("AvgPool") \
.compute_cost(10) \
.kernel_name("avg_pool") \
.partial_flag(True) \
.attr("ksize", "required", "listInt", "all") \
.attr("kernel_size", "required", "listInt", "all") \
.attr("strides", "required", "listInt", "all") \
.attr("padding", "required", "str", "all") \
.attr("pad_mode", "required", "str", "all") \
.attr("data_format", "optional", "str", "all") \
.input(0, "x", False, "required", "all") \
.input(1, "filter", False, "optional", "all") \

@ -24,9 +24,9 @@ avg_pool_grad_op_info = TBERegOp("AvgPoolGrad") \
.kernel_name("avg_pool_grad_d") \
.partial_flag(True) \
.attr("x_origin", "required", "listInt", "all") \
.attr("ksize", "required", "listInt", "all") \
.attr("kernel_size", "required", "listInt", "all") \
.attr("strides", "required", "listInt", "all") \
.attr("padding", "required", "str", "all") \
.attr("pad_mode", "required", "str", "all") \
.attr("data_format", "optional", "str", "all") \
.input(0, "input_grad", False, "required", "all") \
.input(1, "mean_matrix", False, "optional", "all") \

@ -24,9 +24,9 @@ avg_pool_grad_vm_op_info = TBERegOp("AvgPoolGradVm") \
.kernel_name("avg_pool_grad_d") \
.partial_flag(True) \
.attr("x_origin", "required", "listInt", "all") \
.attr("ksize", "required", "listInt", "all") \
.attr("kernel_size", "required", "listInt", "all") \
.attr("strides", "required", "listInt", "all") \
.attr("padding", "required", "str", "all") \
.attr("pad_mode", "required", "str", "all") \
.attr("data_format", "optional", "str", "all") \
.input(0, "input_grad", False, "required", "all") \
.input(1, "mean_matrix", False, "optional", "all") \

@ -23,9 +23,9 @@ max_pool_op_info = TBERegOp("MaxPool") \
.compute_cost(10) \
.kernel_name("max_pool") \
.partial_flag(True) \
.attr("ksize", "required", "listInt", "all") \
.attr("kernel_size", "required", "listInt", "all") \
.attr("strides", "required", "listInt", "all") \
.attr("padding", "required", "str", "all") \
.attr("pad_mode", "required", "str", "all") \
.attr("data_format", "required", "str", "all") \
.input(0, "input_data", False, "required", "all") \
.output(0, "output_data", False, "required", "all") \

@ -23,9 +23,9 @@ max_pool_grad_op_info = TBERegOp("MaxPoolGrad") \
.compute_cost(10) \
.kernel_name("max_pool_grad") \
.partial_flag(True) \
.attr("ksize", "required", "listInt", "all") \
.attr("kernel_size", "required", "listInt", "all") \
.attr("strides", "required", "listInt", "all") \
.attr("padding", "required", "str", "all") \
.attr("pad_mode", "required", "str", "all") \
.input(0, "x1", False, "required", "all") \
.input(1, "x2", False, "required", "all") \
.input(2, "grad", False, "required", "all") \

@ -23,9 +23,9 @@ max_pool_grad_grad_op_info = TBERegOp("MaxPoolGradGrad") \
.compute_cost(10) \
.kernel_name("max_pool_grad_grad") \
.partial_flag(True) \
.attr("ksize", "required", "listInt", "all") \
.attr("kernel_size", "required", "listInt", "all") \
.attr("strides", "required", "listInt", "all") \
.attr("padding", "required", "str", "all") \
.attr("pad_mode", "required", "str", "all") \
.attr("data_format", "optional", "str", "all") \
.input(0, "x1", False, "required", "all") \
.input(1, "x2", False, "required", "all") \

@ -23,9 +23,9 @@ max_pool_grad_grad_with_argmax_op_info = TBERegOp("MaxPoolGradGradWithArgmax") \
.compute_cost(10) \
.kernel_name("max_pool_grad_grad_with_argmax") \
.partial_flag(True) \
.attr("ksize", "required", "listInt", "all") \
.attr("kernel_size", "required", "listInt", "all") \
.attr("strides", "required", "listInt", "all") \
.attr("padding", "required", "str", "all") \
.attr("pad_mode", "required", "str", "all") \
.input(0, "x", False, "required", "all") \
.input(1, "grad", False, "required", "all") \
.input(2, "argmax", False, "optional", "all") \

@ -23,9 +23,9 @@ max_pool_grad_with_argmax_op_info = TBERegOp("MaxPoolGradWithArgmax") \
.compute_cost(10) \
.kernel_name("max_pool_grad_with_argmax") \
.partial_flag(True) \
.attr("ksize", "required", "listInt", "all") \
.attr("kernel_size", "required", "listInt", "all") \
.attr("strides", "required", "listInt", "all") \
.attr("padding", "required", "str", "all") \
.attr("pad_mode", "required", "str", "all") \
.input(0, "x", False, "required", "all") \
.input(1, "grad", False, "required", "all") \
.input(2, "argmax", False, "optional", "all") \

@ -23,9 +23,9 @@ max_pool_with_argmax_op_info = TBERegOp("MaxPoolWithArgmax") \
.compute_cost(10) \
.kernel_name("max_pool_with_argmax") \
.partial_flag(True) \
.attr("ksize", "required", "listInt", "all") \
.attr("kernel_size", "required", "listInt", "all") \
.attr("strides", "required", "listInt", "all") \
.attr("padding", "required", "str", "all") \
.attr("pad_mode", "required", "str", "all") \
.input(0, "x", False, "required", "all") \
.output(0, "y", False, "required", "all") \
.output(1, "argmax", False, "required", "all") \

@ -810,13 +810,13 @@ class _PoolGrad(PrimitiveWithInfer):
"""Gradients of the max/avg pool operation."""
@prim_attr_register
def __init__(self, ksize, strides, padding="VALID", data_format="NCHW"):
def __init__(self, kernel_size, strides, pad_mode="VALID", data_format="NCHW"):
self.init_prim_io_names(inputs=['x_origin', 'out_origin', 'grad'], outputs=['output'])
validator.check_value_type('ksize', ksize, [int, tuple], self.name)
validator.check_value_type('kernel_size', kernel_size, [int, tuple], self.name)
validator.check_value_type('strides', strides, [int, tuple], self.name)
self.padding = validator.check_string(padding.upper(), ['VALID', 'SAME'], 'padding', self.name)
self.add_prim_attr("padding", self.padding)
self.pad_mode = validator.check_string(pad_mode.upper(), ['VALID', 'SAME'], 'pad_mode', self.name)
self.add_prim_attr("pad_mode", self.pad_mode)
self.format = validator.check_string(data_format, ['NCHW', 'NHWC'], 'format', self.name)
if context.get_context("device_target") != "GPU" and self.format == "NHWC":
raise ValueError("NHWC format only support in GPU target.")
@ -842,9 +842,10 @@ class _PoolGrad(PrimitiveWithInfer):
raise error_msg
return ret
ksize = _grad_check_int_or_tuple("ksize", ksize, self.is_maxpoolgradwithargmax)
self.ksize = ksize if self.format == "NCHW" else [ksize[0], ksize[2], ksize[3], ksize[1]]
self.add_prim_attr("ksize", self.ksize)
kernel_size = _grad_check_int_or_tuple("kernel_size", kernel_size, self.is_maxpoolgradwithargmax)
self.kernel_size = kernel_size if self.format == "NCHW" else [kernel_size[0], kernel_size[2],
kernel_size[3], kernel_size[1]]
self.add_prim_attr("kernel_size", self.kernel_size)
strides = _grad_check_int_or_tuple("strides", strides, self.is_maxpoolgradwithargmax)
self.strides = strides if self.format == "NCHW" else [strides[0], strides[2], strides[3], strides[1]]
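A small sketch of the layout handling above: for NHWC, the normalized 4-element kernel_size (and strides) are re-packed from NCHW order with the index shuffle [0, 2, 3, 1]; the values are illustrative:

    kernel_size_nchw = [1, 1, 3, 3]  # hypothetical normalized (N, C, H, W) attr
    k = kernel_size_nchw
    kernel_size_nhwc = [k[0], k[2], k[3], k[1]]  # (N, H, W, C)
    assert kernel_size_nhwc == [1, 3, 3, 1]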
@ -855,8 +856,8 @@ class AvgPoolGrad(_PoolGrad):
"""Gradients of the avg pool operation for ge."""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="VALID"):
super(AvgPoolGrad, self).__init__(ksize, strides, padding)
def __init__(self, kernel_size=1, strides=1, pad_mode="VALID"):
super(AvgPoolGrad, self).__init__(kernel_size, strides, pad_mode)
def __infer__(self, origin_input, dout):
out = {
@ -872,8 +873,8 @@ class AvgPoolGradVm(_PoolGrad):
"""Gradients of the avg pool operation for vm."""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="VALID"):
super(AvgPoolGradVm, self).__init__(ksize, strides, padding)
def __init__(self, kernel_size=1, strides=1, pad_mode="VALID"):
super(AvgPoolGradVm, self).__init__(kernel_size, strides, pad_mode)
self.init_prim_io_names(inputs=['x_origin', 'grad', 'mean_matrix', 'kernel_matrix'], outputs=['output'])
def __infer__(self, origin_input, dout, mean_matrix, kernel_matrix):
@ -890,8 +891,8 @@ class AvgPoolGradGpu(_PoolGrad):
"""Gradients of the avg pool operation for gpu."""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="VALID", data_format="NCHW"):
super(AvgPoolGradGpu, self).__init__(ksize, strides, padding, data_format)
def __init__(self, kernel_size=1, strides=1, pad_mode="VALID", data_format="NCHW"):
super(AvgPoolGradGpu, self).__init__(kernel_size, strides, pad_mode, data_format)
def infer_shape(self, x1_shape, x2_shape, grad_shape):
return x1_shape
@ -904,8 +905,8 @@ class AvgPoolGradCpu(_PoolGrad):
"""Gradients of the avg pool operation for cpu."""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="VALID", data_format="NCHW"):
super(AvgPoolGradCpu, self).__init__(ksize, strides, padding, data_format)
def __init__(self, kernel_size=1, strides=1, pad_mode="VALID", data_format="NCHW"):
super(AvgPoolGradCpu, self).__init__(kernel_size, strides, pad_mode, data_format)
def infer_shape(self, x1_shape, x2_shape, grad_shape):
return x1_shape
@ -918,8 +919,8 @@ class MaxPoolGrad(_PoolGrad):
"""Performs gradients of the max pool operation."""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="VALID", data_format="NCHW"):
super(MaxPoolGrad, self).__init__(ksize, strides, padding, data_format)
def __init__(self, kernel_size=1, strides=1, pad_mode="VALID", data_format="NCHW"):
super(MaxPoolGrad, self).__init__(kernel_size, strides, pad_mode, data_format)
def infer_shape(self, x1_shape, x2_shape, grad_shape):
return x1_shape
@ -933,13 +934,13 @@ class MaxPoolGradGrad(_PoolGrad):
Performs gradients of the MaxPoolGrad operation.
Args:
ksize (Union[int, tuple[int]]): The size of kernel used to take the maximum value,
is an int number that represents height and width are both ksize, or a tuple
kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value,
is an int number that represents height and width are both kernel_size, or a tuple
of two int numbers that represent height and width respectively. Default: 1.
strides (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
the height and width of movement are both strides, or a tuple of two int numbers that
represent height and width of movement respectively. Default: 1.
padding (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
pad_mode (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
Default: "valid".
- same: Adopts the way of completion. The height and width of the output will be the same as
@ -961,8 +962,8 @@ class MaxPoolGradGrad(_PoolGrad):
"""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="VALID"):
super(MaxPoolGradGrad, self).__init__(ksize, strides, padding)
def __init__(self, kernel_size=1, strides=1, pad_mode="VALID"):
super(MaxPoolGradGrad, self).__init__(kernel_size, strides, pad_mode)
def infer_shape(self, x1_shape, x2_shape, grad_shape):
return x1_shape
@ -985,9 +986,9 @@ class MaxPoolGradWithArgmax(_PoolGrad):
"""Computes the gradients of MaxPoolWithArgmax."""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="VALID"):
def __init__(self, kernel_size=1, strides=1, pad_mode="VALID"):
self.init_prim_io_names(inputs=['x', 'grad', 'argmax'], outputs=['output'])
super(MaxPoolGradWithArgmax, self).__init__(ksize, strides, padding)
super(MaxPoolGradWithArgmax, self).__init__(kernel_size, strides, pad_mode)
def infer_shape(self, x_shape, grad_shape, argmax_shape):
if not grad_shape:
@ -1003,13 +1004,13 @@ class MaxPoolGradGradWithArgmax(_PoolGrad):
Computes the gradients of MaxPoolGradWithArgmax.
Args:
ksize (Union[int, tuple[int]]): The size of kernel used to take the maximum value,
is an int number that represents height and width are both ksize, or a tuple
kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value,
is an int number that represents height and width are both kernel_size, or a tuple
of two int numbers that represent height and width respectively. Default: 1.
strides (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
the height and width of movement are both strides, or a tuple of two int numbers that
represent height and width of movement respectively. Default: 1.
padding (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
pad_mode (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
Default: "valid".
- same: Adopts the way of completion. The height and width of the output will be the same as
@ -1031,9 +1032,9 @@ class MaxPoolGradGradWithArgmax(_PoolGrad):
"""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="VALID"):
def __init__(self, kernel_size=1, strides=1, pad_mode="VALID"):
self.init_prim_io_names(inputs=['x', 'grad', 'argmax'], outputs=['output'])
super(MaxPoolGradGradWithArgmax, self).__init__(ksize, strides, padding)
super(MaxPoolGradGradWithArgmax, self).__init__(kernel_size, strides, pad_mode)
def infer_shape(self, x_shape, grad_shape, argmax_shape):
if not grad_shape:

Some files were not shown because too many files have changed in this diff
