Develop op MaxPoolWithArgMax

pull/915/head
buxue 5 years ago
parent 22cc03a54a
commit 1d3bb0b731

@ -148,8 +148,6 @@ void TbeAdapter::InputOrderPass(const std::string &op_name, std::vector<std::vec
}
std::map<std::string, FAttrsPass> TbeAdapter::build_json_attr_pass_map_ = {
{"MaxPoolWithArgmax", TbeAdapter::MaxPoolWithArgmaxAttrJsonPass},
{"MaxPoolGradWithArgmax", TbeAdapter::MaxPoolGradWithArgmaxAttrJsonPass},
{"Conv2D", TbeAdapter::Conv2DAttrJsonPass},
{"Conv2DBackpropFilter", TbeAdapter::Conv2DBackpropFilterAttrJsonPass},
{"Conv2DBackpropInput", TbeAdapter::Conv2DBackpropInputAttrJsonPass},
@ -170,48 +168,6 @@ bool TbeAdapter::RunAttrPass(const mindspore::AnfNodePtr &anf_node,
return false;
}
// Builds the attrs json for MaxPoolWithArgmax.
// For each registered op attr: "pad_mode" is forwarded as an upper-cased
// string, while every other (scalar int) attr is expanded to the 4-element
// layout (1, v, v, 1) expected by the TBE kernel. Attrs missing on the
// primitive are emitted with "valid": false.
void TbeAdapter::MaxPoolWithArgmaxAttrJsonPass(
const mindspore::AnfNodePtr &anf_node, const std::vector<std::shared_ptr<mindspore::kernel::OpAttr>> &op_info_attrs,
nlohmann::json *attrs_json) {
MS_EXCEPTION_IF_NULL(anf_node);
MS_EXCEPTION_IF_NULL(attrs_json);
auto primitive = AnfAlgo::GetCNodePrimitive(anf_node);
MS_EXCEPTION_IF_NULL(primitive);
for (const auto &op_attr : op_info_attrs) {
  MS_EXCEPTION_IF_NULL(op_attr);
  nlohmann::json attr_obj;
  const std::string attr_name = op_attr->name();
  // Fetch the attr once; nullptr means the primitive does not carry it.
  auto value = primitive->GetAttr(attr_name);
  if (value != nullptr) {
    if (attr_name == "pad_mode") {
      std::string attr_value = GetValue<std::string>(value);
      // Route through unsigned char: calling ::toupper on a negative char
      // value is undefined behavior.
      (void)std::transform(attr_value.begin(), attr_value.end(), attr_value.begin(),
                           [](unsigned char c) { return static_cast<char>(::toupper(c)); });
      attr_obj["value"] = attr_value;
    } else {
      // Scalar attr -> NHWC-style 4D layout (1, v, v, 1).
      const int data = GetValue<int>(value);
      attr_obj["value"] = std::vector<int>{1, data, data, 1};
    }
    attr_obj["valid"] = true;
  } else {
    attr_obj["valid"] = false;
  }
  attr_obj["name"] = attr_name;
  attrs_json->push_back(attr_obj);
}
}
// MaxPoolGradWithArgmax registers the same attrs (ksize/strides/padding) as
// the forward MaxPoolWithArgmax op, so its json pass simply delegates to the
// forward op's pass.
void TbeAdapter::MaxPoolGradWithArgmaxAttrJsonPass(
const mindspore::AnfNodePtr &anf_node, const std::vector<std::shared_ptr<mindspore::kernel::OpAttr>> &op_info_attrs,
nlohmann::json *attrs_json) {
MaxPoolWithArgmaxAttrJsonPass(anf_node, op_info_attrs, attrs_json);
}
void TbeAdapter::Conv2DAttrJsonPass(const mindspore::AnfNodePtr &anf_node,
const std::vector<std::shared_ptr<mindspore::kernel::OpAttr>> &op_info_attrs,
nlohmann::json *attrs_json) {

@ -161,6 +161,7 @@ const char kNameTopK[] = "TopK";
const char kNameSoftmaxGrad[] = "SoftmaxGrad";
const char kNameMaxPool[] = "MaxPool";
const char kNameAvgPool[] = "AvgPool";
const char kNameMaxPoolWithArgmax[] = "MaxPoolWithArgmax";
const char kNameBatchNorm[] = "BatchNorm";
const char kNameBatchNormGrad[] = "BatchNormGrad";
const char kNameROIAlign[] = "ROIAlign";
@ -199,6 +200,7 @@ std::unordered_map<std::string, OpAdapterDescPtr> &DfGraphConvertor::get_adpt_ma
{string(kNameApplyMomentum), ADPT_DESC(ApplyMomentum)},
{string(kNameMaxPool), ADPT_DESC(MaxPool)},
{string(kNameAvgPool), ADPT_DESC(AvgPool)},
{string(kNameMaxPoolWithArgmax), ADPT_DESC(MaxPoolWithArgmax)},
{string(kNameTopK), ADPT_DESC(TopK)},
{string(kNamePack), ADPT_DESC(Pack)},
{string(kNameSplitD), ADPT_DESC(SplitD)},

@ -192,8 +192,7 @@ ATTR_MAP(PRelu) = EMPTY_ATTR_MAP;
OUTPUT_MAP(PRelu) = {{0, OUTPUT_DESC(y)}};
// PReluGrad
INPUT_MAP(PReluGrad) = {
{1, INPUT_DESC(grads)}, {2, INPUT_DESC(features)}, {3, INPUT_DESC(weights)}};
INPUT_MAP(PReluGrad) = {{1, INPUT_DESC(grads)}, {2, INPUT_DESC(features)}, {3, INPUT_DESC(weights)}};
ATTR_MAP(PReluGrad) = EMPTY_ATTR_MAP;
OUTPUT_MAP(PReluGrad) = {{0, OUTPUT_DESC(dx)}, {1, OUTPUT_DESC(da)}};
@ -702,24 +701,30 @@ ATTR_MAP(AvgPoolGrad) = {{"ksize", ATTR_DESC(ksize, AnyTraits<int>(), AnyTraits<
OUTPUT_MAP(AvgPoolGrad) = {{0, OUTPUT_DESC(out_grad)}};
// MaxPoolWithArgmax
INPUT_MAP(MaxPoolWithArgmax) = {{1, INPUT_DESC(x)}};
ATTR_MAP(MaxPoolWithArgmax) = {{"ksize", ATTR_DESC(ksize, AnyTraits<int>(), AnyTraits<std::vector<int64_t>>())},
{"strides", ATTR_DESC(strides, AnyTraits<int>(), AnyTraits<std::vector<int64_t>>())},
{"padding", ATTR_DESC(padding, AnyTraits<std::string>())}};
OUTPUT_MAP(MaxPoolWithArgmax) = {{0, OUTPUT_DESC(y)}, {1, OUTPUT_DESC(argmax)}};
// MaxPoolGradWithArgmax
INPUT_MAP(MaxPoolGradWithArgmax) = {
{1, INPUT_DESC(x)},
{2, INPUT_DESC(argmax)},
{3, INPUT_DESC(grad)},
{2, INPUT_DESC(grad)},
{3, INPUT_DESC(argmax)},
};
ATTR_MAP(MaxPoolGradWithArgmax) = {{"pad_mode", ATTR_DESC(padding, AnyTraits<std::string>())},
{"window", ATTR_DESC(ksize, "window", AnyTraits<std::vector<int64_t>>())},
{"stride", ATTR_DESC(strides, "stride", AnyTraits<std::vector<int64_t>>())}};
ATTR_MAP(MaxPoolGradWithArgmax) = {{"ksize", ATTR_DESC(ksize, AnyTraits<int>(), AnyTraits<std::vector<int64_t>>())},
{"strides", ATTR_DESC(strides, AnyTraits<int>(), AnyTraits<std::vector<int64_t>>())},
{"padding", ATTR_DESC(padding, AnyTraits<std::string>())}};
OUTPUT_MAP(MaxPoolGradWithArgmax) = {{0, OUTPUT_DESC(y)}};
// Conv2D
INPUT_MAP(Conv2D) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(filter)}};
ATTR_MAP(Conv2D) = {
{"stride", ATTR_DESC(strides, "pad", AnyTraits<std::vector<int64_t>>())},
ATTR_MAP(Conv2D) = {{"stride", ATTR_DESC(strides, "pad", AnyTraits<std::vector<int64_t>>())},
{"pad_list", ATTR_DESC(pads, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
{"dilation", ATTR_DESC(dilations, "pad", AnyTraits<std::vector<int64_t>>())},
{"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())},
{"group", ATTR_DESC(groups, AnyTraits<int>())}
};
{"group", ATTR_DESC(groups, AnyTraits<int>())}};
OUTPUT_MAP(Conv2D) = {{0, OUTPUT_DESC(y)}};
// Conv2DBackpropInputD
@ -731,8 +736,7 @@ ATTR_MAP(Conv2DBackpropInputD) = {
{"stride", ATTR_DESC(strides, "pad", AnyTraits<std::vector<int64_t>>())},
{"dilation", ATTR_DESC(dilations, "pad", AnyTraits<std::vector<int64_t>>())},
{"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())},
{"group", ATTR_DESC(groups, AnyTraits<int>())}
};
{"group", ATTR_DESC(groups, AnyTraits<int>())}};
OUTPUT_MAP(Conv2DBackpropInputD) = {{0, OUTPUT_DESC(y)}};
// Conv2DBackpropFilterD
@ -744,8 +748,7 @@ ATTR_MAP(Conv2DBackpropFilterD) = {
{"stride", ATTR_DESC(strides, "pad", AnyTraits<std::vector<int64_t>>())},
{"dilation", ATTR_DESC(dilations, "pad", AnyTraits<std::vector<int64_t>>())},
{"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())},
{"group", ATTR_DESC(groups, AnyTraits<int>())}
};
{"group", ATTR_DESC(groups, AnyTraits<int>())}};
OUTPUT_MAP(Conv2DBackpropFilterD) = {{0, OUTPUT_DESC(y)}};
// DepthwiseConv2D

@ -88,8 +88,10 @@ DECLARE_OP_ADAPTER(FusedBatchNormGrad)
DECLARE_OP_USE_OUTPUT(FusedBatchNormGrad)
DECLARE_OP_ADAPTER(BiasAddGrad)
DECLARE_OP_USE_OUTPUT(BiasAddGrad)
DECLARE_OP_ADAPTER(MaxPoolWithArgmax)
DECLARE_OP_USE_OUTPUT(MaxPoolWithArgmax)
DECLARE_OP_ADAPTER(MaxPoolGradWithArgmax)
DECLARE_OP_USE_ENUM(MaxPoolGradWithArgmax)
DECLARE_OP_USE_OUTPUT(MaxPoolGradWithArgmax)
DECLARE_OP_ADAPTER(Conv2D)
DECLARE_OP_USE_ENUM(Conv2D)
DECLARE_OP_USE_OUTPUT(Conv2D)

@ -168,7 +168,7 @@ class ResNet(nn.Cell):
self.conv1 = _conv7x7(3, 64, stride=2)
self.bn1 = _bn(64)
self.relu = P.ReLU()
self.maxpool = P.MaxPoolWithArgmax(pad_mode='same', window=3, stride=2)
self.maxpool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2)
self.layer1 = self._make_layer(block,
layer_nums[0],

@ -13,33 +13,49 @@
# limitations under the License.
# ============================================================================
"""pooling"""
from mindspore.ops import operations as P
from mindspore._checkparam import ParamValidator as validator
from mindspore._checkparam import Rel
from ... import context
from ..cell import Cell
class _PoolNd(Cell):
"""N-D AvgPool"""
def __init__(self,
kernel_size,
stride,
pad_mode,
padding=0,
pool=None):
def __init__(self, kernel_size, stride, pad_mode):
name = self.__class__.__name__
super(_PoolNd, self).__init__()
validator.check_type('kernel_size', kernel_size, [int, tuple])
validator.check_type('stride', stride, [int, tuple])
self.pad_mode = validator.check_string('pad_mode', pad_mode.upper(), ['VALID', 'SAME'])
if isinstance(kernel_size, int):
validator.check_integer("kernel_size", kernel_size, 1, Rel.GE)
else:
if (len(kernel_size) != 2 or
(not isinstance(kernel_size[0], int)) or
(not isinstance(kernel_size[1], int)) or
kernel_size[0] <= 0 or
kernel_size[1] <= 0):
raise ValueError(f'The kernel_size passed to cell {name} should be an positive int number or'
f'a tuple of two positive int numbers, but got {kernel_size}')
self.kernel_size = kernel_size
if isinstance(stride, int):
validator.check_integer("stride", stride, 1, Rel.GE)
else:
if (len(stride) != 2 or
(not isinstance(stride[0], int)) or
(not isinstance(stride[1], int)) or
stride[0] <= 0 or
stride[1] <= 0):
raise ValueError(f'The stride passed to cell {name} should be an positive int number or'
f'a tuple of two positive int numbers, but got {stride}')
self.stride = stride
self.pad_mode = pad_mode
self.padding = validator.check_integer('padding', padding, 0, Rel.GE)
self.pool = pool
if self.pool is None:
raise NotImplementedError
def construct(self, x):
return self.pool(x)
def construct(self, *inputs):
pass
def extend_repr(self):
return 'kernel_size={kernel_size}, stride={stride}, pad_mode={pad_mode}'.format(**self.__dict__)
@ -63,19 +79,23 @@ class MaxPool2d(_PoolNd):
pad_mode for training only supports "same" and "valid".
Args:
kernel_size (int): Size of the window to take a max over. Default 1.
stride (int): Stride size of the window. Default: 1.
pad_mode (str): Select the mode of the pad. The optional values are
"same" and "valid". Default: "valid".
kernel_size (Union[int, tuple[int]]): The size of kernel used to take the max value,
is an int number that represents height and width are both kernel_size,
or a tuple of two int numbers that represent height and width respectively.
Default: 1.
stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
the height and width of movement are both strides, or a tuple of two int numbers that
represent height and width of movement respectively. Default: 1.
pad_mode (str): The optional values for pad mode, is "same" or "valid", not case sensitive.
Default: "valid".
- same: Adopts the way of completion. Output height and width will be the same as
the input. Total number of padding will be calculated for horizontal and vertical
direction and evenly distributed to top and bottom, left and right if possible. Otherwise, the
last extra padding will be done from the bottom and the right side.
direction and evenly distributed to top and bottom, left and right if possible.
Otherwise, the last extra padding will be done from the bottom and the right side.
- valid: Adopts the way of discarding. The possibly largest height and width of output will be return
without padding. Extra pixels will be discarded.
padding (int): Implicit zero padding to be added on both sides. Default: 0.
- valid: Adopts the way of discarding. The possibly largest height and width of output
will be return without padding. Extra pixels will be discarded.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
@ -103,31 +123,22 @@ class MaxPool2d(_PoolNd):
[[7. 8.]
[8. 8.]]]]
"""
def __init__(self,
kernel_size=1,
stride=1,
pad_mode="VALID",
padding=0):
max_pool = P.MaxPool(ksize=kernel_size,
strides=stride,
padding=pad_mode)
self.is_autodiff_backend = False
if self.is_autodiff_backend:
# At present, pad mode of max pool is not unified, so it is a temporarily avoided
pad_mode = validator.check_string('pad_mode', pad_mode.lower(), ['valid', 'same'])
max_pool = P.MaxPoolWithArgmax(window=kernel_size,
stride=stride,
pad_mode=pad_mode,
pad=padding)
super(MaxPool2d, self).__init__(kernel_size, stride, pad_mode, padding, max_pool)
def __init__(self, kernel_size=1, stride=1, pad_mode="valid"):
super(MaxPool2d, self).__init__(kernel_size, stride, pad_mode)
self.max_pool = P.MaxPool(ksize=self.kernel_size,
strides=self.stride,
padding=self.pad_mode)
self.max_pool_with_arg_max = P.MaxPoolWithArgmax(ksize=self.kernel_size,
strides=self.stride,
padding=self.pad_mode)
self.is_tbe = context.get_context("device_target") == "Ascend"
def construct(self, x):
if self.is_autodiff_backend:
out = self.pool(x)[0]
if self.is_tbe and self.training:
out = self.max_pool_with_arg_max(x)[0]
else:
out = self.pool(x)
out = self.max_pool(x)
return out
@ -149,19 +160,24 @@ class AvgPool2d(_PoolNd):
pad_mode for training only supports "same" and "valid".
Args:
kernel_size (int): Size of the window to take a max over. Default: 1.
stride (int): Stride size of the window. Default: 1.
pad_mode (str): Select the mode of the pad. The optional values are
"same", "valid". Default: "valid".
kernel_size (Union[int, tuple[int]]): The size of kernel used to take the average value,
is an int number that represents height and width are both kernel_size,
or a tuple of two int numbers that represent height and width respectively.
Default: 1.
stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
the height and width of movement are both strides, or a tuple of two int numbers that
represent height and width of movement respectively. Default: 1.
pad_mode (str): The optional values for pad mode, is "same" or "valid", not case sensitive.
Default: "valid".
- same: Adopts the way of completion. Output height and width will be the same as
the input. Total number of padding will be calculated for horizontal and vertical
direction and evenly distributed to top and bottom, left and right if possible. Otherwise, the
last extra padding will be done from the bottom and the right side.
direction and evenly distributed to top and bottom, left and right if possible.
Otherwise, the last extra padding will be done from the bottom and the right side.
- valid: Adopts the way of discarding. The possibly largest height and width of output
will be return without padding. Extra pixels will be discarded.
- valid: Adopts the way of discarding. The possibly largest height and width of output will be return
without padding. Extra pixels will be discarded.
padding (int): Implicit zero padding to be added on both sides. Default: 0.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
@ -170,7 +186,7 @@ class AvgPool2d(_PoolNd):
Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.
Examples:
>>> pool = AvgPool2d(kernel_size=3, stride=1)
>>> pool = AvgPool2d(kernel_size=3, stride=1)
>>> x = Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), mindspore.float32)
[[[[5. 5. 9. 9.]
[8. 4. 3. 0.]
@ -189,12 +205,15 @@ class AvgPool2d(_PoolNd):
[[4.2222223 4.5555553]
[3.2222223 4.5555553]]]]
"""
def __init__(self,
kernel_size=1,
stride=1,
pad_mode="VALID",
padding=0):
avg_pool = P.AvgPool(ksize=kernel_size,
strides=stride,
padding=pad_mode)
super(AvgPool2d, self).__init__(kernel_size, stride, pad_mode, padding, avg_pool)
pad_mode="valid"):
super(AvgPool2d, self).__init__(kernel_size, stride, pad_mode)
self.avg_pool = P.AvgPool(ksize=self.kernel_size,
strides=self.stride,
padding=self.pad_mode)
def construct(self, x):
return self.avg_pool(x)

@ -76,14 +76,9 @@ def get_bprop_depthwise_conv2d_native(self):
def get_bprop_max_pool_with_argmax(self):
"""Grad definition for `MaxPoolWithArgmax` operation."""
maxpool_grad = G.MaxPoolGradWithArgmax(
pad_mode=self.pad_mode,
window=self.window,
pad=self.pad,
stride=self.stride,
data_mode=self.data_mode,
ceil_mode=self.ceil_mode,
alpha=self.alpha,
beta=self.beta)
ksize=self.ksize,
strides=self.strides,
padding=self.padding,)
def bprop(x, out, dout):
dx = maxpool_grad(x, dout[0], out[1])

@ -28,19 +28,19 @@ from mindspore.ops.op_info_register import op_info_register
"partial_flag": true,
"attr": [
{
"name": "window",
"name": "ksize",
"param_type": "required",
"type": "listInt",
"value": "all"
},
{
"name": "stride",
"name": "strides",
"param_type": "required",
"type": "listInt",
"value": "all"
},
{
"name": "pad_mode",
"name": "padding",
"param_type": "required",
"type": "str",
"value": "all"

@ -28,19 +28,19 @@ from mindspore.ops.op_info_register import op_info_register
"partial_flag": true,
"attr": [
{
"name": "window",
"name": "ksize",
"param_type": "required",
"type": "listInt",
"value": "all"
},
{
"name": "stride",
"name": "strides",
"param_type": "required",
"type": "listInt",
"value": "all"
},
{
"name": "pad_mode",
"name": "padding",
"param_type": "required",
"type": "str",
"value": "all"

@ -15,7 +15,6 @@
"""Operators for gradients."""
import math
from ..._c_expression import signature_rw as sig_rw
from ..._c_expression import signature_kind as sig_kind
from ..primitive import Primitive, PrimitiveWithInfer, prim_attr_register
@ -340,59 +339,60 @@ class _PoolGrad(PrimitiveWithInfer):
"""Gradients of the max/avg pool operation."""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="VALID"):
def __init__(self, ksize, strides, padding="VALID"):
self.init_prim_io_names(inputs=['x_origin', 'out_origin', 'grad'], outputs=['output'])
self.ksize = ksize
self.strides = strides
self.padding = padding
self.ksize = validator.check_type('ksize', self.ksize, [int, tuple])
self.strides = validator.check_type('strides', self.strides, [int, tuple])
validator.check_type('padding', self.padding, [str])
self.padding = validator.check_string('padding', self.padding, ['VALID', 'SAME'])
validator.check_type('ksize', ksize, [int, tuple])
validator.check_type('strides', strides, [int, tuple])
self.padding = validator.check_string('padding', padding.upper(), ['VALID', 'SAME'])
self.add_prim_attr("padding", self.padding)
self.is_maxpoolgradwithargmax = (self.name == "MaxPoolGradWithArgmax")
if not self.is_maxpoolgradwithargmax:
self.add_prim_attr('data_format', "NCHW")
if isinstance(self.ksize, int):
self.pool_h = validator.check_integer("ksize", self.ksize, 1, Rel.GE)
self.pool_w = self.pool_h
self.add_prim_attr("ksize", (1, 1, self.ksize, self.ksize))
elif isinstance(self.ksize, tuple):
if (len(self.ksize) != 2 and len(self.ksize) != 4):
raise ValueError('Attr \'ksize\' of \'Pool\' Op passed ' +
str(self.ksize)+', should be a int or a tuple of length 2 or 4.')
for ksize_val in self.ksize:
if (not isinstance(ksize_val, int)) or (ksize_val <= 0):
raise ValueError('Each value of attr \'ksize\' of \'MaxPool\' Op passed ' +
str(self.ksize)+', should be int and greater than 0.')
self.pool_h = self.ksize[-2]
self.pool_w = self.ksize[-1]
self.add_prim_attr("ksize", (1, 1, self.ksize[-2], self.ksize[-1]))
if isinstance(self.strides, int):
self.stride_h = validator.check_integer("strides", self.strides, 1, Rel.GE)
self.stride_w = self.stride_h
self.add_prim_attr("strides", (1, 1, self.strides, self.strides))
elif isinstance(self.strides, tuple):
if (len(self.strides) != 2 and len(self.strides) != 4):
raise ValueError('Attr \'strides\' of \'MaxPool\' Op passed ' +
str(self.strides)+', should be a int or a tuple of length 2 or 4.')
for stride_val in self.strides:
if (not isinstance(stride_val, int)) or (stride_val <= 0):
raise ValueError('Each value of attr \'strides\' of \'MaxPool\' Op passed ' +
str(self.strides)+', should be int and greater than 0.')
self.stride_h = self.strides[-2]
self.stride_w = self.strides[-1]
self.add_prim_attr("strides", (1, 1, self.strides[-2], self.strides[-1]))
if self.padding == "VALID":
self.pad = 0
elif self.padding == "SAME":
self.pad = math.floor((self.pool_h - 1) / 2)
if isinstance(ksize, int):
validator.check_integer("ksize", ksize, 1, Rel.GE)
if self.is_maxpoolgradwithargmax:
self.ksize = (1, ksize, ksize, 1)
else:
self.ksize = (1, 1, ksize, ksize)
else:
ksize_error = ValueError(f"The 'ksize' passed to operator {self.name} should be an positive int number"
f"or a tuple of two or four positive int numbers, but got {ksize}")
if len(ksize) != 2 and len(ksize) != 4:
raise ksize_error
for ksize_val in ksize:
if not isinstance(ksize_val, int) or (ksize_val <= 0):
raise ksize_error
if len(ksize) == 2 and self.is_maxpoolgradwithargmax:
self.ksize = (1, ksize[0], ksize[1], 1)
elif len(ksize) == 2 and not self.is_maxpoolgradwithargmax:
self.ksize = (1, 1, ksize[0], ksize[1])
else:
self.ksize = ksize
self.add_prim_attr("ksize", self.ksize)
if isinstance(strides, int):
validator.check_integer("strides", strides, 1, Rel.GE)
if self.is_maxpoolgradwithargmax:
self.strides = (1, strides, strides, 1)
else:
self.strides = (1, 1, strides, strides)
else:
raise ValueError('The padding should be str and must be SAME or VALID,'
' but got {}.'.format(self.padding))
strides_error = ValueError(f"The 'strides' passed to operator {self.name} should be an positive int number"
f"or a tuple of two or four positive int numbers, but got {strides}")
if len(strides) != 2 and len(strides) != 4:
raise strides_error
for strides_val in strides:
if not isinstance(strides_val, int) or (strides_val <= 0):
raise strides_error
if len(strides) == 2 and self.is_maxpoolgradwithargmax:
self.strides = (1, strides[0], strides[1], 1)
elif len(strides) == 2 and not self.is_maxpoolgradwithargmax:
self.strides = (1, 1, strides[0], strides[1])
else:
self.strides = strides
self.add_prim_attr("strides", self.strides)
class AvgPoolGrad(_PoolGrad):
@ -451,28 +451,13 @@ class MaximumGrad(Primitive):
raise NotImplementedError
class MaxPoolGradWithArgmax(PrimitiveWithInfer):
class MaxPoolGradWithArgmax(_PoolGrad):
"""Computes the gradients of MaxPoolWithArgmax."""
@prim_attr_register
def __init__(self,
pad_mode="valid",
window=0,
pad=0,
stride=1,
data_mode=1,
ceil_mode=0,
alpha=1.0,
beta=0.0):
def __init__(self, ksize=1, strides=1, padding="VALID",):
self.init_prim_io_names(inputs=['x', 'grad', 'argmax'], outputs=['output'])
self.window = window
self.pool_h = self.pool_w = window
self.pad = pad
self.pad_mode = pad_mode
self.stride = stride
self.data_mode = data_mode
self.ceil_mode = ceil_mode
super(MaxPoolGradWithArgmax, self).__init__(ksize, strides, padding)
def infer_shape(self, x_shape, grad_shape, argmax_shape):
if not grad_shape:

File diff suppressed because it is too large Load Diff

@ -103,7 +103,7 @@ class ResNet50(nn.Cell):
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, pad_mode='pad')
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU()
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, pad_mode='valid')
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='valid')
self.layer1 = self.MakeLayer(
block, 3, in_channels=64, out_channels=256, stride=1)

@ -21,6 +21,7 @@ import mindspore.nn as nn
from mindspore.ops import operations as P
from mindspore import Tensor
class LeNet(nn.Cell):
def __init__(self):
super(LeNet, self).__init__()
@ -50,8 +51,10 @@ class LeNet(nn.Cell):
output = self.fc3(output)
return output
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
def train(net, data, label):
learning_rate = 0.01
momentum = 0.9
@ -67,6 +70,7 @@ def train(net, data, label):
print("+++++++++++++++++++++++++++")
assert res
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard

@ -38,7 +38,7 @@ class AlexNet(nn.Cell):
self.conv4 = nn.Conv2d(384, 384, 3, stride=1, pad_mode="same")
self.conv5 = nn.Conv2d(384, 256, 3, stride=1, pad_mode="same")
self.relu = nn.ReLU()
self.max_pool2d = nn.MaxPool2d(kernel_size=3, stride=2,pad_mode="valid",padding=0)
self.max_pool2d = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="valid")
self.flatten = nn.Flatten()
self.fc1 = nn.Dense(6*6*256, 4096)
self.fc2 = nn.Dense(4096, 4096)

@ -20,24 +20,27 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
context.set_context(device_target="Ascend")
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.maxpool = P.MaxPoolWithArgmax(pad_mode="same",
window=3,
stride=2)
self.maxpool = P.MaxPoolWithArgmax(padding="same",
ksize=3,
strides=2)
self.x = Parameter(initializer(
'normal', [1, 64, 112, 112]), name='w')
self.add = P.TensorAdd()
@ms_function
def construct(self):
output = self.maxpool(self.x)
return output[0]
def test_net():
x = np.random.randn(1, 64, 112, 112).astype(np.float32)
maxpool = Net()

@ -37,9 +37,9 @@ class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.maxpool = P.MaxPoolWithArgmax(pad_mode="same",
window=3,
stride=2)
self.maxpool = P.MaxPoolWithArgmax(padding="same",
ksize=3,
strides=2)
@ms_function
def construct(self, x):

@ -267,7 +267,7 @@ class ResNet(nn.Cell):
self.bn1 = bn_with_initialize(64)
self.relu = P.ReLU()
self.maxpool = P.MaxPoolWithArgmax(window=3, stride=2, pad_mode="same")
self.maxpool = P.MaxPoolWithArgmax(ksize=3, strides=2, padding="SAME")
self.layer1 = MakeLayer0(block, layer_num[0], in_channels=64, out_channels=256, stride=1)
self.layer2 = MakeLayer1(block, layer_num[1], in_channels=256, out_channels=512, stride=2)

@ -21,7 +21,7 @@ addn = P.AddN()
add = P.TensorAdd()
sub = P.Sub()
mul = P.Mul()
max_pool = P.MaxPoolWithArgmax(pad_mode="same", window=3, stride=2)
max_pool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2)
make_tuple = Primitive('make_tuple')
four2five = Primitive('Four2Five')
five2four = Primitive('Five2Four')

@ -17,7 +17,7 @@ from mindspore.ops import Primitive
tuple_getitem = Primitive('tuple_getitem')
add = P.TensorAdd()
max_pool = P.MaxPoolWithArgmax(pad_mode="same", window=3, stride=2)
max_pool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2)
make_tuple = Primitive('make_tuple')
transdata = Primitive("TransData")

@ -21,7 +21,7 @@ addn = P.AddN()
add = P.TensorAdd()
sub = P.Sub()
mul = P.Mul()
max_pool = P.MaxPoolWithArgmax(pad_mode="same", window=3, stride=2)
max_pool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2)
make_tuple = Primitive('make_tuple')
cast = Primitive('Cast')

@ -17,7 +17,7 @@ from mindspore.ops import Primitive
tuple_getitem = Primitive('tuple_getitem')
add = P.TensorAdd()
max_pool = P.MaxPoolWithArgmax(pad_mode="same", window=3, stride=2)
max_pool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2)
make_tuple = Primitive('make_tuple')
four2five = Primitive('Four2Five')
five2four = Primitive('Five2Four')

@ -17,7 +17,7 @@ from mindspore.ops import Primitive
tuple_getitem = Primitive('tuple_getitem')
add = P.TensorAdd()
max_pool = P.MaxPoolWithArgmax(pad_mode="same", window=3, stride=2)
max_pool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2)
make_tuple = Primitive('make_tuple')
transdata = Primitive("TransData")
Transpose = P.Transpose()

@ -22,7 +22,7 @@ add = P.TensorAdd()
reshape = P.Reshape()
cast = P.Cast()
tuple_getitem = Primitive('tuple_getitem')
max_pool = P.MaxPoolWithArgmax(pad_mode="same", window=3, stride=2)
max_pool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2)
def test_addn_cast(x, y, z):
sum = addn((x, y))

@ -107,7 +107,7 @@ class ResNet18(nn.Cell):
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, pad_mode='pad')
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU()
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, pad_mode='pad')
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='same')
self.layer1 = self.MakeLayer(
block, 2, in_channels=64, out_channels=256, stride=1)
@ -176,7 +176,7 @@ class ResNet9(nn.Cell):
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, pad_mode='pad')
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU()
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, pad_mode='same')
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='same')
self.layer1 = self.MakeLayer(
block, 1, in_channels=64, out_channels=256, stride=1)

@ -189,7 +189,7 @@ class ResNet50(nn.Cell):
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, weight_init=weight_conv)
self.bn1 = bn_with_initialize(64)
self.relu = nn.ReLU()
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2)
self.layer1 = MakeLayer3(
block, in_channels=64, out_channels=256, stride=1)

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save