Develop op MaxPoolWithArgMax

pull/99/head
buxue 5 years ago
parent e6ea09082c
commit 7541d3b067

@@ -148,8 +148,6 @@ void TbeAdapter::InputOrderPass(const std::string &op_name, std::vector<std::vec
 }
 std::map<std::string, FAttrsPass> TbeAdapter::build_json_attr_pass_map_ = {
-  {"MaxPoolWithArgmax", TbeAdapter::MaxPoolWithArgmaxAttrJsonPass},
-  {"MaxPoolGradWithArgmax", TbeAdapter::MaxPoolGradWithArgmaxAttrJsonPass},
   {"Conv2D", TbeAdapter::Conv2DAttrJsonPass},
   {"Conv2DBackpropFilter", TbeAdapter::Conv2DBackpropFilterAttrJsonPass},
   {"Conv2DBackpropInput", TbeAdapter::Conv2DBackpropInputAttrJsonPass},
@@ -170,48 +168,6 @@ bool TbeAdapter::RunAttrPass(const mindspore::AnfNodePtr &anf_node,
   return false;
 }
-void TbeAdapter::MaxPoolWithArgmaxAttrJsonPass(
-  const mindspore::AnfNodePtr &anf_node, const std::vector<std::shared_ptr<mindspore::kernel::OpAttr>> &op_info_attrs,
-  nlohmann::json *attrs_json) {
-  MS_EXCEPTION_IF_NULL(anf_node);
-  MS_EXCEPTION_IF_NULL(attrs_json);
-  auto attr_num = op_info_attrs.size();
-  auto primitive = AnfAlgo::GetCNodePrimitive(anf_node);
-  MS_EXCEPTION_IF_NULL(primitive);
-  for (size_t i = 0; i < attr_num; i++) {
-    nlohmann::json attr_obj;
-    MS_EXCEPTION_IF_NULL(op_info_attrs[i]);
-    std::string attr_name = op_info_attrs[i]->name();
-    if (primitive->GetAttr(attr_name) != nullptr) {
-      auto value = primitive->GetAttr(attr_name);
-      if (attr_name == "pad_mode") {
-        std::string attr_value = GetValue<std::string>(value);
-        (void)transform(attr_value.begin(), attr_value.end(), attr_value.begin(), ::toupper);
-        attr_obj["value"] = attr_value;
-      } else {
-        std::vector<int> attr_value;
-        int data = GetValue<int>(value);
-        attr_value.push_back(1);
-        attr_value.push_back(data);
-        attr_value.push_back(data);
-        attr_value.push_back(1);
-        attr_obj["value"] = attr_value;
-      }
-      attr_obj["valid"] = true;
-    } else {
-      attr_obj["valid"] = false;
-    }
-    attr_obj["name"] = attr_name;
-    attrs_json->push_back(attr_obj);
-  }
-}
-void TbeAdapter::MaxPoolGradWithArgmaxAttrJsonPass(
-  const mindspore::AnfNodePtr &anf_node, const std::vector<std::shared_ptr<mindspore::kernel::OpAttr>> &op_info_attrs,
-  nlohmann::json *attrs_json) {
-  MaxPoolWithArgmaxAttrJsonPass(anf_node, op_info_attrs, attrs_json);
-}
 void TbeAdapter::Conv2DAttrJsonPass(const mindspore::AnfNodePtr &anf_node,
                                     const std::vector<std::shared_ptr<mindspore::kernel::OpAttr>> &op_info_attrs,
                                     nlohmann::json *attrs_json) {
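
For context: the two removed passes rewrote the MaxPool attrs into the JSON layout the TBE compiler expects, expanding a scalar window/stride into a 4-element NHWC list and upper-casing pad_mode. A minimal Python rendering of that behavior (hypothetical helper, illustrative only) shows why the pass became redundant: after this commit the primitives carry canonical ksize/strides/padding attrs themselves (see the _PoolGrad diff further down; the forward op's file is suppressed in this view).

def legacy_maxpool_attrs_json(primitive_attrs):
    """Mirror of the removed C++ pass (sketch, not part of the codebase)."""
    attrs_json = []
    for name, value in primitive_attrs.items():
        obj = {"name": name}
        if value is None:
            obj["valid"] = False  # attr missing on the primitive
        elif name == "pad_mode":
            obj["valid"], obj["value"] = True, value.upper()
        else:
            obj["valid"], obj["value"] = True, [1, value, value, 1]  # NHWC expansion
        attrs_json.append(obj)
    return attrs_json

# legacy_maxpool_attrs_json({"pad_mode": "same", "window": 3, "stride": 2})
# -> [{'name': 'pad_mode', 'valid': True, 'value': 'SAME'},
#     {'name': 'window', 'valid': True, 'value': [1, 3, 3, 1]},
#     {'name': 'stride', 'valid': True, 'value': [1, 2, 2, 1]}]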

@@ -161,6 +161,7 @@ const char kNameTopK[] = "TopK";
 const char kNameSoftmaxGrad[] = "SoftmaxGrad";
 const char kNameMaxPool[] = "MaxPool";
 const char kNameAvgPool[] = "AvgPool";
+const char kNameMaxPoolWithArgmax[] = "MaxPoolWithArgmax";
 const char kNameBatchNorm[] = "BatchNorm";
 const char kNameBatchNormGrad[] = "BatchNormGrad";
 const char kNameROIAlign[] = "ROIAlign";
@@ -198,6 +199,7 @@ std::unordered_map<std::string, OpAdapterDescPtr> &DfGraphConvertor::get_adpt_ma
     {string(kNameApplyMomentum), ADPT_DESC(ApplyMomentum)},
     {string(kNameMaxPool), ADPT_DESC(MaxPool)},
     {string(kNameAvgPool), ADPT_DESC(AvgPool)},
+    {string(kNameMaxPoolWithArgmax), ADPT_DESC(MaxPoolWithArgmax)},
     {string(kNameTopK), ADPT_DESC(TopKV2)},
     {string(kNamePack), ADPT_DESC(Pack)},
     {string(kNameSplitD), ADPT_DESC(SplitD)},

@@ -734,14 +734,22 @@ ATTR_MAP(AvgPoolGrad) = {{"ksize", ATTR_DESC(ksize, AnyTraits<int>(), AnyTraits<
 OUTPUT_MAP(AvgPoolGrad) = {{0, OUTPUT_DESC(out_grad)}};
 // MaxPoolWithArgmax
+INPUT_MAP(MaxPoolWithArgmax) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(MaxPoolWithArgmax) = {{"ksize", ATTR_DESC(ksize, AnyTraits<int>(), AnyTraits<std::vector<int64_t>>())},
+                               {"strides", ATTR_DESC(strides, AnyTraits<int>(), AnyTraits<std::vector<int64_t>>())},
+                               {"padding", ATTR_DESC(padding, AnyTraits<std::string>())}};
+OUTPUT_MAP(MaxPoolWithArgmax) = {{0, OUTPUT_DESC(y)}, {1, OUTPUT_DESC(argmax)}};
+// MaxPoolGradWithArgmax
 INPUT_MAP(MaxPoolGradWithArgmax) = {
   {1, INPUT_DESC(x)},
-  {2, INPUT_DESC(argmax)},
-  {3, INPUT_DESC(grad)},
+  {2, INPUT_DESC(grad)},
+  {3, INPUT_DESC(argmax)},
 };
-ATTR_MAP(MaxPoolGradWithArgmax) = {{"pad_mode", ATTR_DESC(padding, AnyTraits<std::string>())},
-                                   {"window", ATTR_DESC(ksize, "window", AnyTraits<std::vector<int64_t>>())},
-                                   {"stride", ATTR_DESC(strides, "stride", AnyTraits<std::vector<int64_t>>())}};
+ATTR_MAP(MaxPoolGradWithArgmax) = {{"ksize", ATTR_DESC(ksize, AnyTraits<int>(), AnyTraits<std::vector<int64_t>>())},
+                                   {"strides", ATTR_DESC(strides, AnyTraits<int>(), AnyTraits<std::vector<int64_t>>())},
+                                   {"padding", ATTR_DESC(padding, AnyTraits<std::string>())}};
+OUTPUT_MAP(MaxPoolGradWithArgmax) = {{0, OUTPUT_DESC(y)}};
 // Conv2D
 INPUT_MAP(Conv2D) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(filter)}};

@@ -88,8 +88,10 @@ DECLARE_OP_ADAPTER(FusedBatchNormGrad)
 DECLARE_OP_USE_OUTPUT(FusedBatchNormGrad)
 DECLARE_OP_ADAPTER(BiasAddGrad)
 DECLARE_OP_USE_OUTPUT(BiasAddGrad)
+DECLARE_OP_ADAPTER(MaxPoolWithArgmax)
+DECLARE_OP_USE_OUTPUT(MaxPoolWithArgmax)
 DECLARE_OP_ADAPTER(MaxPoolGradWithArgmax)
-DECLARE_OP_USE_ENUM(MaxPoolGradWithArgmax)
+DECLARE_OP_USE_OUTPUT(MaxPoolGradWithArgmax)
 DECLARE_OP_ADAPTER(Conv2D)
 DECLARE_OP_USE_ENUM(Conv2D)
 DECLARE_OP_USE_OUTPUT(Conv2D)

@@ -168,7 +168,7 @@ class ResNet(nn.Cell):
         self.conv1 = _conv7x7(3, 64, stride=2)
         self.bn1 = _bn(64)
         self.relu = P.ReLU()
-        self.maxpool = P.MaxPoolWithArgmax(pad_mode='same', window=3, stride=2)
+        self.maxpool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2)
         self.layer1 = self._make_layer(block,
                                        layer_nums[0],

@@ -13,36 +13,52 @@
 # limitations under the License.
 # ============================================================================
 """pooling"""
 from mindspore.ops import operations as P
 from mindspore._checkparam import ParamValidator as validator
 from mindspore._checkparam import Rel
+from ... import context
 from ..cell import Cell


 class _PoolNd(Cell):
     """N-D AvgPool"""

-    def __init__(self,
-                 kernel_size,
-                 stride,
-                 pad_mode,
-                 padding=0,
-                 pool=None):
+    def __init__(self, kernel_size, stride, pad_mode):
+        name = self.__class__.__name__
         super(_PoolNd, self).__init__()
+        validator.check_type('kernel_size', kernel_size, [int, tuple])
+        validator.check_type('stride', stride, [int, tuple])
+        self.pad_mode = validator.check_string('pad_mode', pad_mode.upper(), ['VALID', 'SAME'])
+        if isinstance(kernel_size, int):
+            validator.check_integer("kernel_size", kernel_size, 1, Rel.GE)
+        else:
+            if (len(kernel_size) != 2 or
+                    (not isinstance(kernel_size[0], int)) or
+                    (not isinstance(kernel_size[1], int)) or
+                    kernel_size[0] <= 0 or
+                    kernel_size[1] <= 0):
+                raise ValueError(f'The kernel_size passed to cell {name} should be a positive int number or '
+                                 f'a tuple of two positive int numbers, but got {kernel_size}')
         self.kernel_size = kernel_size
+        if isinstance(stride, int):
+            validator.check_integer("stride", stride, 1, Rel.GE)
+        else:
+            if (len(stride) != 2 or
+                    (not isinstance(stride[0], int)) or
+                    (not isinstance(stride[1], int)) or
+                    stride[0] <= 0 or
+                    stride[1] <= 0):
+                raise ValueError(f'The stride passed to cell {name} should be a positive int number or '
+                                 f'a tuple of two positive int numbers, but got {stride}')
         self.stride = stride
-        self.pad_mode = pad_mode
-        self.padding = validator.check_integer('padding', padding, 0, Rel.GE)
-        self.pool = pool
-        if self.pool is None:
-            raise NotImplementedError

-    def construct(self, x):
-        return self.pool(x)
+    def construct(self, *inputs):
+        pass

     def extend_repr(self):
-        return 'kernel_size={kernel_size}, stride={stride}, pad_mode={pad_mode}'.format(**self.__dict__)
+        return 'kernel_size={kernel_size}, strides={strides}, pad_mode={pad_mode}'.format(**self.__dict__)


 class MaxPool2d(_PoolNd):
@@ -63,19 +79,23 @@ class MaxPool2d(_PoolNd):
     pad_mode for training only supports "same" and "valid".

     Args:
-        kernel_size (int): Size of the window to take a max over. Default 1.
-        stride (int): Stride size of the window. Default: 1.
-        pad_mode (str): Select the mode of the pad. The optional values are
-            "same" and "valid". Default: "valid".
+        kernel_size (Union[int, tuple[int]]): The size of kernel used to take the max value,
+            is an int number that represents height and width are both kernel_size,
+            or a tuple of two int numbers that represent height and width respectively.
+            Default: 1.
+        stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
+            the height and width of movement are both strides, or a tuple of two int numbers that
+            represent height and width of movement respectively. Default: 1.
+        pad_mode (str): The optional values for pad mode are "same" or "valid", not case sensitive.
+            Default: "valid".

             - same: Adopts the way of completion. Output height and width will be the same as
               the input. Total number of padding will be calculated for horizontal and vertical
-              direction and evenly distributed to top and bottom, left and right if possible. Otherwise, the
-              last extra padding will be done from the bottom and the right side.
+              direction and evenly distributed to top and bottom, left and right if possible.
+              Otherwise, the last extra padding will be done from the bottom and the right side.

-            - valid: Adopts the way of discarding. The possibly largest height and width of output will be return
-              without padding. Extra pixels will be discarded.
-        padding (int): Implicit zero padding to be added on both sides. Default: 0.
+            - valid: Adopts the way of discarding. The possibly largest height and width of output
+              will be returned without padding. Extra pixels will be discarded.

     Inputs:
         - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
@@ -103,31 +123,22 @@ class MaxPool2d(_PoolNd):
          [[7. 8.]
           [8. 8.]]]]
     """
-    def __init__(self,
-                 kernel_size=1,
-                 stride=1,
-                 pad_mode="VALID",
-                 padding=0):
-        max_pool = P.MaxPool(ksize=kernel_size,
-                             strides=stride,
-                             padding=pad_mode)
-        self.is_autodiff_backend = False
-        if self.is_autodiff_backend:
-            # At present, pad mode of max pool is not unified, so it is a temporarily avoided
-            pad_mode = validator.check_string('pad_mode', pad_mode.lower(), ['valid', 'same'])
-            max_pool = P.MaxPoolWithArgmax(window=kernel_size,
-                                           stride=stride,
-                                           pad_mode=pad_mode,
-                                           pad=padding)
-        super(MaxPool2d, self).__init__(kernel_size, stride, pad_mode, padding, max_pool)
+
+    def __init__(self, kernel_size=1, stride=1, pad_mode="valid"):
+        super(MaxPool2d, self).__init__(kernel_size, stride, pad_mode)
+        self.max_pool = P.MaxPool(ksize=self.kernel_size,
+                                  strides=self.stride,
+                                  padding=self.pad_mode)
+        self.max_pool_with_arg_max = P.MaxPoolWithArgmax(ksize=self.kernel_size,
+                                                         strides=self.stride,
+                                                         padding=self.pad_mode)
+        self.is_tbe = context.get_context("device_target") == "Ascend"

     def construct(self, x):
-        if self.is_autodiff_backend:
-            out = self.pool(x)[0]
+        if self.is_tbe and self.training:
+            out = self.max_pool_with_arg_max(x)[0]
         else:
-            out = self.pool(x)
+            out = self.max_pool(x)
         return out
@@ -149,19 +160,24 @@ class AvgPool2d(_PoolNd):
     pad_mode for training only supports "same" and "valid".

     Args:
-        kernel_size (int): Size of the window to take a max over. Default: 1.
-        stride (int): Stride size of the window. Default: 1.
-        pad_mode (str): Select the mode of the pad. The optional values are
-            "same", "valid". Default: "valid".
+        kernel_size (Union[int, tuple[int]]): The size of kernel used to take the average value,
+            is an int number that represents height and width are both kernel_size,
+            or a tuple of two int numbers that represent height and width respectively.
+            Default: 1.
+        stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
+            the height and width of movement are both strides, or a tuple of two int numbers that
+            represent height and width of movement respectively. Default: 1.
+        pad_mode (str): The optional values for pad mode are "same" or "valid", not case sensitive.
+            Default: "valid".

             - same: Adopts the way of completion. Output height and width will be the same as
               the input. Total number of padding will be calculated for horizontal and vertical
-              direction and evenly distributed to top and bottom, left and right if possible. Otherwise, the
-              last extra padding will be done from the bottom and the right side.
+              direction and evenly distributed to top and bottom, left and right if possible.
+              Otherwise, the last extra padding will be done from the bottom and the right side.
+
+            - valid: Adopts the way of discarding. The possibly largest height and width of output
+              will be returned without padding. Extra pixels will be discarded.

-            - valid: Adopts the way of discarding. The possibly largest height and width of output will be return
-              without padding. Extra pixels will be discarded.
-        padding (int): Implicit zero padding to be added on both sides. Default: 0.

     Inputs:
         - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
@@ -170,7 +186,7 @@ class AvgPool2d(_PoolNd):
         Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.

     Examples:
-        >>> pool = AvgPool2d(kernel_size=3, stride=1)
+        >>> pool = AvgPool2d(kernel_size=3, strides=1)
         >>> x = Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), mindspore.float32)
         [[[[5. 5. 9. 9.]
            [8. 4. 3. 0.]
@@ -189,12 +205,15 @@ class AvgPool2d(_PoolNd):
          [[4.2222223 4.5555553]
           [3.2222223 4.5555553]]]]
     """
+
     def __init__(self,
                  kernel_size=1,
                  stride=1,
-                 pad_mode="VALID",
-                 padding=0):
-        avg_pool = P.AvgPool(ksize=kernel_size,
-                             strides=stride,
-                             padding=pad_mode)
-        super(AvgPool2d, self).__init__(kernel_size, stride, pad_mode, padding, avg_pool)
+                 pad_mode="valid"):
+        super(AvgPool2d, self).__init__(kernel_size, stride, pad_mode)
+        self.avg_pool = P.AvgPool(ksize=self.kernel_size,
+                                  strides=self.stride,
+                                  padding=self.pad_mode)
+
+    def construct(self, x):
+        return self.avg_pool(x)
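
A usage sketch of the reworked cells (shapes illustrative). On Ascend the training-mode MaxPool2d routes through MaxPoolWithArgmax and keeps only the pooled output, as the construct above shows; on other backends it falls back to plain MaxPool:

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor

pool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")
x = Tensor(np.random.randn(1, 64, 112, 112).astype(np.float32))
out = pool(x)  # "same" with stride 2 keeps ceil(112 / 2) = 56 per spatial dim -> (1, 64, 56, 56)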

@@ -76,14 +76,9 @@ def get_bprop_depthwise_conv2d_native(self):

 def get_bprop_max_pool_with_argmax(self):
     """Grad definition for `MaxPoolWithArgmax` operation."""
     maxpool_grad = G.MaxPoolGradWithArgmax(
-        pad_mode=self.pad_mode,
-        window=self.window,
-        pad=self.pad,
-        stride=self.stride,
-        data_mode=self.data_mode,
-        ceil_mode=self.ceil_mode,
-        alpha=self.alpha,
-        beta=self.beta)
+        ksize=self.ksize,
+        strides=self.strides,
+        padding=self.padding)

     def bprop(x, out, dout):
         dx = maxpool_grad(x, dout[0], out[1])
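
In this bprop, out is the forward (output, argmax) tuple and dout its incoming gradient, so the grad op receives the original input, the gradient of the pooled output, and the saved argmax indices. A rough NumPy model of what MaxPoolGradWithArgmax computes (illustrative only; it assumes flat argmax indices into x, which may not match the TBE kernel's actual index layout):

import numpy as np

def max_pool_grad_with_argmax_ref(x, grad, argmax):
    """Scatter the pooled-output gradient back to the argmax positions of x."""
    dx = np.zeros(x.size, dtype=grad.dtype)
    np.add.at(dx, argmax.ravel(), grad.ravel())  # accumulate where pooling windows overlap
    return dx.reshape(x.shape)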

@@ -28,19 +28,19 @@ from mindspore.ops.op_info_register import op_info_register
     "partial_flag": true,
     "attr": [
         {
-            "name": "window",
+            "name": "ksize",
             "param_type": "required",
             "type": "listInt",
             "value": "all"
         },
         {
-            "name": "stride",
+            "name": "strides",
             "param_type": "required",
             "type": "listInt",
             "value": "all"
         },
         {
-            "name": "pad_mode",
+            "name": "padding",
             "param_type": "required",
             "type": "str",
             "value": "all"

@@ -28,19 +28,19 @@ from mindspore.ops.op_info_register import op_info_register
     "partial_flag": true,
     "attr": [
         {
-            "name": "window",
+            "name": "ksize",
             "param_type": "required",
             "type": "listInt",
             "value": "all"
         },
         {
-            "name": "stride",
+            "name": "strides",
             "param_type": "required",
             "type": "listInt",
             "value": "all"
         },
         {
-            "name": "pad_mode",
+            "name": "padding",
             "param_type": "required",
             "type": "str",
             "value": "all"

@@ -15,7 +15,6 @@
 """Operators for gradients."""
-import math
 from ..._c_expression import signature_rw as sig_rw
 from ..._c_expression import signature_kind as sig_kind
 from ..primitive import Primitive, PrimitiveWithInfer, prim_attr_register
@@ -340,59 +339,60 @@ class _PoolGrad(PrimitiveWithInfer):
     """Gradients of the max/avg pool operation."""

     @prim_attr_register
-    def __init__(self, ksize=1, strides=1, padding="VALID"):
+    def __init__(self, ksize, strides, padding="VALID"):
         self.init_prim_io_names(inputs=['x_origin', 'out_origin', 'grad'], outputs=['output'])
-        self.ksize = ksize
-        self.strides = strides
-        self.padding = padding
-        self.ksize = validator.check_type('ksize', self.ksize, [int, tuple])
-        self.strides = validator.check_type('strides', self.strides, [int, tuple])
-        validator.check_type('padding', self.padding, [str])
-        self.padding = validator.check_string('padding', self.padding, ['VALID', 'SAME'])
+
+        validator.check_type('ksize', ksize, [int, tuple])
+        validator.check_type('strides', strides, [int, tuple])
+        self.padding = validator.check_string('padding', padding.upper(), ['VALID', 'SAME'])
         self.add_prim_attr("padding", self.padding)
-        self.add_prim_attr('data_format', "NCHW")
+        self.is_maxpoolgradwithargmax = (self.name == "MaxPoolGradWithArgmax")
+        if not self.is_maxpoolgradwithargmax:
+            self.add_prim_attr('data_format', "NCHW")

-        if isinstance(self.ksize, int):
-            self.pool_h = validator.check_integer("ksize", self.ksize, 1, Rel.GE)
-            self.pool_w = self.pool_h
-            self.add_prim_attr("ksize", (1, 1, self.ksize, self.ksize))
-        elif isinstance(self.ksize, tuple):
-            if (len(self.ksize) != 2 and len(self.ksize) != 4):
-                raise ValueError('Attr \'ksize\' of \'Pool\' Op passed ' +
-                                 str(self.ksize) + ', should be a int or a tuple of length 2 or 4.')
-            for ksize_val in self.ksize:
-                if (not isinstance(ksize_val, int)) or (ksize_val <= 0):
-                    raise ValueError('Each value of attr \'ksize\' of \'MaxPool\' Op passed ' +
-                                     str(self.ksize) + ', should be int and greater than 0.')
-            self.pool_h = self.ksize[-2]
-            self.pool_w = self.ksize[-1]
-            self.add_prim_attr("ksize", (1, 1, self.ksize[-2], self.ksize[-1]))
-        if isinstance(self.strides, int):
-            self.stride_h = validator.check_integer("strides", self.strides, 1, Rel.GE)
-            self.stride_w = self.stride_h
-            self.add_prim_attr("strides", (1, 1, self.strides, self.strides))
-        elif isinstance(self.strides, tuple):
-            if (len(self.strides) != 2 and len(self.strides) != 4):
-                raise ValueError('Attr \'strides\' of \'MaxPool\' Op passed ' +
-                                 str(self.strides) + ', should be a int or a tuple of length 2 or 4.')
-            for stride_val in self.strides:
-                if (not isinstance(stride_val, int)) or (stride_val <= 0):
-                    raise ValueError('Each value of attr \'strides\' of \'MaxPool\' Op passed ' +
-                                     str(self.strides) + ', should be int and greater than 0.')
-            self.stride_h = self.strides[-2]
-            self.stride_w = self.strides[-1]
-            self.add_prim_attr("strides", (1, 1, self.strides[-2], self.strides[-1]))
-
-        if self.padding == "VALID":
-            self.pad = 0
-        elif self.padding == "SAME":
-            self.pad = math.floor((self.pool_h - 1) / 2)
-        else:
-            raise ValueError('The padding should be str and must be SAME or VALID,'
-                             ' but got {}.'.format(self.padding))
+        if isinstance(ksize, int):
+            validator.check_integer("ksize", ksize, 1, Rel.GE)
+            if self.is_maxpoolgradwithargmax:
+                self.ksize = (1, ksize, ksize, 1)
+            else:
+                self.ksize = (1, 1, ksize, ksize)
+        else:
+            ksize_error = ValueError(f"The 'ksize' passed to operator {self.name} should be a positive int number "
+                                     f"or a tuple of two or four positive int numbers, but got {ksize}")
+            if len(ksize) != 2 and len(ksize) != 4:
+                raise ksize_error
+            for ksize_val in ksize:
+                if not isinstance(ksize_val, int) or (ksize_val <= 0):
+                    raise ksize_error
+            if len(ksize) == 2 and self.is_maxpoolgradwithargmax:
+                self.ksize = (1, ksize[0], ksize[1], 1)
+            elif len(ksize) == 2 and not self.is_maxpoolgradwithargmax:
+                self.ksize = (1, 1, ksize[0], ksize[1])
+            else:
+                self.ksize = ksize
+        self.add_prim_attr("ksize", self.ksize)
+
+        if isinstance(strides, int):
+            validator.check_integer("strides", strides, 1, Rel.GE)
+            if self.is_maxpoolgradwithargmax:
+                self.strides = (1, strides, strides, 1)
+            else:
+                self.strides = (1, 1, strides, strides)
+        else:
+            strides_error = ValueError(f"The 'strides' passed to operator {self.name} should be a positive int "
+                                       f"number or a tuple of two or four positive int numbers, but got {strides}")
+            if len(strides) != 2 and len(strides) != 4:
+                raise strides_error
+            for strides_val in strides:
+                if not isinstance(strides_val, int) or (strides_val <= 0):
+                    raise strides_error
+            if len(strides) == 2 and self.is_maxpoolgradwithargmax:
+                self.strides = (1, strides[0], strides[1], 1)
+            elif len(strides) == 2 and not self.is_maxpoolgradwithargmax:
+                self.strides = (1, 1, strides[0], strides[1])
+            else:
+                self.strides = strides
+        self.add_prim_attr("strides", self.strides)


 class AvgPoolGrad(_PoolGrad):
@@ -451,28 +451,13 @@ class MaximumGrad(Primitive):
         raise NotImplementedError


-class MaxPoolGradWithArgmax(PrimitiveWithInfer):
+class MaxPoolGradWithArgmax(_PoolGrad):
     """Computes the gradients of MaxPoolWithArgmax."""

     @prim_attr_register
-    def __init__(self,
-                 pad_mode="valid",
-                 window=0,
-                 pad=0,
-                 stride=1,
-                 data_mode=1,
-                 ceil_mode=0,
-                 alpha=1.0,
-                 beta=0.0):
+    def __init__(self, ksize=1, strides=1, padding="VALID"):
         self.init_prim_io_names(inputs=['x', 'grad', 'argmax'], outputs=['output'])
-
-        self.window = window
-        self.pool_h = self.pool_w = window
-        self.pad = pad
-        self.pad_mode = pad_mode
-        self.stride = stride
-        self.data_mode = data_mode
-        self.ceil_mode = ceil_mode
+        super(MaxPoolGradWithArgmax, self).__init__(ksize, strides, padding)

     def infer_shape(self, x_shape, grad_shape, argmax_shape):
         if not grad_shape:
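
The net effect of the rewrite: _PoolGrad now canonicalizes ksize and strides into 4-tuples itself, NHWC-ordered for MaxPoolGradWithArgmax and (1, 1, h, w) for the other pool grads, instead of deriving pool_h/pool_w/pad. A standalone sketch of that rule (validation omitted; mirrors the branches above):

def normalize_pool_attr(val, is_maxpoolgradwithargmax):
    """Canonicalize an int / 2-tuple / 4-tuple ksize or strides value."""
    if isinstance(val, int):
        val = (val, val)
    if len(val) == 4:
        return tuple(val)
    h, w = val
    return (1, h, w, 1) if is_maxpoolgradwithargmax else (1, 1, h, w)

print(normalize_pool_attr(3, True))        # (1, 3, 3, 1) for MaxPoolGradWithArgmax
print(normalize_pool_attr((2, 3), False))  # (1, 1, 2, 3) for MaxPoolGrad/AvgPoolGrad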

(File diff suppressed because it is too large.)

@@ -103,7 +103,7 @@ class ResNet50(nn.Cell):
         self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, pad_mode='pad')
         self.bn1 = nn.BatchNorm2d(64)
         self.relu = nn.ReLU()
-        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, pad_mode='valid')
+        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='valid')
         self.layer1 = self.MakeLayer(
             block, 3, in_channels=64, out_channels=256, stride=1)

@@ -21,6 +21,7 @@ import mindspore.nn as nn
 from mindspore.ops import operations as P
 from mindspore import Tensor

+
 class LeNet(nn.Cell):
     def __init__(self):
         super(LeNet, self).__init__()
@@ -50,8 +51,10 @@ class LeNet(nn.Cell):
         output = self.fc3(output)
         return output

+
 context.set_context(mode=context.GRAPH_MODE, device_target="CPU")

+
 def train(net, data, label):
     learning_rate = 0.01
     momentum = 0.9
@@ -67,11 +70,12 @@ def train(net, data, label):
     print("+++++++++++++++++++++++++++")
     assert res

+
 @pytest.mark.level0
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_lenet():
-    data = Tensor(np.ones([32, 1 ,32, 32]).astype(np.float32) * 0.01)
+    data = Tensor(np.ones([32, 1, 32, 32]).astype(np.float32) * 0.01)
     label = Tensor(np.ones([32]).astype(np.int32))
     net = LeNet()
     train(net, data, label)

@@ -38,7 +38,7 @@ class AlexNet(nn.Cell):
         self.conv4 = nn.Conv2d(384, 384, 3, stride=1, pad_mode="same")
         self.conv5 = nn.Conv2d(384, 256, 3, stride=1, pad_mode="same")
         self.relu = nn.ReLU()
-        self.max_pool2d = nn.MaxPool2d(kernel_size=3, stride=2,pad_mode="valid",padding=0)
+        self.max_pool2d = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="valid")
         self.flatten = nn.Flatten()
         self.fc1 = nn.Dense(6*6*256, 4096)
         self.fc2 = nn.Dense(4096, 4096)

@@ -20,26 +20,29 @@ import numpy as np
 import mindspore.context as context
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter

 context.set_context(device_target="Ascend")

+
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
-        self.maxpool = P.MaxPoolWithArgmax(pad_mode="same",
-                                           window=3,
-                                           stride=2)
+        self.maxpool = P.MaxPoolWithArgmax(padding="same",
+                                           ksize=3,
+                                           strides=2)
         self.x = Parameter(initializer(
             'normal', [1, 64, 112, 112]), name='w')
         self.add = P.TensorAdd()

     @ms_function
     def construct(self):
         output = self.maxpool(self.x)
         return output[0]

+
+
 def test_net():
-    x = np.random.randn(1,64,112,112).astype(np.float32)
+    x = np.random.randn(1, 64, 112, 112).astype(np.float32)
     maxpool = Net()
     output = maxpool()
     print("***********output output*********")

@@ -37,9 +37,9 @@ class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
-        self.maxpool = P.MaxPoolWithArgmax(pad_mode="same",
-                                           window=3,
-                                           stride=2)
+        self.maxpool = P.MaxPoolWithArgmax(padding="same",
+                                           ksize=3,
+                                           strides=2)

     @ms_function
     def construct(self, x):

@@ -267,7 +267,7 @@ class ResNet(nn.Cell):
         self.bn1 = bn_with_initialize(64)
         self.relu = P.ReLU()

-        self.maxpool = P.MaxPoolWithArgmax(window=3, stride=2, pad_mode="same")
+        self.maxpool = P.MaxPoolWithArgmax(ksize=3, strides=2, padding="SAME")
         self.layer1 = MakeLayer0(block, layer_num[0], in_channels=64, out_channels=256, stride=1)
         self.layer2 = MakeLayer1(block, layer_num[1], in_channels=256, out_channels=512, stride=2)

@@ -21,7 +21,7 @@ addn = P.AddN()
 add = P.TensorAdd()
 sub = P.Sub()
 mul = P.Mul()
-max_pool = P.MaxPoolWithArgmax(pad_mode="same", window=3, stride=2)
+max_pool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2)
 make_tuple = Primitive('make_tuple')
 four2five = Primitive('Four2Five')
 five2four = Primitive('Five2Four')

@@ -17,7 +17,7 @@ from mindspore.ops import Primitive
 tuple_getitem = Primitive('tuple_getitem')
 add = P.TensorAdd()
-max_pool = P.MaxPoolWithArgmax(pad_mode="same", window=3, stride=2)
+max_pool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2)
 make_tuple = Primitive('make_tuple')
 transdata = Primitive("TransData")

@@ -21,7 +21,7 @@ addn = P.AddN()
 add = P.TensorAdd()
 sub = P.Sub()
 mul = P.Mul()
-max_pool = P.MaxPoolWithArgmax(pad_mode="same", window=3, stride=2)
+max_pool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2)
 make_tuple = Primitive('make_tuple')
 cast = Primitive('Cast')

@@ -17,7 +17,7 @@ from mindspore.ops import Primitive
 tuple_getitem = Primitive('tuple_getitem')
 add = P.TensorAdd()
-max_pool = P.MaxPoolWithArgmax(pad_mode="same", window=3, stride=2)
+max_pool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2)
 make_tuple = Primitive('make_tuple')
 four2five = Primitive('Four2Five')
 five2four = Primitive('Five2Four')

@@ -17,7 +17,7 @@ from mindspore.ops import Primitive
 tuple_getitem = Primitive('tuple_getitem')
 add = P.TensorAdd()
-max_pool = P.MaxPoolWithArgmax(pad_mode="same", window=3, stride=2)
+max_pool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2)
 make_tuple = Primitive('make_tuple')
 transdata = Primitive("TransData")
 Transpose = P.Transpose()

@@ -22,7 +22,7 @@ add = P.TensorAdd()
 reshape = P.Reshape()
 cast = P.Cast()
 tuple_getitem = Primitive('tuple_getitem')
-max_pool = P.MaxPoolWithArgmax(pad_mode="same", window=3, stride=2)
+max_pool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2)

 def test_addn_cast(x, y, z):
     sum = addn((x, y))

@@ -107,7 +107,7 @@ class ResNet18(nn.Cell):
         self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, pad_mode='pad')
         self.bn1 = nn.BatchNorm2d(64)
         self.relu = nn.ReLU()
-        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, pad_mode='pad')
+        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='same')
         self.layer1 = self.MakeLayer(
             block, 2, in_channels=64, out_channels=256, stride=1)
@@ -176,7 +176,7 @@ class ResNet9(nn.Cell):
         self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, pad_mode='pad')
         self.bn1 = nn.BatchNorm2d(64)
         self.relu = nn.ReLU()
-        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, pad_mode='same')
+        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='same')
         self.layer1 = self.MakeLayer(
             block, 1, in_channels=64, out_channels=256, stride=1)

@@ -189,7 +189,7 @@ class ResNet50(nn.Cell):
         self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, weight_init=weight_conv)
         self.bn1 = bn_with_initialize(64)
         self.relu = nn.ReLU()
-        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2)
         self.layer1 = MakeLayer3(
             block, in_channels=64, out_channels=256, stride=1)

(Some files were not shown because too many files have changed in this diff.)
