diff --git a/mindspore/_extends/graph_kernel/model/model.py b/mindspore/_extends/graph_kernel/model/model.py
index c1eed10702..ea2044fb29 100644
--- a/mindspore/_extends/graph_kernel/model/model.py
+++ b/mindspore/_extends/graph_kernel/model/model.py
@@ -257,10 +257,10 @@ class Value:
         self.data_format = data_format
 
     def __str__(self):
-        return self.name + str(list(self.shape)) + str(self.value)
+        return self.name + str(list(self.shape))
 
     def __repr__(self):
-        return "%s.%s%s%s" % (self.name, self.dtype, str(list(self.shape)), str(self.value))
+        return "%s.%s%s" % (self.name, self.dtype, str(list(self.shape)))
 
     def get_size(self):
         return 1
diff --git a/mindspore/common/parameter.py b/mindspore/common/parameter.py
index c398643e93..c12d2e097a 100644
--- a/mindspore/common/parameter.py
+++ b/mindspore/common/parameter.py
@@ -158,16 +158,10 @@ class Parameter(MetaTensor_):
         return (Tensor, data)
 
     def __str__(self):
-        value_str = MetaTensor.__str__(self)
-        if isinstance(self, Tensor):
-            value_str = Tensor.__str__(self)
-        return f'Parameter (name={self._param_info.name}, value={value_str})'
+        return f'Parameter (name={self._param_info.name})'
 
     def __repr__(self):
-        value_str = MetaTensor.__repr__(self)
-        if isinstance(self, Tensor):
-            value_str = Tensor.__repr__(self)
-        return f'Parameter (name={self._param_info.name}, value={value_str})'
+        return f'Parameter (name={self._param_info.name})'
 
     def __parameter__(self):
         """For parse check."""
diff --git a/mindspore/nn/layer/basic.py b/mindspore/nn/layer/basic.py
index d29c9f56e9..5e9283fed7 100644
--- a/mindspore/nn/layer/basic.py
+++ b/mindspore/nn/layer/basic.py
@@ -121,8 +121,7 @@ class Dropout(Cell):
         return self.dropout_do_mask(x, output, keep_prob)
 
     def extend_repr(self):
-        str_info = 'keep_prob={}, dtype={}'.format(self.keep_prob, self.dtype)
-        return str_info
+        return 'keep_prob={}, dtype={}'.format(self.keep_prob, self.dtype)
 
 
 class Flatten(Cell):
@@ -365,8 +364,7 @@ class Norm(Cell):
         return x
 
     def extend_repr(self):
-        str_info = 'axis={}, keep_dims={}'.format(self.axis, self.keep_dims)
-        return str_info
+        return 'axis={}, keep_dims={}'.format(self.axis, self.keep_dims)
 
 
 class OneHot(Cell):
diff --git a/mindspore/nn/layer/normalization.py b/mindspore/nn/layer/normalization.py
index 1dd454b270..8555fc2be2 100644
--- a/mindspore/nn/layer/normalization.py
+++ b/mindspore/nn/layer/normalization.py
@@ -547,9 +547,8 @@ class LayerNorm(Cell):
 
     def extend_repr(self):
         """Display instance object as string."""
-        s = 'normalized_shape={}, begin_norm_axis={}, begin_params_axis={}, gamma{}, beta={}'.format(
+        return 'normalized_shape={}, begin_norm_axis={}, begin_params_axis={}, gamma={}, beta={}'.format(
             self.normalized_shape, self.begin_norm_axis, self.begin_params_axis, self.gamma, self.beta)
-        return s
 
 
 class GroupNorm(Cell):
@@ -642,5 +641,4 @@ class GroupNorm(Cell):
 
     def extend_repr(self):
         """Display instance object as string."""
-        s = 'num_groups={}, num_channels={}'.format(self.num_groups, self.num_channels)
-        return s
+        return 'num_groups={}, num_channels={}'.format(self.num_groups, self.num_channels)
diff --git a/mindspore/nn/layer/quant.py b/mindspore/nn/layer/quant.py
index bbdffa3d1b..a0b5d22d29 100644
--- a/mindspore/nn/layer/quant.py
+++ b/mindspore/nn/layer/quant.py
@@ -1008,14 +1008,13 @@ class DenseQuant(Cell):
 
     def extend_repr(self):
         """A pretty print for Dense layer."""
-        str_info = 'in_channels={}, out_channels={}, weight={}, has_bias={}'.format(
+        s = 'in_channels={}, out_channels={}, weight={}, has_bias={}'.format(
             self.in_channels, self.out_channels, self.weight, self.has_bias)
         if self.has_bias:
-            str_info = str_info + ', bias={}'.format(self.bias)
+            s += ', bias={}'.format(self.bias)
         if self.activation_flag:
-            str_info = str_info + ', activation={}'.format(self.activation)
-
-        return str_info
+            s += ', activation={}'.format(self.activation)
+        return s
 
 
 class _QuantActivation(Cell):
@@ -1387,13 +1386,13 @@ class QuantBlock(Cell):
         return x
 
     def extend_repr(self):
-        str_info = f'quant={self.quant}, core_op={type(self.core_op)}, weight=shape[{self.weight.shape}]'
+        s = f'quant={self.quant}, core_op={type(self.core_op)}, weight=shape[{self.weight.shape}]'
         if self.has_bias:
-            str_info = str_info + f', bias=shape[{self.bias.shape}]'
+            s += f', bias=shape[{self.bias.shape}]'
         if self.has_act:
-            str_info = str_info + f', activation={self.activation}'
-        str_info = str_info + f', dequant={self.dequant}'
-        return str_info
+            s += f', activation={self.activation}'
+        s += f', dequant={self.dequant}'
+        return s
 
 
 class QuantMindirBlock(Cell):
@@ -1454,9 +1453,9 @@ class QuantMindirBlock(Cell):
         return x
 
     def extend_repr(self):
-        str_info = f'core_op={type(self.core_op)}, weight=shape[{self.weight.shape}]'
+        s = f'core_op={type(self.core_op)}, weight=shape[{self.weight.shape}]'
         if self.has_bias:
-            str_info = str_info + f', bias=shape[{self.bias.shape}]'
+            s += f', bias=shape[{self.bias.shape}]'
         if self.has_act:
-            str_info = str_info + f', activation={self.activation}'
-        return str_info
+            s += f', activation={self.activation}'
+        return s
diff --git a/mindspore/nn/probability/bijector/gumbel_cdf.py b/mindspore/nn/probability/bijector/gumbel_cdf.py
index 5cfb6b0a57..d3c3308b56 100644
--- a/mindspore/nn/probability/bijector/gumbel_cdf.py
+++ b/mindspore/nn/probability/bijector/gumbel_cdf.py
@@ -100,8 +100,7 @@ class GumbelCDF(Bijector):
         return self._parameter_type
 
     def extend_repr(self):
-        str_info = f'loc = {self.loc}, scale = {self.scale}'
-        return str_info
+        return f'loc = {self.loc}, scale = {self.scale}'
 
     def shape_mapping(self, shape):
         return shape
diff --git a/mindspore/nn/probability/bijector/power_transform.py b/mindspore/nn/probability/bijector/power_transform.py
index 30d653f873..58c2b5bc54 100644
--- a/mindspore/nn/probability/bijector/power_transform.py
+++ b/mindspore/nn/probability/bijector/power_transform.py
@@ -81,8 +81,7 @@ class PowerTransform(Bijector):
         return self._power
 
     def extend_repr(self):
-        str_info = f'power = {self.power}'
-        return str_info
+        return f'power = {self.power}'
 
     def shape_mapping(self, shape):
         return shape
diff --git a/mindspore/nn/probability/bijector/scalar_affine.py b/mindspore/nn/probability/bijector/scalar_affine.py
index 598623d71c..f6ab8b8a80 100644
--- a/mindspore/nn/probability/bijector/scalar_affine.py
+++ b/mindspore/nn/probability/bijector/scalar_affine.py
@@ -90,8 +90,7 @@ class ScalarAffine(Bijector):
         return self._shift
 
     def extend_repr(self):
-        str_info = f'scale = {self.scale}, shift = {self.shift}'
-        return str_info
+        return f'scale = {self.scale}, shift = {self.shift}'
 
     def shape_mapping(self, shape):
         return shape
diff --git a/mindspore/nn/probability/bijector/softplus.py b/mindspore/nn/probability/bijector/softplus.py
index 5c175e7abe..93184ddde3 100644
--- a/mindspore/nn/probability/bijector/softplus.py
+++ b/mindspore/nn/probability/bijector/softplus.py
@@ -118,8 +118,7 @@ class Softplus(Bijector):
         return self._sharpness
 
     def extend_repr(self):
-        str_info = f'sharpness = {self.sharpness}'
-        return str_info
+        return f'sharpness = {self.sharpness}'
 
     def shape_mapping(self, shape):
         return shape
diff --git a/mindspore/nn/probability/bnn_layers/conv_variational.py b/mindspore/nn/probability/bnn_layers/conv_variational.py
index f1ca42f733..f04e0343bf 100644
--- a/mindspore/nn/probability/bnn_layers/conv_variational.py
+++ b/mindspore/nn/probability/bnn_layers/conv_variational.py
@@ -141,15 +141,15 @@ class _ConvVariational(_Conv):
         return outputs
 
     def extend_repr(self):
-        str_info = 'in_channels={}, out_channels={}, kernel_size={}, stride={}, pad_mode={}, ' \
-                   'padding={}, dilation={}, group={}, weight_mean={}, weight_std={}, has_bias={}'\
+        s = 'in_channels={}, out_channels={}, kernel_size={}, stride={}, pad_mode={}, ' \
+            'padding={}, dilation={}, group={}, weight_mean={}, weight_std={}, has_bias={}'\
             .format(self.in_channels, self.out_channels, self.kernel_size, self.stride, self.pad_mode,
                     self.padding, self.dilation, self.group, self.weight_posterior.mean,
                     self.weight_posterior.untransformed_std, self.has_bias)
         if self.has_bias:
-            str_info = str_info + ', bias_mean={}, bias_std={}'\
+            s += ', bias_mean={}, bias_std={}'\
                 .format(self.bias_posterior.mean, self.bias_posterior.untransformed_std)
-        return str_info
+        return s
 
     def _apply_variational_bias(self, inputs):
         bias_posterior_tensor = self.bias_posterior("sample")
diff --git a/mindspore/nn/probability/bnn_layers/dense_variational.py b/mindspore/nn/probability/bnn_layers/dense_variational.py
index 5dcfe5d80b..13a42717ec 100644
--- a/mindspore/nn/probability/bnn_layers/dense_variational.py
+++ b/mindspore/nn/probability/bnn_layers/dense_variational.py
@@ -107,16 +107,15 @@ class _DenseVariational(Cell):
         return outputs
 
     def extend_repr(self):
-        str_info = 'in_channels={}, out_channels={}, weight_mean={}, weight_std={}, has_bias={}' \
+        s = 'in_channels={}, out_channels={}, weight_mean={}, weight_std={}, has_bias={}' \
             .format(self.in_channels, self.out_channels, self.weight_posterior.mean,
                     self.weight_posterior.untransformed_std, self.has_bias)
         if self.has_bias:
-            str_info = str_info + ', bias_mean={}, bias_std={}' \
+            s += ', bias_mean={}, bias_std={}' \
                 .format(self.bias_posterior.mean, self.bias_posterior.untransformed_std)
-
         if self.activation_flag:
-            str_info = str_info + ', activation={}'.format(self.activation)
-        return str_info
+            s += ', activation={}'.format(self.activation)
+        return s
 
     def _apply_variational_bias(self, inputs):
         bias_posterior_tensor = self.bias_posterior("sample")
diff --git a/mindspore/nn/probability/distribution/bernoulli.py b/mindspore/nn/probability/distribution/bernoulli.py
index c3ccc2aa42..00382cd5cd 100644
--- a/mindspore/nn/probability/distribution/bernoulli.py
+++ b/mindspore/nn/probability/distribution/bernoulli.py
@@ -141,10 +141,10 @@ class Bernoulli(Distribution):
 
     def extend_repr(self):
         if self.is_scalar_batch:
-            str_info = f'probs = {self.probs}'
+            s = f'probs = {self.probs}'
         else:
-            str_info = f'batch_shape = {self._broadcast_shape}'
-        return str_info
+            s = f'batch_shape = {self._broadcast_shape}'
+        return s
 
     @property
     def probs(self):
diff --git a/mindspore/nn/probability/distribution/categorical.py b/mindspore/nn/probability/distribution/categorical.py
index 63d8bea947..b148475427 100644
--- a/mindspore/nn/probability/distribution/categorical.py
+++ b/mindspore/nn/probability/distribution/categorical.py
@@ -157,10 +157,10 @@ class Categorical(Distribution):
 
     def extend_repr(self):
         if self.is_scalar_batch:
-            str_info = f'probs = {self.probs}'
+            s = f'probs = {self.probs}'
         else:
-            str_info = f'batch_shape = {self._broadcast_shape}'
-        return str_info
+            s = f'batch_shape = {self._broadcast_shape}'
+        return s
 
     @property
     def probs(self):
diff --git a/mindspore/nn/probability/distribution/exponential.py b/mindspore/nn/probability/distribution/exponential.py
index c21b20b612..378e2cba31 100644
--- a/mindspore/nn/probability/distribution/exponential.py
+++ b/mindspore/nn/probability/distribution/exponential.py
@@ -145,10 +145,10 @@ class Exponential(Distribution):
 
     def extend_repr(self):
         if self.is_scalar_batch:
-            str_info = f'rate = {self.rate}'
+            s = f'rate = {self.rate}'
         else:
-            str_info = f'batch_shape = {self._broadcast_shape}'
-        return str_info
+            s = f'batch_shape = {self._broadcast_shape}'
+        return s
 
     @property
     def rate(self):
diff --git a/mindspore/nn/probability/distribution/geometric.py b/mindspore/nn/probability/distribution/geometric.py
index 86c0ca5f85..ad0eef12c4 100644
--- a/mindspore/nn/probability/distribution/geometric.py
+++ b/mindspore/nn/probability/distribution/geometric.py
@@ -150,10 +150,10 @@ class Geometric(Distribution):
 
     def extend_repr(self):
         if self.is_scalar_batch:
-            str_info = f'probs = {self.probs}'
+            s = f'probs = {self.probs}'
         else:
-            str_info = f'batch_shape = {self._broadcast_shape}'
-        return str_info
+            s = f'batch_shape = {self._broadcast_shape}'
+        return s
 
     @property
     def probs(self):
diff --git a/mindspore/nn/probability/distribution/log_normal.py b/mindspore/nn/probability/distribution/log_normal.py
index 69d4059a34..eb5782e10c 100644
--- a/mindspore/nn/probability/distribution/log_normal.py
+++ b/mindspore/nn/probability/distribution/log_normal.py
@@ -163,10 +163,10 @@ class LogNormal(msd.TransformedDistribution):
 
     def extend_repr(self):
         if self.is_scalar_batch:
-            str_info = f'loc = {self._mean_value}, scale = {self._sd_value}'
+            s = f'loc = {self._mean_value}, scale = {self._sd_value}'
         else:
-            str_info = f'batch_shape = {self._broadcast_shape}'
-        return str_info
+            s = f'batch_shape = {self._broadcast_shape}'
+        return s
 
     def _mean(self, loc=None, scale=None):
         """
diff --git a/mindspore/nn/probability/distribution/logistic.py b/mindspore/nn/probability/distribution/logistic.py
index fa3a0dfb06..de27eee046 100644
--- a/mindspore/nn/probability/distribution/logistic.py
+++ b/mindspore/nn/probability/distribution/logistic.py
@@ -156,10 +156,10 @@ class Logistic(Distribution):
 
     def extend_repr(self):
         if self.is_scalar_batch:
-            str_info = f'location = {self._loc}, scale = {self._scale}'
+            s = f'location = {self._loc}, scale = {self._scale}'
         else:
-            str_info = f'batch_shape = {self._broadcast_shape}'
-        return str_info
+            s = f'batch_shape = {self._broadcast_shape}'
+        return s
 
     @property
     def loc(self):
diff --git a/mindspore/nn/probability/distribution/normal.py b/mindspore/nn/probability/distribution/normal.py
index 4226cccd16..0059c74b22 100644
--- a/mindspore/nn/probability/distribution/normal.py
+++ b/mindspore/nn/probability/distribution/normal.py
@@ -149,10 +149,10 @@ class Normal(Distribution):
 
     def extend_repr(self):
         if self.is_scalar_batch:
-            str_info = f'mean = {self._mean_value}, standard deviation = {self._sd_value}'
+            s = f'mean = {self._mean_value}, standard deviation = {self._sd_value}'
         else:
-            str_info = f'batch_shape = {self._broadcast_shape}'
-        return str_info
+            s = f'batch_shape = {self._broadcast_shape}'
+        return s
 
     def _mean(self, mean=None, sd=None):
         """
diff --git a/mindspore/nn/probability/distribution/uniform.py b/mindspore/nn/probability/distribution/uniform.py
index feafe87b14..31f317d786 100644
--- a/mindspore/nn/probability/distribution/uniform.py
+++ b/mindspore/nn/probability/distribution/uniform.py
@@ -154,10 +154,10 @@ class Uniform(Distribution):
 
     def extend_repr(self):
         if self.is_scalar_batch:
-            str_info = f'low = {self.low}, high = {self.high}'
+            s = f'low = {self.low}, high = {self.high}'
         else:
-            str_info = f'batch_shape = {self._broadcast_shape}'
-        return str_info
+            s = f'batch_shape = {self._broadcast_shape}'
+        return s
 
     @property
     def low(self):
diff --git a/model_zoo/official/cv/resnet_thor/src/thor_layer.py b/model_zoo/official/cv/resnet_thor/src/thor_layer.py
index 7a5ee3bd86..54fbd2d961 100644
--- a/model_zoo/official/cv/resnet_thor/src/thor_layer.py
+++ b/model_zoo/official/cv/resnet_thor/src/thor_layer.py
@@ -392,15 +392,12 @@ class Dense_Thor_GPU(Cell):
 
     def extend_repr(self):
         """extend_repr"""
-        str_info = 'in_channels={}, out_channels={}, weight={}, has_bias={}' \
-            .format(self.in_channels, self.out_channels, self.weight, self.has_bias)
+        s = 'in_channels={}, out_channels={}'.format(self.in_channels, self.out_channels)
         if self.has_bias:
-            str_info = str_info + ', bias={}'.format(self.bias)
-
+            s += ', has_bias={}'.format(self.has_bias)
         if self.activation_flag:
-            str_info = str_info + ', activation={}'.format(self.activation)
-
-        return str_info
+            s += ', activation={}'.format(self.activation)
+        return s
 
 
 class Conv2d_Thor(_Conv):
@@ -775,12 +772,9 @@ class Dense_Thor(Cell):
 
     def extend_repr(self):
         """extend_repr"""
-        str_info = 'in_channels={}, out_channels={}, weight={}, has_bias={}' \
-            .format(self.in_channels, self.out_channels, self.weight, self.has_bias)
+        s = 'in_channels={}, out_channels={}'.format(self.in_channels, self.out_channels)
         if self.has_bias:
-            str_info = str_info + ', bias={}'.format(self.bias)
-
+            s += ', has_bias={}'.format(self.has_bias)
         if self.activation_flag:
-            str_info = str_info + ', activation={}'.format(self.activation)
-
-        return str_info
+            s += ', activation={}'.format(self.activation)
+        return s
diff --git a/model_zoo/official/gnn/gat/src/gat.py b/model_zoo/official/gnn/gat/src/gat.py
index 97edeada79..d33f765d06 100644
--- a/model_zoo/official/gnn/gat/src/gat.py
+++ b/model_zoo/official/gnn/gat/src/gat.py
@@ -102,12 +102,10 @@ class GNNFeatureTransform(nn.Cell):
         return output
 
     def extend_repr(self):
-        str_info = 'in_channels={}, out_channels={}, weight={}, has_bias={}' \
-            .format(self.in_channels, self.out_channels, self.weight, self.has_bias)
+        s = 'in_channels={}, out_channels={}'.format(self.in_channels, self.out_channels)
         if self.has_bias:
-            str_info = str_info + ', bias={}'.format(self.bias)
-
-        return str_info
+            s += ', has_bias={}'.format(self.has_bias)
+        return s
 
 
 class _BaseAggregator(nn.Cell):
diff --git a/model_zoo/official/nlp/bert_thor/src/thor_layer.py b/model_zoo/official/nlp/bert_thor/src/thor_layer.py
index 0fb646fe25..5814e95604 100644
--- a/model_zoo/official/nlp/bert_thor/src/thor_layer.py
+++ b/model_zoo/official/nlp/bert_thor/src/thor_layer.py
@@ -270,12 +270,9 @@ class Dense_Thor(Cell):
 
     def extend_repr(self):
         """extend_repr"""
-        str_info = 'in_channels={}, out_channels={}, weight={}, has_bias={}' \
-            .format(self.in_channels, self.out_channels, self.weight, self.has_bias)
+        s = 'in_channels={}, out_channels={}'.format(self.in_channels, self.out_channels)
         if self.has_bias:
-            str_info = str_info + ', bias={}'.format(self.bias)
-
+            s += ', bias={}'.format(self.bias)
         if self.activation_flag:
-            str_info = str_info + ', activation={}'.format(self.activation)
-
-        return str_info
+            s += ', activation={}'.format(self.activation)
+        return s
diff --git a/tests/st/gnn/aggregator.py b/tests/st/gnn/aggregator.py
index b1c69a23a6..829901cdbc 100644
--- a/tests/st/gnn/aggregator.py
+++ b/tests/st/gnn/aggregator.py
@@ -104,12 +104,10 @@ class GNNFeatureTransform(nn.Cell):
         return output
 
     def extend_repr(self):
-        str_info = 'in_channels={}, out_channels={}, weight={}, has_bias={}' \
-            .format(self.in_channels, self.out_channels, self.weight, self.has_bias)
+        s = 'in_channels={}, out_channels={}'.format(self.in_channels, self.out_channels)
        if self.has_bias:
-            str_info = str_info + ', bias={}'.format(self.bias)
-
-        return str_info
+            s += ', has_bias={}'.format(self.has_bias)
+        return s
 
 
 class _BaseAggregator(nn.Cell):
diff --git a/tests/st/networks/models/resnet50/src_thor/thor_layer.py b/tests/st/networks/models/resnet50/src_thor/thor_layer.py
index 6b56461c45..5442927ea4 100644
--- a/tests/st/networks/models/resnet50/src_thor/thor_layer.py
+++ b/tests/st/networks/models/resnet50/src_thor/thor_layer.py
@@ -470,12 +470,10 @@ class Dense_Thor(Cell):
 
     def extend_repr(self):
         """extend_repr"""
-        str_info = 'in_channels={}, out_channels={}, weight={}, has_bias={}' \
-            .format(self.in_channels, self.out_channels, self.weight, self.has_bias)
+        s = 'in_channels={}, out_channels={}'.format(self.in_channels, self.out_channels)
         if self.has_bias:
-            str_info = str_info + ', bias={}'.format(self.bias)
-
+            s += ', has_bias={}'.format(self.has_bias)
         if self.activation_flag:
-            str_info = str_info + ', activation={}'.format(self.activation)
-        return str_info
+            s += ', activation={}'.format(self.activation)
+        return s
 