diff --git a/mindspore/_akg/gpu/squeeze_grad.py b/mindspore/_akg/gpu/squeeze_grad.py
index 8180ff9638..ae31de8e84 100644
--- a/mindspore/_akg/gpu/squeeze_grad.py
+++ b/mindspore/_akg/gpu/squeeze_grad.py
@@ -15,14 +15,14 @@
 """squeeze grad"""
 import _akg.topi as topi
 
-def SqueezeGrad(y_grad, x_shape, axis=None):
+
+def SqueezeGrad(y_grad, x_shape):
     """
     Computes gradients for squeeze op.
 
     Args:
         y_grad (tvm.tensor.Tensor): the gradient needed to be propagation.
         x_shape (Union[list, tuple]): output Tensor shape.
-        axis (Union[list, tuple, int, None], optional): eliminated axis by squeeze.
 
     Returns:
         tvm.tensor.Tensor: output gradient.
diff --git a/mindspore/_akg/message.py b/mindspore/_akg/message.py
index 4528771848..3d1f81f914 100644
--- a/mindspore/_akg/message.py
+++ b/mindspore/_akg/message.py
@@ -46,7 +46,8 @@ def compilewithjson(json_str):
         impl_path = os.path.realpath(kernel_info['impl_path'])
         if os.path.isfile(impl_path):
             custom_mod_name = Path(impl_path).resolve().stem
-            mod_spec = importlib.util.spec_from_file_location(custom_mod_name, impl_path)
+            mod_spec = importlib.util.spec_from_file_location(
+                custom_mod_name, impl_path)
             custom_mod = importlib.util.module_from_spec(mod_spec)
             mod_spec.loader.exec_module(custom_mod)
             op_func = getattr(custom_mod, op_name, None)
@@ -57,7 +58,8 @@ def compilewithjson(json_str):
            op_func = getattr(gpu, op_name, None)
 
     if op_func is None:
-        logging.error("this op not supported, please check op name %s", str(op_name))
+        logging.error(
+            "this op not supported, please check op name %s", str(op_name))
         return False
 
     args = {}
@@ -87,25 +89,16 @@
 
     output = op_func(**args)
 
-    schedule_func = None
-    attrs = {}
     if isinstance(output, (list, tuple)):
         from inspect import isfunction
         tmp_outputs = []
         for elem in output:
-            if isfunction(elem):
-                schedule_func = elem
-            elif isinstance(elem, dict):
-                for key, value in elem.items():
-                    if key not in attrs or not attrs[key]:
-                        attrs[key] = value
-            else:
+            if not isfunction(elem) or isinstance(elem, dict):
                 tmp_outputs.append(elem)
 
         output = tmp_outputs
     else:
         output = [output]
 
-    tsr = tsr + [i for i in output if TensorUtils.is_output_value(i)]
-    return op_build([op_name], output, tsr, schedule_func, processor, kernel_info['op'], attrs)
+    return op_build([op_name], output, tsr, processor, kernel_info['op'])
diff --git a/mindspore/_akg/op_build.py b/mindspore/_akg/op_build.py
index aa6a65cff1..92101f657e 100644
--- a/mindspore/_akg/op_build.py
+++ b/mindspore/_akg/op_build.py
@@ -25,8 +25,8 @@ from _akg import save_gpu_param as gpu_utils
 from _akg.utils import validation_check as vc_util
 
 
-@vc_util.check_input_type(list, (list, tuple), (list, tuple), (types.FunctionType, type(None)), str, str, dict)
-def op_build(opnames, computes, args, custom_schedule, device, kernel_name, attrs):
+@vc_util.check_input_type(list, (list, tuple), (list, tuple), str, str)
+def op_build(opnames, computes, args, device, kernel_name):
     """op_build"""
     kernel_meta_path = "./cuda_meta_" + str(os.getpid()) + "/"
     if device == "cuda":
@@ -60,7 +60,7 @@ def op_build(opnames, computes, args, custom_schedule, device, kernel_name, attr
             kernel_info = (ptx_code, json_file, kernel_name)
             gpu_utils.save_gpu_params(s, args, kernel_info)
         os.chmod(ptx_file, 0o400)
-    except Exception:
+    except IOError:
         logging.error(traceback.format_exc())
         return None
     return True
diff --git a/mindspore/_akg/ops/math/mean.py b/mindspore/_akg/ops/math/mean.py
index 8764387d33..e8300f22fc 100644
--- a/mindspore/_akg/ops/math/mean.py
+++ b/mindspore/_akg/ops/math/mean.py
@@ -17,7 +17,7 @@ import _akg.topi
 import _akg.tvm
 from _akg.utils import format_transform as ft_util
 from _akg.utils import validation_check as vc_util
-from _akg.ops.math import sum
+from _akg.ops.math import sum_value
 
 
 @vc_util.check_input_type(_akg.tvm.tensor.Tensor, (list, tuple, int, type(None)), (bool, type(None)))
@@ -41,7 +41,7 @@ def mean(data, axis=None, keepdims=False):
     count = 1
     for i in axis:
         count *= shape[i]
-    output, _ = sum.sum_value(data, axis, keepdims)
+    output, _ = sum_value.sum_value(data, axis, keepdims)
     res = _akg.topi.divide(output, count)
 
     return res
diff --git a/mindspore/_akg/ops/math/sum.py b/mindspore/_akg/ops/math/sum_value.py
similarity index 100%
rename from mindspore/_akg/ops/math/sum.py
rename to mindspore/_akg/ops/math/sum_value.py
diff --git a/mindspore/ccsrc/kernel/common_utils.cc b/mindspore/ccsrc/kernel/common_utils.cc
index 8316116486..54980c2cb7 100644
--- a/mindspore/ccsrc/kernel/common_utils.cc
+++ b/mindspore/ccsrc/kernel/common_utils.cc
@@ -131,18 +131,18 @@ void KernelMeta::Initialize() {
 }
 
 void KernelMeta::RemoveKernelCache() {
-  if (access(kernel_meta_path_.c_str(), 0) == 0) {
-    DIR *dir = opendir(kernel_meta_path_.c_str());
-    MS_EXCEPTION_IF_NULL(dir);
-    struct dirent *entry;
-    while ((entry = readdir(dir)) != nullptr) {
-      std::string kernel_file = entry->d_name;
-      std::string kernel_file_realpath = kernel_meta_path_ + kernel_file;
-      (void)remove(kernel_file_realpath.c_str());
-    }
-    (void)closedir(dir);
-    (void)rmdir(kernel_meta_path_.c_str());
+  DIR *dir = opendir(kernel_meta_path_.c_str());
+  if (dir == nullptr) {
+    return;
+  }
+  struct dirent *entry;
+  while ((entry = readdir(dir)) != nullptr) {
+    std::string kernel_file = entry->d_name;
+    std::string kernel_file_realpath = kernel_meta_path_ + kernel_file;
+    (void)remove(kernel_file_realpath.c_str());
   }
+  (void)closedir(dir);
+  (void)rmdir(kernel_meta_path_.c_str());
 }
 
 std::string KernelMeta::Search(const std::string &kernel_name) const {
diff --git a/mindspore/ops/_op_impl/akg/gpu/squeeze_grad.py b/mindspore/ops/_op_impl/akg/gpu/squeeze_grad.py
index ef397ea0a7..17e45a327a 100644
--- a/mindspore/ops/_op_impl/akg/gpu/squeeze_grad.py
+++ b/mindspore/ops/_op_impl/akg/gpu/squeeze_grad.py
@@ -20,7 +20,6 @@ squeeze_grad_op_info = AkgRegOp("SqueezeGrad") \
     .input(0, "y_grad") \
     .output(0, "output") \
     .attr("x_shape", "required", "listInt") \
-    .attr("axis", "optional", "listInt") \
     .dtype_format(DataType.F16_Default, DataType.F16_Default) \
     .dtype_format(DataType.F32_Default, DataType.F32_Default) \
     .get_op_info()
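
The reworked loop in compilewithjson can be read in isolation as follows. This is a minimal sketch, not code from the patch: flatten_outputs is a hypothetical helper name, and the placeholder inputs stand in for whatever op_func(**args) returns; it only illustrates that, with no custom schedule collected anymore, function elements are dropped and everything else is kept as an output.

    # Hypothetical helper mirroring the simplified output handling above.
    from inspect import isfunction


    def flatten_outputs(output):
        """Drop schedule functions from an op's return value, keep the rest."""
        if isinstance(output, (list, tuple)):
            return [elem for elem in output
                    if not isfunction(elem) or isinstance(elem, dict)]
        return [output]


    # A compute result plus a schedule function: only the result is kept.
    print(flatten_outputs(("tensor_placeholder", lambda s: s)))  # ['tensor_placeholder']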
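
The new body of KernelMeta::RemoveKernelCache replaces the access() pre-check (and the MS_EXCEPTION_IF_NULL on a missing directory) with a quiet early return when opendir fails. A rough Python rendering of that control flow, with remove_kernel_cache as a hypothetical name and the path as an example only:

    import os


    def remove_kernel_cache(kernel_meta_path):
        """Delete every file in the kernel cache directory, then the directory."""
        try:
            entries = os.listdir(kernel_meta_path)
        except OSError:
            # Cache directory missing or unreadable: nothing to clean up,
            # mirroring the early return when opendir() yields nullptr.
            return
        for name in entries:
            try:
                os.remove(os.path.join(kernel_meta_path, name))
            except OSError:
                pass  # remove() failures are ignored in the C++ code as well
        try:
            os.rmdir(kernel_meta_path)
        except OSError:
            pass  # rmdir() result is likewise discarded


    remove_kernel_cache("./cuda_meta_12345/")  # hypothetical cache directory name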