From 23b471ca2b8aede71f3a566594919deb2cbb60ae Mon Sep 17 00:00:00 2001
From: shenwei41
Date: Fri, 26 Mar 2021 11:08:09 +0800
Subject: [PATCH] update include file 0325

---
 .../fwkacllib/inc/ops/nn_pooling_ops.h       | 79 ++++++++++++++++++-
 .../fwkacllib/inc/toolchain/prof_callback.h  |  1 +
 .../fwkacllib/inc/toolchain/prof_reporter.h  | 70 ++++++++--------
 3 files changed, 114 insertions(+), 36 deletions(-)

diff --git a/third_party/fwkacllib/inc/ops/nn_pooling_ops.h b/third_party/fwkacllib/inc/ops/nn_pooling_ops.h
index 8251fac3..139119f4 100644
--- a/third_party/fwkacllib/inc/ops/nn_pooling_ops.h
+++ b/third_party/fwkacllib/inc/ops/nn_pooling_ops.h
@@ -1444,8 +1444,7 @@ REG_OP(MaxPoolV3Grad)
 *@par Inputs:
 *x: A tensor of shape is 4d, format is support NHWC.
-*filter: A tensor of shape is 3d, the type is same with x,
-and the c dimension is same with x. \n
+*filter: A 3D tensor with the same type and C dimension as "x". \n

 *@par Attributes:
 *@li strides: A required list of 4 ints, specifying the stride of the sliding window. The strides of the N and C dimensions are 1.
@@ -1473,6 +1472,82 @@ REG_OP(Dilation2D)
     .ATTR(data_format, String, "NHWC")
     .OP_END_FACTORY_REG(Dilation2D)

+/**
+* @brief Performs Dilation2DBackpropFilter on the input. \n
+
+*@par Inputs:
+*x: A 4D tensor. The supported format is NHWC.
+*filter: A 3D tensor with the same type as "x".
+*out_backprop: Has the same type and format as input "x", and its C dimension is the same as that of "x". \n
+
+*@par Attributes:
+*@li strides: A required list of 4 ints, specifying the stride of the sliding window. The strides of the N and C
+dimensions are 1.
+*@li rates: A required list of 4 ints. The rates of the N and C dimensions are 1.
+*@li padding_mode: An optional string. Defaults to "SAME". Supported values are "SAME" and "VALID".
+*@li pads: An optional list of 4 ints. \n
+
+*@par Outputs:
+*y: The output tensor. Has the same type and format as input "filter". \n
+
+*@par Third-party framework compatibility
+* Compatible with the TensorFlow operator Dilation2DBackpropFilter
+*/
+
+REG_OP(Dilation2DBackpropFilter)
+    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16}))
+    .INPUT(filter,
+           TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16}))
+    .INPUT(out_backprop,
+           TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16}))
+    .OUTPUT(y,
+            TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16}))
+    .REQUIRED_ATTR(strides, ListInt)
+    .REQUIRED_ATTR(rates, ListInt)
+    .ATTR(padding_mode, String, "SAME")
+    .ATTR(pads, ListInt, {0, 0, 0, 0})
+    .ATTR(ceil_mode, Bool, false)
+    .ATTR(data_format, String, "NHWC")
+    .OP_END_FACTORY_REG(Dilation2DBackpropFilter)
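For context on the attribute semantics shared by Dilation2D and the two backprop operators in this hunk: strides, rates, and padding_mode follow the usual TensorFlow sizing rule, where the filter is first dilated by rates and the window then slides by strides. The helper below is a hypothetical, self-contained C++ sketch of that rule; the function names are not part of this header.

// Hypothetical helpers, for illustration only: output spatial size of
// Dilation2D / Dilation2DBackprop* for one dimension, assuming the standard
// TensorFlow "SAME"/"VALID" convention described in the comments above.
#include <cstdint>
#include <stdexcept>
#include <string>

// Filter extent once the dilation rate is applied: k_eff = k + (k - 1) * (rate - 1).
inline int64_t EffectiveFilterSize(int64_t filter, int64_t rate) {
  return filter + (filter - 1) * (rate - 1);
}

// Output size for one spatial dimension (H or W).
inline int64_t Dilation2dOutputSize(int64_t input, int64_t filter, int64_t stride,
                                    int64_t rate, const std::string &padding_mode) {
  const int64_t eff = EffectiveFilterSize(filter, rate);
  if (padding_mode == "SAME") {
    return (input + stride - 1) / stride;   // ceil(input / stride), padded as needed
  }
  if (padding_mode == "VALID") {
    if (input < eff) {
      throw std::invalid_argument("input smaller than the dilated filter");
    }
    return (input - eff) / stride + 1;      // only fully covered window positions
  }
  throw std::invalid_argument("padding_mode must be \"SAME\" or \"VALID\"");
}

For example, input = 7, filter = 3, rate = 2 (effective extent 5) and stride = 1 give 3 output positions with "VALID" and 7 with "SAME".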
+
+/**
+* @brief Performs Dilation2DBackpropInput on the input. \n
+
+*@par Inputs:
+*x: A 4D tensor. The supported format is NHWC.
+*filter: A 3D tensor with the same type as "x".
+*out_backprop: Has the same type and format as input "x", and its C dimension is the same as that of "x". \n
+
+*@par Attributes:
+*@li strides: A required list of 4 ints, specifying the stride of the sliding window. The strides of the N and C
+dimensions are 1.
+*@li rates: A required list of 4 ints. The rates of the N and C dimensions are 1.
+*@li padding_mode: An optional string. Defaults to "SAME". Supported values are "SAME" and "VALID".
+*@li pads: An optional list of 4 ints. \n
+
+*@par Outputs:
+*y: The output tensor. Has the same type and format as input "filter". \n
+
+*@par Third-party framework compatibility
+* Compatible with the TensorFlow operator Dilation2DBackpropInput
+*/
+
+REG_OP(Dilation2DBackpropInput)
+    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16}))
+    .INPUT(filter,
+           TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16}))
+    .INPUT(out_backprop,
+           TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16}))
+    .OUTPUT(y,
+            TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16}))
+    .REQUIRED_ATTR(strides, ListInt)
+    .REQUIRED_ATTR(rates, ListInt)
+    .ATTR(padding_mode, String, "SAME")
+    .ATTR(pads, ListInt, {0, 0, 0, 0})
+    .ATTR(ceil_mode, Bool, false)
+    .ATTR(data_format, String, "NHWC")
+    .OP_END_FACTORY_REG(Dilation2DBackpropInput)
+
 /**
 * @brief Applies a 2D adaptive average pooling over
 * an input signal composed of several input planes. \n
diff --git a/third_party/fwkacllib/inc/toolchain/prof_callback.h b/third_party/fwkacllib/inc/toolchain/prof_callback.h
index 3fad74bc..18550157 100644
--- a/third_party/fwkacllib/inc/toolchain/prof_callback.h
+++ b/third_party/fwkacllib/inc/toolchain/prof_callback.h
@@ -74,6 +74,7 @@ enum MsprofReporterCallbackType {
     MSPROF_REPORTER_REPORT = 0,    // report data
     MSPROF_REPORTER_INIT,          // init reporter
     MSPROF_REPORTER_UNINIT,        // uninit reporter
+    MSPROF_REPORTER_DATA_MAX_LEN,  // max data length for the report callback
 };

 /**
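The new enum value lets a client ask the profiling library how large a single reported record may be before it starts pushing data. Below is a minimal C++ sketch of that query. The callback typedef is a stand-in written for this example (the real MsprofReporterCallback is declared elsewhere in prof_callback.h and may differ), the include path and module id are assumptions, and whether the library writes the limit through the data pointer for this query type is our reading of the enum comment, not something the diff states.

// Illustration only; see the assumptions in the paragraph above.
#include <cstdint>
#include "toolchain/prof_callback.h"  // assumed include path for MSPROF_REPORTER_DATA_MAX_LEN

// Stand-in for the real MsprofReporterCallback typedef.
using ReporterCallbackFn = int32_t (*)(uint32_t moduleId, uint32_t type, void *data, uint32_t len);

// Ask the reporter for the maximum payload length of one report call.
// Returns 0 on failure so callers can fall back to a conservative chunk size.
uint32_t QueryReportDataMaxLen(ReporterCallbackFn callback, uint32_t moduleId) {
  uint32_t maxLen = 0;
  if (callback == nullptr) {
    return 0;
  }
  // Assumption: for MSPROF_REPORTER_DATA_MAX_LEN the library writes the limit into `maxLen`.
  if (callback(moduleId, MSPROF_REPORTER_DATA_MAX_LEN, &maxLen, sizeof(maxLen)) != 0) {
    return 0;
  }
  return maxLen;
}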
diff --git a/third_party/fwkacllib/inc/toolchain/prof_reporter.h b/third_party/fwkacllib/inc/toolchain/prof_reporter.h
index ff91351b..d5ed7569 100644
--- a/third_party/fwkacllib/inc/toolchain/prof_reporter.h
+++ b/third_party/fwkacllib/inc/toolchain/prof_reporter.h
@@ -41,42 +41,44 @@ namespace Engine {
 * the Reporter class .used to send data to profiling
 */
 class MSVP_PROF_API Reporter {
- public:
-  virtual ~Reporter() {}
+public:
+  virtual ~Reporter() {}

public:
  /**
   * @ingroup reporter
   * @name : Report
   * @brief : API of libmsprof. Reports data to libmsprof; this is a non-blocking call. \n
       The data is first appended to a cache; if the cache is full, the data is ignored.
   * @param data [IN] const ReporterData * the data sent to libmsprof
   * @retval PROFILING_SUCCESS 0 (success)
   * @retval PROFILING_FAILED -1 (failed)
   *
   * @par depend:
   * @li libmsprof
   * @li prof_reporter.h
   * @since c60
   * @see Flush
   */
  virtual int Report(const ReporterData *data) = 0;

  /**
   * @ingroup reporter
   * @name : Flush
   * @brief : API of libmsprof. Notifies libmsprof that data sending is finished; this is a blocking call. \n
       All cached data is written to a file or sent to the host.
   * @retval PROFILING_SUCCESS 0 (success)
   * @retval PROFILING_FAILED -1 (failed)
   *
   * @par depend:
   * @li libmsprof
   * @li prof_reporter.h
   * @since c60
   * @see ProfMgrStop
   */
  virtual int Flush() = 0;

  /**
   * @ingroup reporter
   * @name : GetReportDataMaxLen
   * @brief : Returns the maximum data length accepted by a single Report call.
   */
  virtual uint32_t GetReportDataMaxLen() = 0;
};
}  // namespace Engine
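Taken together, the Reporter contract is: Report() enqueues one record without blocking (and may drop it when the internal cache is full), Flush() blocks until everything cached has been written out, and the new GetReportDataMaxLen() tells callers how large a single record may be. The sketch below shows a caller honoring that contract; it is generic over the reporter and record types, so it makes no assumptions about the enclosing namespace or the ReporterData layout, and the helper name is hypothetical.

// Illustration only: any type exposing Report/Flush/GetReportDataMaxLen with
// the signatures shown in the diff above will work as ReporterT.
#include <cstdint>
#include <cstdio>

template <typename ReporterT, typename RecordT>
int SendOneRecordAndFlush(ReporterT &reporter, const RecordT &record, uint32_t payload_len) {
  // GetReportDataMaxLen(): upper bound for one Report() call (added by this patch).
  if (payload_len > reporter.GetReportDataMaxLen()) {
    std::fprintf(stderr, "payload too large, split it before reporting\n");
    return -1;
  }
  // Report() is non-blocking; data may be dropped when the cache is full.
  if (reporter.Report(&record) != 0) {  // 0 == PROFILING_SUCCESS per the comments above
    return -1;
  }
  // Flush() blocks until cached records are written to file or sent to the host.
  return reporter.Flush();
}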