diff --git a/third_party/fwkacllib/inc/ops/aipp.h b/third_party/fwkacllib/inc/ops/aipp.h index 478f6c83..bed984bd 100644 --- a/third_party/fwkacllib/inc/ops/aipp.h +++ b/third_party/fwkacllib/inc/ops/aipp.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/all_ops.h b/third_party/fwkacllib/inc/ops/all_ops.h index 614b06e2..1ac83783 100644 --- a/third_party/fwkacllib/inc/ops/all_ops.h +++ b/third_party/fwkacllib/inc/ops/all_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/array_ops.h b/third_party/fwkacllib/inc/ops/array_ops.h index 691b51f6..e1f64421 100644 --- a/third_party/fwkacllib/inc/ops/array_ops.h +++ b/third_party/fwkacllib/inc/ops/array_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/audio_ops.h b/third_party/fwkacllib/inc/ops/audio_ops.h index f05135d1..d9883253 100644 --- a/third_party/fwkacllib/inc/ops/audio_ops.h +++ b/third_party/fwkacllib/inc/ops/audio_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/third_party/fwkacllib/inc/ops/batch_ops.h b/third_party/fwkacllib/inc/ops/batch_ops.h index a4786cd3..8a1c5a7b 100644 --- a/third_party/fwkacllib/inc/ops/batch_ops.h +++ b/third_party/fwkacllib/inc/ops/batch_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/bitwise_ops.h b/third_party/fwkacllib/inc/ops/bitwise_ops.h index 39a28cf3..5c83e161 100644 --- a/third_party/fwkacllib/inc/ops/bitwise_ops.h +++ b/third_party/fwkacllib/inc/ops/bitwise_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/boosted_trees_ops.h b/third_party/fwkacllib/inc/ops/boosted_trees_ops.h index 08e54824..550e8b7d 100644 --- a/third_party/fwkacllib/inc/ops/boosted_trees_ops.h +++ b/third_party/fwkacllib/inc/ops/boosted_trees_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/third_party/fwkacllib/inc/ops/candidate_sampling_ops.h b/third_party/fwkacllib/inc/ops/candidate_sampling_ops.h index 890c52ae..e20607bf 100644 --- a/third_party/fwkacllib/inc/ops/candidate_sampling_ops.h +++ b/third_party/fwkacllib/inc/ops/candidate_sampling_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/condtake_ops.h b/third_party/fwkacllib/inc/ops/condtake_ops.h index 029cffbf..5e91eb07 100644 --- a/third_party/fwkacllib/inc/ops/condtake_ops.h +++ b/third_party/fwkacllib/inc/ops/condtake_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/control_flow_ops.h b/third_party/fwkacllib/inc/ops/control_flow_ops.h index c0b6ad72..7196b14f 100644 --- a/third_party/fwkacllib/inc/ops/control_flow_ops.h +++ b/third_party/fwkacllib/inc/ops/control_flow_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/third_party/fwkacllib/inc/ops/ctc_ops.h b/third_party/fwkacllib/inc/ops/ctc_ops.h index c6a265cc..2c75fd09 100644 --- a/third_party/fwkacllib/inc/ops/ctc_ops.h +++ b/third_party/fwkacllib/inc/ops/ctc_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/data_flow_ops.h b/third_party/fwkacllib/inc/ops/data_flow_ops.h index 02d2bfdd..bb937a75 100644 --- a/third_party/fwkacllib/inc/ops/data_flow_ops.h +++ b/third_party/fwkacllib/inc/ops/data_flow_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -2240,6 +2240,64 @@ REG_OP(OutfeedEnqueueOp) .ATTR(channel_name, String, "") .OP_END_FACTORY_REG(OutfeedEnqueueOp) +/** +*@brief LruCache, create cache resource. +*@par Inputs: +*No input. +*@par Attributes: +*cache_size: cache size An optional "int64". Defaults to "100000". +*load_factor: rate which show if cache is full An optional "float", Defaults to "1". +*@par Outputs: +*cache: cache resource. +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(LruCache) + .OUTPUT(cache, TensorType({DT_RESOURCE})) + .ATTR(container, String, "") + .ATTR(shared_name, String, "LruCache") + .ATTR(cache_size, Int, 100000) + .ATTR(load_factor, Float, 1) + .OP_END_FACTORY_REG(LruCache) + +/** +*@brief CacheAdd, get id new come in cache and id get out of cache. +*@par Inputs: +*cache: resource data +*ids: Tensor stored id need to insert cache +*@par Outputs: +*swap_in_id: id come in cache. 
+*swap_in_idx: id in cache which come in cache +*swap_out_id: id get out of cache +*swap_out_idx: id in cache which get out of cache +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(CacheAdd) + .INPUT(cache, TensorType({DT_RESOURCE})) + .INPUT(ids, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32})) + .OUTPUT(swap_in_id, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32})) + .OUTPUT(swap_in_idx, TensorType({DT_INT64})) + .OUTPUT(swap_out_id, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32})) + .OUTPUT(swap_out_idx, TensorType({DT_INT64})) + .OP_END_FACTORY_REG(CacheAdd) + +/** +*@brief CacheRemoteIndexToLocal, get id in cache from id. +*@par Inputs: +*cache: resource data +*ids: Tensor stored id need to insert cache +*@par Outputs: +*local_idx: id in cache. +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(CacheRemoteIndexToLocal) + .INPUT(cache, TensorType({DT_RESOURCE})) + .INPUT(ids, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32})) + .OUTPUT(local_idx, TensorType({DT_INT64})) + .OP_END_FACTORY_REG(CacheRemoteIndexToLocal) + } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_DATA_FLOW_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/elewise_calculation_ops.h b/third_party/fwkacllib/inc/ops/elewise_calculation_ops.h index 07fab272..c64bc138 100644 --- a/third_party/fwkacllib/inc/ops/elewise_calculation_ops.h +++ b/third_party/fwkacllib/inc/ops/elewise_calculation_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -2802,6 +2802,80 @@ REG_OP(AdamApplyOneAssign) .OUTPUT(output2, TensorType({DT_FLOAT16,DT_FLOAT})) .OP_END_FACTORY_REG(AdamApplyOneAssign) +/** +*@brief A fusion operator for bert lamb. 
\n + +*@par Inputs: +*Ten inputs, including: +* @li input0: A Tensor. Must be one of the following types: float16, float32. +* @li input1: A Tensor. Must be one of the following types: float16, float32. +* @li input2: A Tensor. Must be one of the following types: float16, float32. +* @li input3: A Tensor. Must be one of the following types: float16, float32. +* @li input4: A Tensor. Must be one of the following types: float16, float32. +* @li mul0_x: A Tensor. Must be one of the following types: float16, float32. +* @li mul1_x: A Tensor. Must be one of the following types: float16, float32. +* @li mul2_x: A Tensor. Must be one of the following types: float16, float32. +* @li mul3_x: A Tensor. Must be one of the following types: float16, float32. +* @li steps: A Tensor. Must be one of the following types: float16, float32. +* @li do_use_weight: A Tensor. Must be one of the following types: float16, float32. +* @li weight_decay_rate: A Tensor. Must be one of the following types: float16, float32. +* @li add2_y: A Tensor. Must be one of the following types: float16, float32. \n + +*@par Outputs: +*Three outputs, including: +* @li output0: A Tensor. Must be one of the following types: float16, float32. \n + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. 
+*/ +REG_OP(LambApplyOptimizerAssign) + .INPUT(input0, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(input1, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(input2, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(input3, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(mul0_x, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(mul1_x, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(mul2_x, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(mul3_x, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(add2_y, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(steps, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(do_use_weight, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(weight_decay_rate, TensorType({DT_FLOAT16,DT_FLOAT})) + .OUTPUT(output0, TensorType({DT_FLOAT16,DT_FLOAT})) + .OP_END_FACTORY_REG(LambApplyOptimizerAssign) + +/** +*@brief A fusion operator for bert lamb. \n + +*@par Inputs: +*Ten inputs, including: +* @li input0: A Tensor. Must be one of the following types: float16, float32. +* @li input1: A Tensor. Must be one of the following types: float16, float32. +* @li input2: A Tensor. Must be one of the following types: float16, float32. +* @li input3: A Tensor. Must be one of the following types: float16, float32. +* @li input4: A Tensor. Must be one of the following types: float16, float32. +* @li mul0_x: A Tensor. Must be one of the following types: float16, float32. +* @li mul1_x: A Tensor. Must be one of the following types: float16, float32. +* @li mul2_x: A Tensor. Must be one of the following types: float16, float32. +* @li mul3_x: A Tensor. Must be one of the following types: float16, float32. +* @li steps: A Tensor. Must be one of the following types: float16, float32. +* @li do_use_weight: A Tensor. Must be one of the following types: float16, float32. +* @li weight_decay_rate: A Tensor. Must be one of the following types: float16, float32. +* @li add2_y: A Tensor. Must be one of the following types: float16, float32. 
\n + +*@par Outputs: +*No outputs +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(LambApplyWeightAssign) + .INPUT(input0, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(input1, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(input2, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(input3, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(input4, TensorType({DT_FLOAT16,DT_FLOAT})) + .OP_END_FACTORY_REG(LambApplyWeightAssign) + /** *@brief Confuse select, maximum, greater and sqrt. \n diff --git a/third_party/fwkacllib/inc/ops/functional_ops.h b/third_party/fwkacllib/inc/ops/functional_ops.h index b09ac058..598d3ad3 100644 --- a/third_party/fwkacllib/inc/ops/functional_ops.h +++ b/third_party/fwkacllib/inc/ops/functional_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/get_data_ops.h b/third_party/fwkacllib/inc/ops/get_data_ops.h index e5518ef8..33dc4f14 100644 --- a/third_party/fwkacllib/inc/ops/get_data_ops.h +++ b/third_party/fwkacllib/inc/ops/get_data_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/hcom_ops.h b/third_party/fwkacllib/inc/ops/hcom_ops.h index a8fc1106..b90b225e 100644 --- a/third_party/fwkacllib/inc/ops/hcom_ops.h +++ b/third_party/fwkacllib/inc/ops/hcom_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -89,6 +89,10 @@ REG_OP(HcomAllReduce) * @par Attributes: * @li root_rank: A required integer identifying the root rank in the op input of this rank will be broadcast to other ranks. + * @li fusion: A required integer identifying if the op need to fusion, the + default value is none fusion + * @li fusion_id: A required integer identifying the fusion id if para fusion + is set. * @li group: A required string identifying the group name of ranks participating in the op. * @par Outputs: @@ -103,6 +107,8 @@ REG_OP(HcomBroadcast) .DYNAMIC_OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16, DT_INT64, DT_UINT64})) .REQUIRED_ATTR(root_rank, Int) .REQUIRED_ATTR(group, String) + .ATTR(fusion, Int, 0) + .ATTR(fusion_id, Int, -1) .ATTR(alpha, Float, 1.0) .ATTR(beta, Float, 0.0) .OP_END_FACTORY_REG(HcomBroadcast) @@ -213,6 +219,14 @@ REG_OP(HcomRemoteRead) .REQUIRED_ATTR(dtype, Type) .OP_END_FACTORY_REG(HcomRemoteRead) +REG_OP(HcomRemoteRefRead) + .INPUT(remote, TensorType({DT_UINT64})) + .INPUT(cache_var, TensorType({DT_UINT64})) + .INPUT(local_offset, TensorType({DT_UINT64})) + .OUTPUT(cache_var, TensorType({DT_UINT64})) + .REQUIRED_ATTR(dtype, Type) + .OP_END_FACTORY_REG(HcomRemoteRefRead) + /** * @brief Performs Remote Write of input tensors * @par Inputs: @@ -225,5 +239,11 @@ REG_OP(HcomRemoteWrite) .INPUT(local, TensorType::ALL()) .OP_END_FACTORY_REG(HcomRemoteWrite) +REG_OP(HcomRemoteScatterWrite) + .INPUT(remote, TensorType({DT_INT64, DT_UINT64})) + .INPUT(local, TensorType::ALL()) + .OPTIONAL_INPUT(local_offset, TensorType({DT_UINT64})) + .OP_END_FACTORY_REG(HcomRemoteScatterWrite) + } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_HCOM_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/hvd_ops.h b/third_party/fwkacllib/inc/ops/hvd_ops.h index 00299ef7..a49ec5ed 100644 --- a/third_party/fwkacllib/inc/ops/hvd_ops.h +++ b/third_party/fwkacllib/inc/ops/hvd_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 
2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/image_ops.h b/third_party/fwkacllib/inc/ops/image_ops.h index a29c8553..ce3262f9 100644 --- a/third_party/fwkacllib/inc/ops/image_ops.h +++ b/third_party/fwkacllib/inc/ops/image_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/internal_ops.h b/third_party/fwkacllib/inc/ops/internal_ops.h index bcc3f1c3..9dde14a5 100644 --- a/third_party/fwkacllib/inc/ops/internal_ops.h +++ b/third_party/fwkacllib/inc/ops/internal_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/linalg_ops.h b/third_party/fwkacllib/inc/ops/linalg_ops.h index d8f45c5d..7a6fbc59 100644 --- a/third_party/fwkacllib/inc/ops/linalg_ops.h +++ b/third_party/fwkacllib/inc/ops/linalg_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/third_party/fwkacllib/inc/ops/logging_ops.h b/third_party/fwkacllib/inc/ops/logging_ops.h index 03be7757..bc8ae2b8 100644 --- a/third_party/fwkacllib/inc/ops/logging_ops.h +++ b/third_party/fwkacllib/inc/ops/logging_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/lookup_ops.h b/third_party/fwkacllib/inc/ops/lookup_ops.h index 5d928e5a..b37ab048 100644 --- a/third_party/fwkacllib/inc/ops/lookup_ops.h +++ b/third_party/fwkacllib/inc/ops/lookup_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/math_ops.h b/third_party/fwkacllib/inc/ops/math_ops.h index 330d85e7..149e0e37 100644 --- a/third_party/fwkacllib/inc/ops/math_ops.h +++ b/third_party/fwkacllib/inc/ops/math_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -495,51 +495,51 @@ REG_OP(NextAfter) .OP_END_FACTORY_REG(NextAfter) /** - * *@brief Compute element-wise finiteness, return a boolean tensor. - * - * *@par Inputs: - * *x:A Tensor. - * - * *@par Outputs: - * *y:A Tensor. Has the same shape as x. - * - * *@par Third-party framework compatibility. - * *Compatible with tensorflow IsFinite operator. - * */ + *@brief Compute element-wise finiteness, return a boolean tensor. + + *@par Inputs: + *x:A Tensor. + + *@par Outputs: + *y:A Tensor. Has the same shape as x. 
+ + *@par Third-party framework compatibility. + *Compatible with tensorflow IsFinite operator. + */ REG_OP(IsFinite) .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) .OUTPUT(y, TensorType({DT_BOOL})) .OP_END_FACTORY_REG(IsFinite) /** - * *@brief Compute element-wise infiniteness, return a boolean tensor. - * - * *@par Inputs: - * *x:A Tensor. - * - * *@par Outputs: - * *y:A Tensor. Has the same shape as x. - * - * *@par Third-party framework compatibility. - * *Compatible with tensorflow IsInf operator. - * */ + *@brief Compute element-wise infiniteness, return a boolean tensor. + + *@par Inputs: + *x:A Tensor. + + *@par Outputs: + *y:A Tensor. Has the same shape as x. + + *@par Third-party framework compatibility. + *Compatible with tensorflow IsInf operator. + */ REG_OP(IsInf) .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) .OUTPUT(y, TensorType({DT_BOOL})) .OP_END_FACTORY_REG(IsInf) /** - * *@brief Computes the complex absolute value of a tensor. - * - * *@par Inputs: - * *x:A Tensor. - * - * *@par Outputs: - * *y:A tensor of type `float` or `double` that is the absolute value of each element in `x`. - * - * *@par Third-party framework compatibility. - * *Compatible with tensorflow ComplexAbs operator. - * */ + *@brief Computes the complex absolute value of a tensor. + + *@par Inputs: + *x:A Tensor. + + *@par Outputs: + *y:A tensor of type `float` or `double` that is the absolute value of each element in `x`. + + *@par Third-party framework compatibility. + *Compatible with tensorflow ComplexAbs operator. + */ REG_OP(ComplexAbs) .INPUT(x, TensorType({DT_COMPLEX64, DT_COMPLEX128})) .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE})) @@ -547,34 +547,34 @@ REG_OP(ComplexAbs) .OP_END_FACTORY_REG(ComplexAbs) /** - * *@brief Returns which elements of x are NaN. - * - * *@par Inputs: - * *x:A Tensor. - * - * *@par Outputs: - * *y:A Tensor. Has the same shape as x. - * - * *@par Third-party framework compatibility. 
- * *Compatible with tensorflow IsNan operator. - * */ + *@brief Returns which elements of x are NaN. + + *@par Inputs: + *x:A Tensor. + + *@par Outputs: + *y:A Tensor. Has the same shape as x. + + *@par Third-party framework compatibility. + *Compatible with tensorflow IsNan operator. + */ REG_OP(IsNan) .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) .OUTPUT(y, TensorType({DT_BOOL})) .OP_END_FACTORY_REG(IsNan) /** - * *@brief Returns the real part of a complex number. - * - * *@par Inputs: - * *input:A Tensor. - * - * *@par Outputs: - * *output:A Tensor. Has the same shape as input. - * - * *@par Third-party framework compatibility. - * *Compatible with tensorflow Real operator. - * */ + *@brief Returns the real part of a complex number. + + *@par Inputs: + *input:A Tensor. + + *@par Outputs: + *output:A Tensor. Has the same shape as input. + + *@par Third-party framework compatibility. + *Compatible with tensorflow Real operator. + */ REG_OP(Real) .INPUT(input, TensorType({DT_COMPLEX64, DT_COMPLEX128})) .OUTPUT(output, TensorType({DT_FLOAT, DT_DOUBLE})) @@ -582,17 +582,17 @@ REG_OP(Real) .OP_END_FACTORY_REG(Real) /** - * *@brief Returns the complex conjugate of a complex number. - * - * *@par Inputs: - * *input:A Tensor. - * - * *@par Outputs: - * *output:A Tensor. Has the same shape as input. - * - * *@par Third-party framework compatibility. - * *Compatible with tensorflow output operator. - * */ + *@brief Returns the complex conjugate of a complex number. + + *@par Inputs: + *input:A Tensor. + + *@par Outputs: + *output:A Tensor. Has the same shape as input. + + *@par Third-party framework compatibility. + *Compatible with tensorflow output operator. 
+ */ REG_OP(Conj) .INPUT(input, TensorType({DT_COMPLEX64, DT_COMPLEX128})) .OUTPUT(output, TensorType({DT_COMPLEX64, DT_COMPLEX128})) @@ -692,6 +692,135 @@ REG_OP(IFMR) .REQUIRED_ATTR(search_step, Float) .REQUIRED_ATTR(with_offset, Bool) .OP_END_FACTORY_REG(IFMR) + +/** +*@brief weights adaptive range quantization. \n + +*@par Inputs: +*@li w:A Tensor of weights. \n +*@li w_min:A Tensor of weights reduce_min. \n +*@li w_max:A Tensor of weights reduce_max. \n + +*@par Attributes: +*num_bits: the bits num used for quantize. +*offset_flag: whether using offset. \n + +*@par Outputs: +*y: fake quantized weights. \n + +*@par Third-party framework compatibility +*Compatible with mindspore +*/ + +REG_OP(WtsARQ) + .INPUT(w, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(w_min, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(w_max, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(num_bits, Int, 8) + .ATTR(offset_flag, Bool, false) + .OP_END_FACTORY_REG(WtsARQ) + +/** +*@brief The acts_ulq. \n + +*@par Inputs: +*@li x:A Tensor of feature map +*@li clamp _min:A Tensor of min clamp value of feature map. +*@li clamp _max:A Tensor of max clamp value of feature map. + +*@par Attributes: +*fixed_min: fix min to zero. +*num_bits: quant bits. \n + +*@par Outputs: +*y: output fake quant feature map. +*clamp_min_mask: where x > clamp_min +*clamp_min_mask: where x < clamp_max +*x_clamped_loss: clamp loss. 
\n + +*@par Third-party framework compatibility +*Compatible with mindspore +*/ + +REG_OP(ActsULQ) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(clamp_min, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(clamp_max, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(clamp_min_mask, TensorType({DT_BOOL})) + .OUTPUT(clamp_max_mask, TensorType({DT_BOOL})) + .OUTPUT(x_clamped_loss, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(fixed_min, Bool, false) + .ATTR(num_bits, Int, 8) + .OP_END_FACTORY_REG(ActsULQ) + +/** +*@brief The acts_ulq_input_grad. \n + +*@par Inputs: +*@li y_grad: A Tensor of gradient +*@li clamp_min_mask: A Tensor of boolean mask indicating whether an additional one is needed. +*@li clamp_max_mask: A Tensor of boolean mask indicating whether an additional one is needed. + +*@par Outputs: +*x_grad: The gradient of inputs. \n + +*@par Third-party framework compatibility +*Compatible with mindspore +*/ + +REG_OP(ActsULQInputGrad) + .INPUT(y_grad, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(clamp_min_mask, TensorType({DT_BOOL})) + .INPUT(clamp_max_mask, TensorType({DT_BOOL})) + .OUTPUT(x_grad, TensorType({DT_FLOAT16, DT_FLOAT})) + .OP_END_FACTORY_REG(ActsULQInputGrad) + +/** +*@brief The act_ulq_clamp_max_grad. \n + +*@par Inputs: +*@li y_grad: A Tensor of gradient +*@li clamp_max_mask: A Tensor of boolean mask indicating whether an additional one is needed. +*@li x_clamped_loss: A Tensor of gradient. \n + +*@par Outputs: +*clamp_max_grad: The gradient of clamp max. \n + +*@par Third-party framework compatibility +*Compatible with mindspore +*/ + +REG_OP(ActULQClampMaxGrad) + .INPUT(y_grad, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(clamp_max_mask, TensorType({DT_BOOL})) + .INPUT(x_clamped_loss, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(clamp_max_grad, TensorType({DT_FLOAT16, DT_FLOAT})) + .OP_END_FACTORY_REG(ActULQClampMaxGrad) + +/** +*@brief The act_ulq_clamp_min_grad. 
\n + +*@par Inputs: +*@li y_grad: A Tensor of gradient +*@li clamp_min_mask: A Tensor of boolean mask indicating whether an additional one is needed. +*@li x_clamped_loss: A Tensor of gradient. \n + +*@par Outputs: +*clamp_min_grad: The gradient of clamp min. \n + +*@par Third-party framework compatibility +*Compatible with mindspore +*/ + +REG_OP(ActULQClampMinGrad) + .INPUT(y_grad, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(clamp_min_mask, TensorType({DT_BOOL})) + .INPUT(x_clamped_loss, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(clamp_min_grad, TensorType({DT_FLOAT16, DT_FLOAT})) + .OP_END_FACTORY_REG(ActULQClampMinGrad) + } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_MATH_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/matrix_calculation_ops.h b/third_party/fwkacllib/inc/ops/matrix_calculation_ops.h index daf0939c..ed23d3f6 100644 --- a/third_party/fwkacllib/inc/ops/matrix_calculation_ops.h +++ b/third_party/fwkacllib/inc/ops/matrix_calculation_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/nn_batch_norm_ops.h b/third_party/fwkacllib/inc/ops/nn_batch_norm_ops.h index a35cee03..0c6a5dff 100644 --- a/third_party/fwkacllib/inc/ops/nn_batch_norm_ops.h +++ b/third_party/fwkacllib/inc/ops/nn_batch_norm_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/third_party/fwkacllib/inc/ops/nn_calculation_ops.h b/third_party/fwkacllib/inc/ops/nn_calculation_ops.h index bd361f5d..35296870 100644 --- a/third_party/fwkacllib/inc/ops/nn_calculation_ops.h +++ b/third_party/fwkacllib/inc/ops/nn_calculation_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -310,9 +310,6 @@ REG_OP(DepthwiseConv2DBackpropInputD) * @par Third-party framework compatibility * @li Compatible with the TensorFlow operator DepthwiseConv2D. * @li Compatible with the Caffe operator DepthwiseConv2D. -* -* @par Restrictions: -* Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. */ REG_OP(DepthwiseConv2D) .INPUT(x, TensorType({DT_FLOAT16, DT_INT8})) @@ -585,103 +582,105 @@ REG_OP(Conv2DBackpropFilterD) /** *@brief Computes a 2D convolution given 4D "x" and "filter" tensors. *@par Inputs: -*@li x: A 4D tensor of input images. With "NHWC" format, the shape is -* [batch, in_height, in_width, in_channels]. -*@li filter: A 4D tensor of filters. Has the same type as "x". With "HWCN" -* format, the shape is [filter_height, filter_width, in_channels, -* out_channels]. - -*@li bias: An optional 1D tensor. Shape is [out_channels]. -*@li offset_w: An optional 1D tensor for quantized convolution. Shape is -* [out_channels]. Not supported. +*@li x: A 4D tensor of input image. With the format "NHWC", the data is stored +* in the order of: [batch, in_height, in_width, in_channels]. +*@li filter: A 4D tensor of learnable filters. Must have the same type as "x". +* With the format "HWCN" , the data is stored in the order of: [filter_height, +* filter_width, in_channels / groups, out_channels]. +*@li bias: An optional 1D tensor of additive biases to the filter outputs. +* The data is stored in the order of: [out_channels]. 
+*@li offset_w: Reserved. *\n *\n -* Note that there is a strict data type mapping between the input and output -* tensors: +* The following are the supported data types and data formats: *@verbatim - |Tensor | x | filter | bias | offset_w | y - -----------|---------|---------|---------|----------|-------- - |Data Type | float16 | float16 | float16 | _ | float16 - | |---------|---------|---------|----------|-------- - | | float32 | float32 | float32 | _ | float32 - | |---------|---------|---------|----------|-------- - | | int8 | int8 | int32 | int8 | int32 - -----------|---------|---------|---------|----------|-------- - |Format | NCHW | NCHW | ND | ND | NCHW - | | NHWC | HWCN | | | NHWC + | Tensor | x | filter | bias | y + ------------|---------|---------|---------|-------- + | Data Type | float16 | float16 | float16 | float16 + | |---------|---------|---------|-------- + | | float32 | float32 | float32 | float32 + | |---------|---------|---------|-------- + | | int8 | int8 | int32 | int32 + ------------|---------|---------|---------|-------- + | Format | NCHW | NCHW | ND | NCHW + | | NHWC | HWCN | | NHWC @endverbatim -* Type float32 is allowed only in mixed precision (float32->float16) scenarios. -* Mixed precision is enabled by default. -* \n +* For float32 type, the actual calculation on the chip is based on +* float16. For int8, a dequant or requant operator must be followed. +*\n * *@par Attributes: -*@li strides: Required. A list of 4 integers. Specifying the strides of the -* convolution along the height and width. The dimension order is determined -* by the data format of "x". By default the N and C dimensions are set to 1. -*@li pads: Required. A list of 4 integers. Specifying the top, bottom, left -* and right padding. -* @li dilations: Optional. A list of 4 integers. Specifying the dilation rate -* to use for dilated convolution. Has the same dimension order and value as -* "strides". Dilation > 1 is not supported for quantized convolution. 
Defaults -* to [1, 1, 1, 1]. -* @li groups: Optional. An integer of type int32, for the number of blocked -* connections from input channels to output channels. Input channels and output -* channels must both be divisible by "groups". "x" in_channels must be equal to -* "filter" in_channels * groups. Defaults to 1. -* @li offset_x: Optional. An integer of type int32, for quantized convolution. -* Defaults to 0. -* @li data_format: Reserved and optional. A string from: "NHWC" and "NCHW". -* Specifying the data format of the input and output images. Defaults to -* "NHWC". +*@li strides: Required. A list of 4 integers. The stride of the sliding window +* for each dimension of input. The dimension order is determined by the data +* format of "x". The N and C dimensions must be set to 1. +*@li pads: Required. A list of 4 integers. The number of pixels to add to each +* (top, bottom, left, right) side of the input. +*@li dilations: Optional. A list of 4 integers. The dilation factor for each +* dimension of input. The dimension order is determined by the data format of +* "x". The N and C dimensions must be set to 1. The H and W dimensions must be +* set to 1 for int8 type. Defaults to [1, 1, 1, 1]. +*@li groups: Optional. An integer of type int32. The number of blocked +* connections from input channels to output channels. In_channels and +* out_channels must both be divisible by "groups". Defaults to 1. +*@li offset_x: Optional. An integer of type int32. The negative offset added +* to the input image for int8 type. Ensure that the output is within the +* effective range. Defaults to 0. +*@li data_format: Reserved. 
*\n *\n * The following value range restrictions must be met: *@verbatim - |Name | Field | Scope - ------------------|----------|---------- - |Input Image Size | H | [1, 100000] - | | W | [1, 4096] - ------------------|----------|---------- - |Filter Size | H | [1, 255] - | | W | [1, 255] - ------------------|----------|---------- - |Stride | H | [1, 63] - | | W | [1, 63] - ------------------|----------|---------- - |Padding | top | [0, 255] - | | bottom | [0, 255] - | | left | [0, 255] - | | right | [0, 255] - ------------------|----------|---------- - |Dilation | H | [1, 255] - | | W | [1, 255] + | Name | Field | Scope + -------------------|----------|-------------- + | Input Image Size | H | [1, 100000] + | | W | [1, 4096] + -------------------|----------|-------------- + | Filter Size | H | [1, 255] + | | W | [1, 255] + -------------------|----------|-------------- + | Stride | H | [1, 63] + | | W | [1, 63] + -------------------|----------|-------------- + | Padding | Top | [0, 255] + | | Bottom | [0, 255] + | | Left | [0, 255] + | | Right | [0, 255] + -------------------|----------|-------------- + | Dilation | H | [1, 255] + | | W | [1, 255] + -------------------|----------|-------------- + | Offset_x | | [-128, 127] + @endverbatim +*\n * *@par Outputs: -*@li y: A 4D Tensor of output images. Has the same type and format as "x". With -* "NHWC" format, the shape is [batch, out_height, out_width, out_channels]. +*@li y: A 4D Tensor of output feature map. Has the same type as "x". With the +* format "NHWC", the data is stored in the order of: [batch, out_height, +* out_width, out_channels]. 
*\n -* out_height = (in_height + top_pad + bottom_pad - -* dilation_h * (filter_height - 1) - 1) +* out_height = (in_height + pad_top + pad_bottom - +* (dilation_h * (filter_height - 1) + 1)) * / stride_h + 1 *\n -* out_width = (in_width + left_pad + right_pad - -* dilation_w * (filter_width - 1) - 1) -* / stride_w + 1 +* out_width = (in_width + pad_left + pad_right - +* (dilation_w * (filter_width - 1) + 1)) +* / stride_w + 1 * *@attention Constraints: *@li The following restrictions on the output must be met: *@verbatim - | Output | Restrictions - -------------------|--------------------------- - | W dimension == 1 | H*W(input) == H*W(filter) - | H dimension == 1 | - -------------------|--------------------------- - | W dimension == 1 | Not supported - | H dimension != 1 | + | Output | Restrictions + ----------|-------------------------------- + | H == 1 | H * W(input) == H * W(filter) + | W == 1 | + ----------|-------------------------------- + | H != 1 | W(input) == W(filter) + | W == 1 | Only for Ascend310 Hi3796V300CS @endverbatim * "H * W (input)" indicates the image size after padding and "H * W (filter)" -* indicates the filter size after dilation. +* indicates the filter size after dilation."W(input)" and W(filter) indicate +* the same rule on the W dimension. *\n * *@par Quantization supported or not @@ -770,113 +769,122 @@ REG_OP(Conv2DCompress) .OP_END_FACTORY_REG(Conv2DCompress) /** -*@brief Computes a 2D convolution given 4D "x", "filter" and "offsets" -* tensors. +*@brief Computes a 2D deformable convolution given 4D "x", "filter" and +* "offsets" tensors. *@par Inputs: -* @li x: A 4D tensor of input images. With shape of -* [batch, in_height, in_width, in_channels] when format is "NHWC". -* @li filter: A 4D tensor of filters. Must have the same type as "x". With -* shape of [filter_height, filter_width, in_channels, out_channels] when format -* is "HWCN". -* @li offsets: A 4D tensor of offsets. 
With shape of -* [batch, deformable_groups * filter_height * filter_width * 3, in_height, -* in_width] when format is "NCHW". -* @li bias: An optional 1D tensor. Shape is [out_channels]. -* -* The input and output tensor attributes are listed as follows: -* @verbatim - |Tensor | x | filter | offsets | bias | y - -----------|---------|---------|---------|----------|-------- - |Data Type | float16 | float16 | float16 | float16 | float16 - -----------|---------|---------|---------|----------|-------- - |Format | NCHW | NCHW | NCHW | ND | NCHW - | | NHWC | HWCN | | | NHWC +*@li x: A 4D tensor of input image. With the format "NHWC", the data is stored +* in the order of: [batch, in_height, in_width, in_channels]. +*@li filter: A 4D tensor of learnable filters. Must have the same type as "x". +* With the format "HWCN" , the data is stored in the order of: [filter_height, +* filter_width, in_channels / groups, out_channels]. +*@li offsets: A 4D tensor of x-y coordinates offset and mask. With the format +* "NHWC", the data is stored in the order of: [batch, in_height, in_width, +* deformable_groups * filter_height * filter_width * 3]. +*@li bias: An optional 1D tensor of additive biases to the filter outputs. +* The data is stored in the order of: [out_channels]. +*\n +*\n +* The following are the supported data types and data formats: +*@verbatim + | Tensor | x | filter | offsets | bias | y + ------------|---------|---------|---------|----------|-------- + | Data Type | float16 | float16 | float16 | float16 | float16 + | |---------|---------|---------|----------|-------- + | | float32 | float32 | float32 | float32 | float32 + ------------|---------|---------|---------|----------|-------- + | Format | NCHW | NCHW | NCHW | ND | NCHW + | | NHWC | HWCN | NHWC | | NHWC @endverbatim -* It should be noted that the data types must correspond to each other, but -* the format does not need to. 
- +* For float32 type, the actual convolution calculation part on the chip is +* based on float16. +*\n +* *@par Attributes: -* @li strides: Required. A list of 4 integers. Specifying the strides of the -* convolution along the height and width. The dimension order is determined -* by the data format of "x". By default the N and C dimensions are set to 1. -* @li pads: Required. A list of 4 integers. Specifying the top, bottom, left -* and right padding. -* @li dilations: Optional. A list of 4 integers. Specifying the dilation rate -* to use for dilated convolution. Has the same dimension order and value as -* "strides". -* @li groups: Optional. Number of blocked connections from input channels to -* output channels. Input channels and output channels must both be divisible -* by "groups".Type is int32. -* @li data_format: Optional. An optional string from: "NHWC", "NCHW". Specifying the -* data format of the input and output images. Type is string. Defaults to -* "NHWC". Reserved. -* @li deformable_groups: Optional. Cut the c chanel of input X into deformable_groups, -* each share a different offsets. Input channels must be divisible by -* "deformable_groups". Type is int32. - -*@par Outputs: -* @li y: A 4D Tensor of output images. Must have the same type and format as -* "x". With shape of [batch, out_channels, out_height, out_width] when format -* is "NHWC". 
-* @li output_height = (in_height + top_pad + botton_pad - -* dilation_h * (filter_height - 1) -1) / stride_h + 1 -* @li output_width = (in_width + left_pad + right_pad - -* dilation_w * (filter_width - 1) -1) / stride_w + 1 - -*@attention -* @li The parameter scope is listed as follows: -* @verbatim - |Name | Field | Scope - ------------------|--------------|---------------------------------------- - |Input Image Size | H dimension | 1 <= in_height * filter_height <= 4096 - | | W dimension | 1 <= in_width * filter_width <=4096 - ------------------|--------------|---------------------------------------- - |Filter Size | H dimension | [1, 255] - | | W dimension | [1, 255] - ------------------|--------------|---------------------------------------- - |offsets Size | C dimension | offsets_c = deformable_groups * - | | | filter_width * filter_height * 3 - | | H dimension | the same as output H dimension - | | W dimension | the same as output W dimension - ------------------|--------------|---------------------------------------- - |Stride Size | H dimension | [1, 63] - | | W dimension | [1, 63] - ------------------|--------------|---------------------------------------- - |Padding Size | top side | [0, 255] - | | bottom side | [0, 255] - | | left side | [0, 255] - | | right side | [0, 255] - ------------------|--------------|---------------------------------------- - |Dilation Size | H dimension | [1, 255] - | | W dimension | [1, 255] +*@li strides: Required. A list of 4 integers. The stride of the sliding window +* for each dimension of input. The dimension order is interpreted according to +* the data format of "x". The N and C dimensions must be set to 1. +*@li pads: Required. A list of 4 integers. The number of pixels to add to each +* (top, bottom, left, right) side of the input. +*@li dilations: Optional. A list of 4 integers. The dilation factor for each +* dimension of input. The dimension order is interpreted according to the data +* format of "x". 
The N and C dimensions must be set to 1. Defaults to +* [1, 1, 1, 1]. +*@li groups: Optional. An integer of type int32. The number of blocked +* connections from input channels to output channels. In_channels and +* out_channels must both be divisible by "groups". Defaults to 1. +*@li data_format: Reserved. +*@li deformable_groups: Optional. An integer of type int32. The number of +* deformable group partitions. In_channels must be divisible by +* "deformable_groups". Defaults to 1. +*\n +*\n +* The following value range restrictions must be met: +*@verbatim + | Name | Field | Scope + --------------------|--------|---------------------------- + | Input Image Size | H | [1, 100000] + | | W | [1, 4096] + --------------------|--------|---------------------------- + | Filter Size | H | [1, 255] + | | W | [1, 255] + --------------------|--------|---------------------------- + | Stride | H | [1, 63] + | | W | [1, 63] + --------------------|--------|---------------------------- + | Padding | Top | [0, 255] + | | Bottom | [0, 255] + | | Left | [0, 255] + | | Right | [0, 255] + --------------------|--------|---------------------------- + | Dilation | H | [1, 255] + | | W | [1, 255] @endverbatim - -* @li There are restrictions for certain scenarios: - * @verbatim - | Output | Restrictions - -------------------|--------------------------- - | W dimension == 1 | HxW(input) == HxW(filter) - | H dimension == 1 | - -------------------|--------------------------- - | W dimension == 1 | Not supported - | H dimension != 1 | +* "W(input)" indicates the image width after padding and W(filter) indicates the +* filter width after dilation. +*\n +* +*@par Outputs: +*@li y: A 4D Tensor of output feature map. Has the same type as "x". With the +* format "NHWC", the data is stored in the order of: [batch, out_height, +* out_width, out_channels].
+*\n +* out_height = (in_height + pad_top + pad_bottom - +* (dilation_h * (filter_height - 1) + 1)) +* / stride_h + 1 +*\n +* out_width = (in_width + pad_left + pad_right - +* (dilation_w * (filter_width - 1) + 1)) +* / stride_w + 1 +* +*@attention Constraints: +*@li The following restrictions on the output must be met: +*@verbatim + | Output | Restrictions + ----------|-------------------------------- + | H == 1 | H * W(input) == H * W(filter) + | W == 1 | + ----------|-------------------------------- + | H != 1 | W(input) == W(filter) + | W == 1 | Only for Ascend310 Hi3796V300CS @endverbatim -* As shown above, "HxW(input)" indicates the image size after padding and -* "HxW(filter)" indicates the filter size after dilation. - +* "H * W(input)" indicates the image size after padding and "H * W(filter)" +* indicates the filter size after dilation. "W(input)" and W(filter) indicate +* the same rule on the W dimension. +* *@par Quantization supported or not -* Yes - +*@li No +* *@par Third-party framework compatibility -*@li Compatible with the TensorFlow operator "conv2d". -*@li Compatible with the Caffe operator 2D "Convolution". +*@li Compatible with the Mxnet operator "DeformableConvolution". +*@li Compatible with the Paddlepaddle operator "deformable_conv". +*@li Compatible with the Mmcv operator "deform_conv". 
*/ REG_OP(DeformableConv2D) - .INPUT(x, TensorType({DT_FLOAT16})) - .INPUT(filter, TensorType({DT_FLOAT16})) - .INPUT(offsets, TensorType({DT_FLOAT16})) - .OPTIONAL_INPUT(bias, TensorType({DT_FLOAT16})) - .OUTPUT(y, TensorType({DT_FLOAT16})) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(filter, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(offsets, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(bias, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) .REQUIRED_ATTR(strides, ListInt) .REQUIRED_ATTR(pads, ListInt) .ATTR(dilations, ListInt, {1, 1, 1, 1}) @@ -1395,14 +1403,13 @@ REG_OP(Conv2DTransposeD) .OP_END_FACTORY_REG(Conv2DTransposeD) /** -*@brief In the deformable convolution operator, the original input FeatureMap is expanded to a ksize_y * H * ksize_x *W -*FeatureMap by bilinear interpolation according to the offset offset. +*@brief Computes the deformed convolution output with the expected input. *@par Inputs: * Four inputs: - * @li x: A Tensor of type float16 + * @li x: A Tensor of type float16, float32 * @li offsets: A Tensor of type float16,float32.Deformation offset parameter. *@par Required Attributes: - * @li strides: A tuple/list of 2 integers.The stride of the sliding window for + * @li strides: A tuple/list of 4 integers. The stride of the sliding window for * height and width for H/W dimension. * @li pads: A tuple/list of 4 integers.Padding added to each dimension * of the input. @@ -1410,20 +1417,20 @@ REG_OP(Conv2DTransposeD) *@par Attributes: * Three attributes: * @li dilations: A tuple/list of 4 integers, The dilation factor for each dimension - * of input. Defaults to [0, 0, 0, 0] + * of input. Defaults to [1, 1, 1, 1] * @li data_format: An optional string from: "NCHW", "NHWC". Defaults to "NCHW". Specify the data format of the input x. * @li deformable_groups: Specify the c-axis grouping number of input x. *@par Outputs: - * y: A Tensor. A Tensor of type float16. + * y: A Tensor.
A Tensor of type float16, float32. */ REG_OP(DeformableOffsets) - .INPUT(x, TensorType({DT_FLOAT16})) - .INPUT(offsets, TensorType({DT_FLOAT16, DT_FLOAT32})) - .OUTPUT(y, TensorType({DT_FLOAT16})) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(offsets, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) .REQUIRED_ATTR(strides, ListInt) .REQUIRED_ATTR(pads, ListInt) .REQUIRED_ATTR(ksize, ListInt) - .ATTR(dilations, ListInt, {0, 0, 0, 0}) + .ATTR(dilations, ListInt, {1, 1, 1, 1}) .ATTR(data_format, String, "NCHW") .ATTR(deformable_groups, Int, 1) .OP_END_FACTORY_REG(DeformableOffsets) diff --git a/third_party/fwkacllib/inc/ops/nn_detect_ops.h b/third_party/fwkacllib/inc/ops/nn_detect_ops.h index 476704e5..a013fb33 100644 --- a/third_party/fwkacllib/inc/ops/nn_detect_ops.h +++ b/third_party/fwkacllib/inc/ops/nn_detect_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/nn_norm_ops.h b/third_party/fwkacllib/inc/ops/nn_norm_ops.h index 0fdf27e3..35c4c7d4 100644 --- a/third_party/fwkacllib/inc/ops/nn_norm_ops.h +++ b/third_party/fwkacllib/inc/ops/nn_norm_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/third_party/fwkacllib/inc/ops/nn_ops.h b/third_party/fwkacllib/inc/ops/nn_ops.h index 16552eee..9edc469a 100644 --- a/third_party/fwkacllib/inc/ops/nn_ops.h +++ b/third_party/fwkacllib/inc/ops/nn_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/nn_pooling_ops.h b/third_party/fwkacllib/inc/ops/nn_pooling_ops.h index 473e94b7..ab35ba47 100644 --- a/third_party/fwkacllib/inc/ops/nn_pooling_ops.h +++ b/third_party/fwkacllib/inc/ops/nn_pooling_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -70,6 +70,7 @@ REG_OP(Pooling) .ATTR(pad, ListInt, {0,0,0,0}) // pad size .ATTR(dilation, ListInt, {1,1,1,1}) .ATTR(ceil_mode, Int, 0) + .ATTR(data_format, String, "NCHW") .OP_END_FACTORY_REG(Pooling) /** @@ -79,7 +80,7 @@ REG_OP(Pooling) *x: A tensor of type float16, float32, double . \n *@par Attributes: -*@li ksize: A required list of 4 ints, specifying the size (N, C, H, and W) of the sliding window, where N = C = 1, and H and W are positive integers within the range [1, 32768]. +*@li ksize: A required list of 4 ints, specifying the size (N, C, H, and W) of the sliding window, where N = C = 1, and H and W are positive integers within the range [1, 255]. *@li strides: A required list of 4 ints, specifying the stride of the sliding window. The strides of the N and C dimensions are 1. The strides of the H and W dimensions are positive integers within the range [1, 63]. *@li padding: A required string, specifying the padding algorithm, either "VALID" or "SAME". 
With "SAME" means that the outputs will have the same spatial dimensions as its inputs. With "VALID" means no padding. *@li data_format: An optional string, specifying the data format of "ksize" and "strides", either "NCHW", "NC1HWC0", or "NHWC" (default) . \n @@ -91,7 +92,7 @@ REG_OP(Pooling) *@li This operator applies only to a TensorFlow network. *@li Only single input and single output are supported. *@li Global pooling is supported. -*@li "ksize_H" and "ksize_W" are positive integers within the range [1, 32768]. ksize_H * ksize_W < 256 +*@li "ksize_H" and "ksize_W" are positive integers within the range [1, 255]. ksize_H * ksize_W < 256 *@li Due to instruction restrictions, the values of "strides_h" and "strides_w" are positive integers within the range [1, 63]. *@par Third-party framework compatibility * Compatible with the TensorFlow operator AvgPool. @@ -106,13 +107,13 @@ REG_OP(AvgPool) .OP_END_FACTORY_REG(AvgPool) /** -*@brief Performs average pooling on the input . \n +*@brief Performs average pooling on the input. *@par Inputs: *x: A tensor of type float16, float32, double. *@par Attributes: -*@li ksize: A required list of 4 ints, specifying the size (N, C, H, and W) of the sliding window, where N = C = 1, and H and W are positive integers within the range [1, 32768]. +*@li ksize: A required list of 4 ints, specifying the size (N, C, H, and W) of the sliding window, where N = C = 1, and H and W are positive integers within the range [1, 255]. *@li strides: A required list of 4 ints, specifying the stride of the sliding window. The strides of the N and C dimensions are 1. The strides of the H and W dimensions are positive integers within the range [1, 63]. *@li padding_mode: A required string, specifying the padding algorithm, either "VALID", "SAME" and "CALCULATED". With "SAME" means that the outputs will have the same spatial dimensions as its inputs. With "VALID" means no padding. *@li pads: Pad value when padding_mode is "CALCULATED". 
@@ -127,7 +128,7 @@ REG_OP(AvgPool) *@attention Constraints: *@li Only single input and single output are supported. *@li Global pooling is supported. -*@li "ksize_H" and "ksize_W" are positive integers within the range [1, 32768]. ksize_H * ksize_W < 256 +*@li "ksize_H" and "ksize_W" are positive integers within the range [1, 255]. ksize_H * ksize_W < 256 *@li Due to instruction restrictions, the values of "strides_h" and "strides_w" are positive integers within the range [1, 63]. *@par Third-party framework compatibility * Compatible with the TensorFlow operator AvgPoolV2. @@ -168,9 +169,6 @@ REG_OP(AvgPoolV2) *@par Third-party framework compatibility * Compatible with the TensorFlow operator AvgPool3D. -* -* @par Restrictions: -*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. */ REG_OP(AvgPool3D) .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE})) @@ -1196,8 +1194,8 @@ REG_OP(MaxPoolGradWithArgmaxV2) * @par Inputs: * One input: -* x: An NC1HWC0 Tensor. Supported type:float16, float32, double, int8, int16, -* int32, int64, uint8, uint16, qint8 +* x: An NC1HWC0 Tensor. Supported type:float16, float32, double, int32, int64, +* uint8, int16, int8, uint16, qint8 * @par Attributes: * @li ksize: A required list of int8, int16, int32, or int64 values, @@ -1208,14 +1206,14 @@ REG_OP(MaxPoolGradWithArgmaxV2) * the input tensor. No default value. * @li padding_mode: A required string. Defaults to "CALCULATED". * @li pads:A required list of int8, int16, int32, or int64 values, -* a data to caculate when padding_mode is "SAME" and "CALCULATED". +* a data to calculate when padding_mode is "CALCULATED". * @li data_format: An optional string. Defaults to "NHWC" . * @li global_pooling bool, Whether to use the global pooling. * If global_pooling = true, kernel size and paddings will be ignored. * Default False -* @li ceil_mode:global_pooling (bool) – (bool) Whether to use the global pooling. -* If global_pooling = true, kernel size and paddings will be ignored.
-* Default False \n +* @li ceil_mode: Whether to use the ceil function to calculate output +* height and width. False is the default. If it is set to False, +* the floor function will be used. Default False \n * @par Outputs: * y: A Tensor. Has the same type and format as input "x" . \n @@ -1232,8 +1230,8 @@ REG_OP(MaxPoolGradWithArgmaxV2) * Compatible with the TensorFlow operator MaxPool. */ REG_OP(MaxPoolV3) - .INPUT(x,TensorType({DT_FLOAT16, DT_FLOAT32})) - .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT32})) + .INPUT(x,TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16, DT_QINT8})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16, DT_QINT8})) .REQUIRED_ATTR(ksize, ListInt) .REQUIRED_ATTR(strides, ListInt) .ATTR(padding_mode, String, "CALCULATED") @@ -1260,14 +1258,14 @@ REG_OP(MaxPoolV3) * the input tensor. No default value. * @li padding_mode: A required string. Defaults to "CALCULATED". * @li pads:A required list of int8, int16, int32, or int64 values, -* a data to caculate when padding_mode is "SAME" and "CALCULATED". +* a data to calculate when padding_mode is "CALCULATED". * @li data_format: An optional string. Defaults to "NHWC" . * @li global_pooling bool, Whether to use the global pooling. * If global_pooling = true, kernel size and paddings will be ignored. * Default False -* @li ceil_mode:global_pooling (bool) – (bool) Whether to use the global pooling. -* If global_pooling = true, kernel size and paddings will be ignored. -* Default False \n +* @li ceil_mode: Whether to use the ceil function to calculate output +* height and width. False is the default. If it is set to False, +* the floor function will be used. Default False \n * @par Outputs: * y: A mutable tensor. Has the same shape and type as "x1" .
\n @@ -1294,4 +1292,4 @@ REG_OP(MaxPoolV3Grad) .ATTR(ceil_mode, Bool, false) .OP_END_FACTORY_REG(MaxPoolV3Grad) } // namespace ge -#endif // OPS_BUILT_IN_OP_PROTO_INC_NN_POOLING_OPS_H \ No newline at end of file +#endif // OPS_BUILT_IN_OP_PROTO_INC_NN_POOLING_OPS_H diff --git a/third_party/fwkacllib/inc/ops/nn_training_ops.h b/third_party/fwkacllib/inc/ops/nn_training_ops.h index 92074872..047fd6da 100644 --- a/third_party/fwkacllib/inc/ops/nn_training_ops.h +++ b/third_party/fwkacllib/inc/ops/nn_training_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/no_op.h b/third_party/fwkacllib/inc/ops/no_op.h index b27b1fa0..7834591c 100644 --- a/third_party/fwkacllib/inc/ops/no_op.h +++ b/third_party/fwkacllib/inc/ops/no_op.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/nonlinear_fuc_ops.h b/third_party/fwkacllib/inc/ops/nonlinear_fuc_ops.h index ce8383db..e0e5dfc6 100644 --- a/third_party/fwkacllib/inc/ops/nonlinear_fuc_ops.h +++ b/third_party/fwkacllib/inc/ops/nonlinear_fuc_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/third_party/fwkacllib/inc/ops/npu_loss_scale_ops.h b/third_party/fwkacllib/inc/ops/npu_loss_scale_ops.h index f36d2935..8d7ef9f9 100644 --- a/third_party/fwkacllib/inc/ops/npu_loss_scale_ops.h +++ b/third_party/fwkacllib/inc/ops/npu_loss_scale_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/outfeed_ops.h b/third_party/fwkacllib/inc/ops/outfeed_ops.h index 53b9d701..e0b783bc 100644 --- a/third_party/fwkacllib/inc/ops/outfeed_ops.h +++ b/third_party/fwkacllib/inc/ops/outfeed_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/pad_ops.h b/third_party/fwkacllib/inc/ops/pad_ops.h index 92dca17c..f746b3b3 100644 --- a/third_party/fwkacllib/inc/ops/pad_ops.h +++ b/third_party/fwkacllib/inc/ops/pad_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -185,6 +185,60 @@ REG_OP(PadD) .REQUIRED_ATTR(paddings, ListListInt) .OP_END_FACTORY_REG(PadD) +/** +*@brief Pads a tensor . \n + +*@par Inputs: +*Three inputs, including: +* @li x: A Tensor. Must be one of the following types: float16, float32, double, int32, +* uint8, int16, int8, complex64, int64, qint8, quint8, qint32, qint16, quint16, uint16, +* complex128, uint32, uint64. +* @li constant_values: A Tensor. Must have the same type as input. +* @li paddings: A Tensor of type int32 or int64 . 
\n + +*@par Outputs: +*y: A Tensor of the same type as "x" . \n + +*@par Third-party framework compatibility: +* Compatible with TensorFlow operator Pad. +*/ +REG_OP(PadV2) + .INPUT(x, TensorType::BasicType()) + .INPUT(paddings, TensorType::IndexNumberType()) + .INPUT(constant_values, TensorType::BasicType()) + .OUTPUT(y, TensorType::BasicType()) + .OP_END_FACTORY_REG(PadV2) + +/** +*@brief Pads a tensor . \n + +*@par Inputs: +*x: A Tensor. Must be one of the following types: float16, float32, int8, uint8, int32 . \n +*constant_values: A Tensor. Must have the same type as input. + +*@par Attributes: +*paddings: An optional "vector>". Defaults to "{}". +* For each dimension D of input, paddings[D, 0] indicates how many +* values to add before the contents of tensor in that dimension, +* and paddings[D, 1] indicates how many values to add after the +* contents of tensor in that dimension . \n + +*@par Outputs: +*y: A Tensor of the same type as "x" . \n + +*@par Third-party framework compatibility: +* Compatible with TensorFlow operator Pad. +* +* @par Restrictions: +* Warning: THIS FUNCTION IS DEPRECATED. Please use Pad instead. +*/ +REG_OP(PadV2D) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) + .INPUT(constant_values, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) + .REQUIRED_ATTR(paddings, ListListInt) + .OP_END_FACTORY_REG(PadV2D) + /** *@brief Pads a tensor. 
@@ -349,6 +403,5 @@ REG_OP(EmbeddingRankId) .ATTR(mode, String, "mod") .OP_END_FACTORY_REG(EmbeddingRankId) - } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_PAD_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/parsing_ops.h b/third_party/fwkacllib/inc/ops/parsing_ops.h index 9a5cf504..5c7adfd8 100644 --- a/third_party/fwkacllib/inc/ops/parsing_ops.h +++ b/third_party/fwkacllib/inc/ops/parsing_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/quantize_ops.h b/third_party/fwkacllib/inc/ops/quantize_ops.h index 806e28df..b53cfeb6 100644 --- a/third_party/fwkacllib/inc/ops/quantize_ops.h +++ b/third_party/fwkacllib/inc/ops/quantize_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/ragged_array_ops.h b/third_party/fwkacllib/inc/ops/ragged_array_ops.h index 20484623..9b31aa8e 100644 --- a/third_party/fwkacllib/inc/ops/ragged_array_ops.h +++ b/third_party/fwkacllib/inc/ops/ragged_array_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/third_party/fwkacllib/inc/ops/ragged_conversion_ops.h b/third_party/fwkacllib/inc/ops/ragged_conversion_ops.h index 020e3da4..13488a25 100644 --- a/third_party/fwkacllib/inc/ops/ragged_conversion_ops.h +++ b/third_party/fwkacllib/inc/ops/ragged_conversion_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/ragged_math_ops.h b/third_party/fwkacllib/inc/ops/ragged_math_ops.h index 258b0ca1..8af4f867 100644 --- a/third_party/fwkacllib/inc/ops/ragged_math_ops.h +++ b/third_party/fwkacllib/inc/ops/ragged_math_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/random_ops.h b/third_party/fwkacllib/inc/ops/random_ops.h index 847b0768..b46da435 100644 --- a/third_party/fwkacllib/inc/ops/random_ops.h +++ b/third_party/fwkacllib/inc/ops/random_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/third_party/fwkacllib/inc/ops/reduce_ops.h b/third_party/fwkacllib/inc/ops/reduce_ops.h index cd448c8d..6f44093e 100644 --- a/third_party/fwkacllib/inc/ops/reduce_ops.h +++ b/third_party/fwkacllib/inc/ops/reduce_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/resource_variable_ops.h b/third_party/fwkacllib/inc/ops/resource_variable_ops.h index 74ac83f8..1b60d42a 100644 --- a/third_party/fwkacllib/inc/ops/resource_variable_ops.h +++ b/third_party/fwkacllib/inc/ops/resource_variable_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/rnn.h b/third_party/fwkacllib/inc/ops/rnn.h index e33f3677..84723872 100644 --- a/third_party/fwkacllib/inc/ops/rnn.h +++ b/third_party/fwkacllib/inc/ops/rnn.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -591,6 +591,116 @@ REG_OP(DynamicGRUV2) .ATTR(reset_after, Bool, true) .ATTR(is_training, Bool, true) .OP_END_FACTORY_REG(DynamicGRUV2) + +/** +*@brief: DynamicGRUV2Grad calculation. +*@par Inputs: +*fourteen inputs: \n +*@li x:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li weight_input:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li weight_hidden:A 4D Tensor. 
Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li y:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li init_h:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li h:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li dy:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li dh:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li update:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li reset:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li new:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li hidden_new:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li seq_length:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li mask:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. + +*@par Attributes: +*@li direction:A string identifying the direction in the op. Default to "UNIDIRECTIONAL". Only UNIDIRECTIONAL is currently supported. +*@li cell_depth:An integer identifying the cell depth in the op. Default to 0. +*@li keep_prob:A float identifying the keep prob in the op. Default to -1. +*@li cell_clip:A float identifying the cell clip in the op. Default to -1. +*@li num_proj:An integer identifying the num projection in the op. Default to 0. +*@li time_major:A bool identifying the time major in the op. Default to true. +*@li bias_type:A string identifying the type of bias_type function in the op. Default to "double_bias".
+*@li gate_order:An string identifying the gate order in weight and bias. Default to "zrh". "rzh" is another option. +*@li reset_after:An bool identifying whether to apply reset gate after matrix multiplication. Default to true. + +*@par Outputs: +*six outputs: \n +*@li dw_input:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li dw_hidden:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li db_input:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li db_hidden:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li dx:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li dh_prev:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*/ +REG_OP(DynamicGRUV2Grad) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(weight_input, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(weight_hidden, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(init_h, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(h, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(dy, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(dh, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(update, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(reset, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(new, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(hidden_new, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(seq_length, TensorType({DT_INT32})) + .OPTIONAL_INPUT(mask, TensorType({DT_UINT8})) + .OUTPUT(dw_input, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(dw_hidden, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(db_input, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(db_hidden, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(dx, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(dh_prev, 
TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(direction, String, "UNIDIRECTIONAL") + .ATTR(cell_depth, Int, 0) + .ATTR(keep_prob, Float, -1.0) + .ATTR(cell_clip, Float, -1.0) + .ATTR(num_proj, Int, 0) + .ATTR(time_major, Bool, true) + .ATTR(bias_type, String, "double_bias") + .ATTR(gate_order, String, "zrh") + .ATTR(reset_after, Bool, true) + .OP_END_FACTORY_REG(DynamicGRUV2Grad) + +/** +*@brief: GRUV2HiddenGrad calculation. +*@par Inputs: +*nine inputs: \n +*@li weight_hidden:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li init_h:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li h:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li dy:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li dh:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li update:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li reset:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li new:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li hidden_new:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. + +*@par Attributes: +*@li gate_order:An string identifying the gate order in weight and bias. Default to "zrh". "rzh" is another option. + +*@par Outputs: +*three outputs: \n +*@li dh_prev:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li dgate_h:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li dnt_x:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. 
+*/ +REG_OP(GRUV2HiddenGrad) + .INPUT(weight_hidden, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(init_h, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(h, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(dy, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(dh, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(update, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(reset, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(new, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(hidden_new, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(dh_prev, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(dgate_h, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(dnt_x, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(gate_order, String, "zrh") + .OP_END_FACTORY_REG(GRUV2HiddenGrad) } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_RNN_H_ diff --git a/third_party/fwkacllib/inc/ops/rpn_ops.h b/third_party/fwkacllib/inc/ops/rpn_ops.h index 089af326..b7649a44 100644 --- a/third_party/fwkacllib/inc/ops/rpn_ops.h +++ b/third_party/fwkacllib/inc/ops/rpn_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/save_ops.h b/third_party/fwkacllib/inc/ops/save_ops.h index 5ce6c2e0..0ce473b7 100644 --- a/third_party/fwkacllib/inc/ops/save_ops.h +++ b/third_party/fwkacllib/inc/ops/save_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/third_party/fwkacllib/inc/ops/sdca_ops.h b/third_party/fwkacllib/inc/ops/sdca_ops.h index 34c6a268..cbd9839d 100644 --- a/third_party/fwkacllib/inc/ops/sdca_ops.h +++ b/third_party/fwkacllib/inc/ops/sdca_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/selection_ops.h b/third_party/fwkacllib/inc/ops/selection_ops.h index e7f35e02..2c99e82e 100644 --- a/third_party/fwkacllib/inc/ops/selection_ops.h +++ b/third_party/fwkacllib/inc/ops/selection_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/set_ops.h b/third_party/fwkacllib/inc/ops/set_ops.h index 04e04f1b..1d02fa15 100644 --- a/third_party/fwkacllib/inc/ops/set_ops.h +++ b/third_party/fwkacllib/inc/ops/set_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/sparse_ops.h b/third_party/fwkacllib/inc/ops/sparse_ops.h index 09d8ced9..d7512790 100644 --- a/third_party/fwkacllib/inc/ops/sparse_ops.h +++ b/third_party/fwkacllib/inc/ops/sparse_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/third_party/fwkacllib/inc/ops/spectral_ops.h b/third_party/fwkacllib/inc/ops/spectral_ops.h index be3d7d00..64fa7814 100644 --- a/third_party/fwkacllib/inc/ops/spectral_ops.h +++ b/third_party/fwkacllib/inc/ops/spectral_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/split_combination_ops.h b/third_party/fwkacllib/inc/ops/split_combination_ops.h index f1a93fa6..efe4715d 100644 --- a/third_party/fwkacllib/inc/ops/split_combination_ops.h +++ b/third_party/fwkacllib/inc/ops/split_combination_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/state_ops.h b/third_party/fwkacllib/inc/ops/state_ops.h index 3c8e32b6..db1f5353 100644 --- a/third_party/fwkacllib/inc/ops/state_ops.h +++ b/third_party/fwkacllib/inc/ops/state_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/third_party/fwkacllib/inc/ops/stateful_random_ops.h b/third_party/fwkacllib/inc/ops/stateful_random_ops.h index c2f65c6a..366112d6 100644 --- a/third_party/fwkacllib/inc/ops/stateful_random_ops.h +++ b/third_party/fwkacllib/inc/ops/stateful_random_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/stateless_random_ops.h b/third_party/fwkacllib/inc/ops/stateless_random_ops.h index ff9daaa3..dad3c379 100644 --- a/third_party/fwkacllib/inc/ops/stateless_random_ops.h +++ b/third_party/fwkacllib/inc/ops/stateless_random_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/string_ops.h b/third_party/fwkacllib/inc/ops/string_ops.h index ec84cc83..4a88bc79 100644 --- a/third_party/fwkacllib/inc/ops/string_ops.h +++ b/third_party/fwkacllib/inc/ops/string_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/third_party/fwkacllib/inc/ops/swap_co_ops.h b/third_party/fwkacllib/inc/ops/swap_co_ops.h index 6e8eaac3..a1bf4f8b 100644 --- a/third_party/fwkacllib/inc/ops/swap_co_ops.h +++ b/third_party/fwkacllib/inc/ops/swap_co_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/target_crop_and_resize.h b/third_party/fwkacllib/inc/ops/target_crop_and_resize.h new file mode 100644 index 00000000..9c61f2c9 --- /dev/null +++ b/third_party/fwkacllib/inc/ops/target_crop_and_resize.h @@ -0,0 +1,59 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! + * \file target_crop_and_resize.h + * \brief + */ +#ifndef GE_OP_TARGET_CROP_AND_RESIZE_H +#define GE_OP_TARGET_CROP_AND_RESIZE_H + +#include "graph/operator_reg.h" + +namespace ge { + +/** +*@brief Performs crop and resize on images. + +*@par Inputs: +*@li x: An NCHW tensor of type uint8, specifying the input to the data layer. +*@li boxes: Crop parameters of type int32. \n +*@li box_index: Batch index parameters of type int32. The batch of the input x to be cropped and resize. \n + +*@par Attributes: +*output_h: A required int, specifying the height of output. 
\n +*output_w: A required int, specifying the width of output. \n +*input_format: A required string, specifying the input format. \n + +*@par Outputs: +*y: The output tensor of type uint8, format only support NC1HWC0_C04. +*@par Third-party framework compatibility +* It is a custom operator. It has no corresponding operator in Caffe. +* +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(TargetCropAndResize) + .INPUT(x, TensorType({DT_UINT8})) + .INPUT(boxes, TensorType({DT_INT32})) + .INPUT(box_index, TensorType({DT_INT32})) + .OUTPUT(y, TensorType({DT_UINT8})) + .ATTR(output_h, Int, 224) + .ATTR(output_w, Int, 224) + .ATTR(input_format, String, "YUV420SP_U8") + .OP_END_FACTORY_REG(TargetCropAndResize) +} +#endif //GE_OP_TARGET_CROP_AND_RESIZE_H diff --git a/third_party/fwkacllib/inc/ops/transformation_ops.h b/third_party/fwkacllib/inc/ops/transformation_ops.h index 9338a636..64e18fc7 100644 --- a/third_party/fwkacllib/inc/ops/transformation_ops.h +++ b/third_party/fwkacllib/inc/ops/transformation_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -140,7 +140,8 @@ support "NHWC/NCHW" to "NC1HWC0" and "NC1HWC0" to "NHWC/NCHW" *@par Attributes: *@li src_format: A string source data format, can be "NHWC", "NCHW", "FRACTAL_Zn" etc. -*@li dst_format: A string target data format, can be "NC1HWC0", "NCHW", "FRACTAL_Zn" etc . \n +*@li dst_format: A string target data format, can be "NC1HWC0", "NCHW", "FRACTAL_Zn" etc. +*@li group: A required int32, default value is 1. \n *@par Outputs: *dst: A Tensor dtype of all types. 
@@ -150,6 +151,7 @@ REG_OP(TransData) .OUTPUT(dst, TensorType::BasicType()) .REQUIRED_ATTR(src_format, String) .REQUIRED_ATTR(dst_format, String) + .ATTR(group, Int, 1) .OP_END_FACTORY_REG(TransData) /** @@ -529,7 +531,9 @@ REG_OP(Unpack) * with patch_sizes_eff = patch_sizes + (patch_sizes - 1) * * (rates - 1), followed by subsampling them spatially by a factor of rates. * This is equivalent to rate in dilated (a.k.a. Atrous) convolutions. -* @li padding: A required string. The type of padding algorithm to use . \n +* @li padding: A required string. The type of padding algorithm to use, + support "SAME" or "VALID". \n +* @li data_format: A required string. The format of input, only supported NHWC. \n * @par Outputs: * y: A 4D Tensor with shape [batch, out_rows, out_cols, ksize_rows * @@ -550,6 +554,7 @@ REG_OP(ExtractImagePatches) .REQUIRED_ATTR(strides, ListInt) .REQUIRED_ATTR(rates, ListInt) .REQUIRED_ATTR(padding, String) + .ATTR(data_format, String, "NHWC") .OP_END_FACTORY_REG(ExtractImagePatches) /** @@ -564,7 +569,9 @@ REG_OP(ExtractImagePatches) * dimension of "x". * @li strides: A required list or tuple. How far the centers of two consecutive * patches are in "x". Must be: [1, stride_planes, stride_rows, stride_cols, 1]. -* @li padding: A required string. The type of padding algorithm to use . \n +* @li padding: A required string. The type of padding algorithm to use , +* support "SAME" or "VALID" . \n +* @li data_format: An optional string. The format of input, only supported NDHWC. 
\n * @par Outputs: * Output: A 5D Tensor with shape [batch, out_planes, out_rows, out_cols, ksize_planes * @@ -583,6 +590,7 @@ REG_OP(ExtractVolumePatches) .REQUIRED_ATTR(ksizes, ListInt) .REQUIRED_ATTR(strides, ListInt) .REQUIRED_ATTR(padding, String) + .ATTR(data_format, String, "NDHWC") .OP_END_FACTORY_REG(ExtractVolumePatches) /** diff --git a/third_party/fwkacllib/inc/ops/warp_perspective_ops.h b/third_party/fwkacllib/inc/ops/warp_perspective_ops.h index 8ef69d8b..e19cbd7c 100644 --- a/third_party/fwkacllib/inc/ops/warp_perspective_ops.h +++ b/third_party/fwkacllib/inc/ops/warp_perspective_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.