update ops headers

pull/324/head
yanghaoran 4 years ago
parent 856237bae3
commit 503c34ce9d

@@ -1,5 +1,5 @@
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -2240,6 +2240,64 @@ REG_OP(OutfeedEnqueueOp)
.ATTR(channel_name, String, "")
.OP_END_FACTORY_REG(OutfeedEnqueueOp)
/**
*@brief LruCache, creates a cache resource.
*@par Inputs:
*No input.
*@par Attributes:
*cache_size: An optional "int64" specifying the cache capacity. Defaults to "100000".
*load_factor: An optional "float" specifying the fill rate at which the cache is considered full. Defaults to "1".
*@par Outputs:
*cache: the created cache resource.
*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(LruCache)
.OUTPUT(cache, TensorType({DT_RESOURCE}))
.ATTR(container, String, "")
.ATTR(shared_name, String, "LruCache")
.ATTR(cache_size, Int, 100000)
.ATTR(load_factor, Float, 1)
.OP_END_FACTORY_REG(LruCache)
/**
*@brief CacheAdd, returns the ids swapped into the cache and the ids swapped out of the cache.
*@par Inputs:
*cache: the cache resource.
*ids: a Tensor of ids to be inserted into the cache.
*@par Outputs:
*swap_in_id: ids swapped into the cache.
*swap_in_idx: cache indices of the ids swapped in.
*swap_out_id: ids swapped out of the cache.
*swap_out_idx: cache indices of the ids swapped out.
*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(CacheAdd)
.INPUT(cache, TensorType({DT_RESOURCE}))
.INPUT(ids, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32}))
.OUTPUT(swap_in_id, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32}))
.OUTPUT(swap_in_idx, TensorType({DT_INT64}))
.OUTPUT(swap_out_id, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32}))
.OUTPUT(swap_out_idx, TensorType({DT_INT64}))
.OP_END_FACTORY_REG(CacheAdd)
/**
*@brief CacheRemoteIndexToLocal, maps remote ids to their local indices in the cache.
*@par Inputs:
*cache: the cache resource.
*ids: a Tensor of ids to look up in the cache.
*@par Outputs:
*local_idx: the local index of each id in the cache.
*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(CacheRemoteIndexToLocal)
.INPUT(cache, TensorType({DT_RESOURCE}))
.INPUT(ids, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32}))
.OUTPUT(local_idx, TensorType({DT_INT64}))
.OP_END_FACTORY_REG(CacheRemoteIndexToLocal)
} // namespace ge
#endif // OPS_BUILT_IN_OP_PROTO_INC_DATA_FLOW_OPS_H_
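
Taken together, the three new ops describe an LRU-style id cache: LruCache creates the resource, CacheAdd inserts a batch of ids and reports which ids were swapped in and which were evicted, and CacheRemoteIndexToLocal resolves ids to their local slots. The header only declares the interface, so the host-side sketch below is just an illustration of that intended behaviour under those assumptions; the class and member names are hypothetical, and it is not the Ascend kernel implementation.

// Illustrative host-side model of the LruCache / CacheAdd / CacheRemoteIndexToLocal
// semantics only; it is NOT the Ascend kernel, and all names are hypothetical.
#include <cstddef>
#include <cstdint>
#include <list>
#include <unordered_map>
#include <vector>

class LruCacheModel {
 public:
  explicit LruCacheModel(size_t cache_size) : cache_size_(cache_size) {}

  // Mirrors CacheAdd: inserts `ids`, reporting which ids entered the cache
  // (swap_in_*) and which ids were evicted to make room (swap_out_*).
  void Add(const std::vector<int64_t>& ids,
           std::vector<int64_t>* swap_in_id, std::vector<int64_t>* swap_in_idx,
           std::vector<int64_t>* swap_out_id, std::vector<int64_t>* swap_out_idx) {
    for (int64_t id : ids) {
      if (slot_of_.count(id) != 0) {           // already cached: refresh LRU order
        lru_.splice(lru_.begin(), lru_, pos_of_[id]);
        continue;
      }
      int64_t slot;
      if (slot_of_.size() < cache_size_) {     // free slot still available
        slot = static_cast<int64_t>(slot_of_.size());
      } else {                                 // evict the least-recently-used id
        int64_t victim = lru_.back();
        lru_.pop_back();
        slot = slot_of_[victim];
        slot_of_.erase(victim);
        pos_of_.erase(victim);
        swap_out_id->push_back(victim);
        swap_out_idx->push_back(slot);
      }
      lru_.push_front(id);
      pos_of_[id] = lru_.begin();
      slot_of_[id] = slot;
      swap_in_id->push_back(id);
      swap_in_idx->push_back(slot);
    }
  }

  // Mirrors CacheRemoteIndexToLocal: id -> local slot, or -1 if the id is absent.
  int64_t IndexToLocal(int64_t id) const {
    auto it = slot_of_.find(id);
    return it == slot_of_.end() ? -1 : it->second;
  }

 private:
  size_t cache_size_;
  std::list<int64_t> lru_;                     // most recently used id at the front
  std::unordered_map<int64_t, std::list<int64_t>::iterator> pos_of_;
  std::unordered_map<int64_t, int64_t> slot_of_;
};

In this model a CacheAdd on a full cache always evicts the least-recently-used id, which is the information the swap_out_id/swap_out_idx outputs expose.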

@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -2802,6 +2802,80 @@ REG_OP(AdamApplyOneAssign)
.OUTPUT(output2, TensorType({DT_FLOAT16,DT_FLOAT}))
.OP_END_FACTORY_REG(AdamApplyOneAssign)
/**
*@brief A fusion operator for BERT LAMB. \n
*@par Inputs:
*Twelve inputs, including:
* @li input0: A Tensor. Must be one of the following types: float16, float32.
* @li input1: A Tensor. Must be one of the following types: float16, float32.
* @li input2: A Tensor. Must be one of the following types: float16, float32.
* @li input3: A Tensor. Must be one of the following types: float16, float32.
* @li mul0_x: A Tensor. Must be one of the following types: float16, float32.
* @li mul1_x: A Tensor. Must be one of the following types: float16, float32.
* @li mul2_x: A Tensor. Must be one of the following types: float16, float32.
* @li mul3_x: A Tensor. Must be one of the following types: float16, float32.
* @li add2_y: A Tensor. Must be one of the following types: float16, float32.
* @li steps: A Tensor. Must be one of the following types: float16, float32.
* @li do_use_weight: A Tensor. Must be one of the following types: float16, float32.
* @li weight_decay_rate: A Tensor. Must be one of the following types: float16, float32. \n
*@par Outputs:
*One output, including:
* @li output0: A Tensor. Must be one of the following types: float16, float32. \n
*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(LambApplyOptimizerAssign)
.INPUT(input0, TensorType({DT_FLOAT16,DT_FLOAT}))
.INPUT(input1, TensorType({DT_FLOAT16,DT_FLOAT}))
.INPUT(input2, TensorType({DT_FLOAT16,DT_FLOAT}))
.INPUT(input3, TensorType({DT_FLOAT16,DT_FLOAT}))
.INPUT(mul0_x, TensorType({DT_FLOAT16,DT_FLOAT}))
.INPUT(mul1_x, TensorType({DT_FLOAT16,DT_FLOAT}))
.INPUT(mul2_x, TensorType({DT_FLOAT16,DT_FLOAT}))
.INPUT(mul3_x, TensorType({DT_FLOAT16,DT_FLOAT}))
.INPUT(add2_y, TensorType({DT_FLOAT16,DT_FLOAT}))
.INPUT(steps, TensorType({DT_FLOAT16,DT_FLOAT}))
.INPUT(do_use_weight, TensorType({DT_FLOAT16,DT_FLOAT}))
.INPUT(weight_decay_rate, TensorType({DT_FLOAT16,DT_FLOAT}))
.OUTPUT(output0, TensorType({DT_FLOAT16,DT_FLOAT}))
.OP_END_FACTORY_REG(LambApplyOptimizerAssign)
/**
*@brief A fusion operator for BERT LAMB. \n
*@par Inputs:
*Five inputs, including:
* @li input0: A Tensor. Must be one of the following types: float16, float32.
* @li input1: A Tensor. Must be one of the following types: float16, float32.
* @li input2: A Tensor. Must be one of the following types: float16, float32.
* @li input3: A Tensor. Must be one of the following types: float16, float32.
* @li input4: A Tensor. Must be one of the following types: float16, float32. \n
*@par Outputs:
*No outputs
*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(LambApplyWeightAssign)
.INPUT(input0, TensorType({DT_FLOAT16,DT_FLOAT}))
.INPUT(input1, TensorType({DT_FLOAT16,DT_FLOAT}))
.INPUT(input2, TensorType({DT_FLOAT16,DT_FLOAT}))
.INPUT(input3, TensorType({DT_FLOAT16,DT_FLOAT}))
.INPUT(input4, TensorType({DT_FLOAT16,DT_FLOAT}))
.OP_END_FACTORY_REG(LambApplyWeightAssign)
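
LambApplyOptimizerAssign and LambApplyWeightAssign fuse the two halves of a LAMB optimizer step as used for BERT training: the first builds the per-parameter update from the moment estimates, the second applies it to the weights with the layer-wise trust ratio. The header does not document which of input0..input4 plays which role, so the mapping in the sketch below (gradient, moments, weights, learning rate) is an assumption, bias correction is omitted, and the code is only a plain C++ reference for the standard LAMB update (You et al., 2019), not the fused kernels themselves.

// Plain C++ reference for the LAMB update that LambApplyOptimizerAssign +
// LambApplyWeightAssign together approximate.  The role assigned to each
// tensor here is an assumption; bias correction and the exact fused
// dataflow are omitted.
#include <cmath>
#include <cstddef>
#include <vector>

void LambStep(std::vector<float>& w,          // weights, updated in place
              std::vector<float>& m,          // first-moment estimate
              std::vector<float>& v,          // second-moment estimate
              const std::vector<float>& g,    // gradient
              float lr, float beta1, float beta2,
              float eps, float weight_decay_rate) {
  // Part roughly matching LambApplyOptimizerAssign: build the raw update.
  std::vector<float> update(w.size());
  double w_norm = 0.0, u_norm = 0.0;
  for (size_t i = 0; i < w.size(); ++i) {
    m[i] = beta1 * m[i] + (1.0f - beta1) * g[i];
    v[i] = beta2 * v[i] + (1.0f - beta2) * g[i] * g[i];
    update[i] = m[i] / (std::sqrt(v[i]) + eps) + weight_decay_rate * w[i];
    w_norm += static_cast<double>(w[i]) * w[i];
    u_norm += static_cast<double>(update[i]) * update[i];
  }
  // Part roughly matching LambApplyWeightAssign: scale by the trust ratio
  // and assign the new weights in place.
  double ratio = (w_norm > 0.0 && u_norm > 0.0)
                     ? std::sqrt(w_norm) / std::sqrt(u_norm)
                     : 1.0;
  for (size_t i = 0; i < w.size(); ++i) {
    w[i] -= static_cast<float>(lr * ratio * update[i]);
  }
}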
/**
*@brief Fuses select, maximum, greater and sqrt. \n

@@ -1,5 +1,5 @@
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -89,6 +89,10 @@ REG_OP(HcomAllReduce)
* @par Attributes:
* @li root_rank: A required integer identifying the root rank in the op.
The input of this rank will be broadcast to the other ranks.
* @li fusion: An optional integer identifying whether the op needs to be
fused. Defaults to 0, meaning no fusion.
* @li fusion_id: An optional integer identifying the fusion id when fusion
is enabled. Defaults to -1.
* @li group: A required string identifying the group name of ranks
participating in the op.
* @par Outputs:
@@ -103,6 +107,8 @@ REG_OP(HcomBroadcast)
.DYNAMIC_OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16, DT_INT64, DT_UINT64}))
.REQUIRED_ATTR(root_rank, Int)
.REQUIRED_ATTR(group, String)
.ATTR(fusion, Int, 0)
.ATTR(fusion_id, Int, -1)
.ATTR(alpha, Float, 1.0)
.ATTR(beta, Float, 0.0)
.OP_END_FACTORY_REG(HcomBroadcast)
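
The new fusion and fusion_id attributes let the runtime batch several HcomBroadcast instances into one collective launch. As a rough illustration of that grouping idea only (the struct, function, and bucketing policy below are made up and are not the HCCL implementation), pending broadcasts that opt into fusion and share a (group, fusion_id) pair could be bucketed like this:

// Hypothetical illustration of how fusion/fusion_id could be used to bucket
// broadcasts; this is not the HCCL implementation.
#include <cstdint>
#include <map>
#include <string>
#include <utility>
#include <vector>

struct BroadcastTask {            // one HcomBroadcast instance (hypothetical)
  std::string group;              // attr: group
  int64_t root_rank;              // attr: root_rank
  int64_t fusion;                 // attr: fusion    (0 = do not fuse)
  int64_t fusion_id;              // attr: fusion_id (-1 = unset)
  std::vector<void*> buffers;     // dynamic input/output tensors
};

// Tasks that opted into fusion and share (group, fusion_id) land in the same
// bucket, so the runtime could launch a single fused broadcast per bucket.
std::map<std::pair<std::string, int64_t>, std::vector<BroadcastTask>>
GroupByFusionId(const std::vector<BroadcastTask>& tasks) {
  std::map<std::pair<std::string, int64_t>, std::vector<BroadcastTask>> buckets;
  int64_t solo_key = -1;          // give every non-fused task its own bucket
  for (const auto& t : tasks) {
    if (t.fusion != 0 && t.fusion_id >= 0) {
      buckets[{t.group, t.fusion_id}].push_back(t);
    } else {
      buckets[{t.group, solo_key--}].push_back(t);   // unique negative key
    }
  }
  return buckets;
}

Broadcasts with fusion left at 0 or fusion_id left at -1 keep their own bucket, matching the defaults registered above.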
@@ -213,6 +219,14 @@ REG_OP(HcomRemoteRead)
.REQUIRED_ATTR(dtype, Type)
.OP_END_FACTORY_REG(HcomRemoteRead)
REG_OP(HcomRemoteRefRead)
.INPUT(remote, TensorType({DT_UINT64}))
.INPUT(cache_var, TensorType({DT_UINT64}))
.INPUT(local_offset, TensorType({DT_UINT64}))
.OUTPUT(cache_var, TensorType({DT_UINT64}))
.REQUIRED_ATTR(dtype, Type)
.OP_END_FACTORY_REG(HcomRemoteRefRead)
/**
* @brief Performs Remote Write of input tensors
* @par Inputs:
@@ -225,5 +239,11 @@ REG_OP(HcomRemoteWrite)
.INPUT(local, TensorType::ALL())
.OP_END_FACTORY_REG(HcomRemoteWrite)
REG_OP(HcomRemoteScatterWrite)
.INPUT(remote, TensorType({DT_INT64, DT_UINT64}))
.INPUT(local, TensorType::ALL())
.OPTIONAL_INPUT(local_offset, TensorType({DT_UINT64}))
.OP_END_FACTORY_REG(HcomRemoteScatterWrite)
} // namespace ge
#endif // OPS_BUILT_IN_OP_PROTO_INC_HCOM_OPS_H_
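
HcomRemoteRefRead and HcomRemoteScatterWrite are registered here without doc comments. Judging only from the signatures (uint64 remote descriptors, a local data tensor, an optional local_offset), they appear to move rows between local memory and remote addresses. The sketch below is a guess at that row-wise copy semantics; the descriptor layout and every name in it are hypothetical, and the real ops would issue HCCL/RDMA transfers rather than memcpy.

// Hypothetical model of a row-wise remote scatter write; NOT the real op.
// The descriptor layout is assumed, and a real implementation would issue
// HCCL/RDMA transfers instead of memcpy.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

struct RemoteRowDesc {      // assumed layout of one row of the `remote` input
  uint64_t addr;            // destination address (must be writable memory here)
  uint64_t length;          // number of bytes available at that address
};

// Copy each local row to the address named by the matching remote descriptor.
void RemoteScatterWriteModel(const std::vector<RemoteRowDesc>& remote,
                             const std::vector<std::vector<uint8_t>>& local) {
  for (size_t i = 0; i < remote.size() && i < local.size(); ++i) {
    uint64_t n = std::min<uint64_t>(remote[i].length, local[i].size());
    std::memcpy(reinterpret_cast<void*>(remote[i].addr), local[i].data(),
                static_cast<size_t>(n));
  }
}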

@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

File diff suppressed because it is too large

@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

Some files were not shown because too many files have changed in this diff
