parent ed61d67c73
commit 2c836ff914
@@ -0,0 +1,110 @@
abs
acos
asin
atan
attention_lstm
bilinear_interp
bilinear_tensor_product
bpr_loss
brelu
conv_shift
cos
cos_sim
dequantize
elementwise_div
elementwise_max
elementwise_min
elu
fc
flatten
fsp
fused_embedding_fc_lstm
fused_embedding_seq_pool
fusion_gru
fusion_lstm
fusion_repeated_fc_relu
fusion_seqconv_eltadd_relu
fusion_seqexpand_concat_fc
fusion_seqpool_concat
fusion_squared_mat_sub
gelu
gru
hard_shrink
hierarchical_sigmoid
hinge_loss
huber_loss
im2sequence
l1_norm
label_smooth
leaky_relu
linear_chain_crf
log
log_loss
logsigmoid
lookup_table
lrn
lstm
lstm_unit
lstmp
margin_rank_loss
max_pool2d_with_index
max_pool3d_with_index
maxout
modified_huber_loss
multiplex
nce
nearest_interp
norm
pool2d
pool3d
pow
prelu
psroi_pool
quantize
rank_loss
reduce_max
reduce_mean
reduce_min
reduce_prod
reduce_sum
requantize
reshape
rnn_memory_helper
roi_align
roi_perspective_transform
roi_pool
round
row_conv
scatter
sequence_concat
sequence_conv
sequence_expand
sequence_expand_as
sequence_pad
sequence_scatter
sequence_slice
sequence_softmax
sequence_unpad
shuffle_channel
sigmoid_cross_entropy_with_logits
sin
softplus
softshrink
softsign
space_to_depth
spp
square
squared_l2_distance
squared_l2_norm
squeeze
stanh
swish
tanh_shrink
teacher_student_sigmoid_loss
tensor_array_to_tensor
thresholded_relu
transpose
tree_conv
unpool
unsqueeze
warpctc
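
The 110 operator names above form the spec of ops whose grad op is still registered with DefaultGradOpDescMaker. The checker script in the next file reads this list into a set and compares it against the ops reported by the locally built framework. The snippet below is only a toy illustration of that comparison: the op names and literal sets are made up for the example, while the real script queries fluid.core instead.

ops_in_spec = {'abs', 'acos', 'asin'}          # lines read from the spec file
ops_in_build = {'abs', 'asin', 'some_new_op'}  # reported by the built framework

only_in_spec = ops_in_spec - ops_in_build      # in the file, missing from the build
only_in_build = ops_in_build - ops_in_spec     # in the build, missing from the file

# Any non-empty difference makes the checker below exit with an error message.
print(sorted(only_in_spec), sorted(only_in_build))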
@@ -0,0 +1,66 @@
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

# Run on CPU only; querying the op registry does not need a GPU.
os.environ['CUDA_VISIBLE_DEVICES'] = ''

import paddle.fluid as fluid
import sys


def get_op_diff(filename):
    # Ops registered with DefaultGradOpDescMaker in the current build.
    ops_created_by_py_func = set(
        fluid.core._get_use_default_grad_op_desc_maker_ops())

    # Ops recorded in the committed spec file, one name per line.
    with open(filename, 'r') as f:
        ops_read_from_file = set([line.strip() for line in f.readlines()])

    diff_ops = []

    for op in ops_read_from_file:
        if op not in ops_created_by_py_func:
            diff_ops.append(op)
        else:
            ops_created_by_py_func.remove(op)

    err_msg = []
    if len(diff_ops) > 0:
        err_msg.append('Added grad op with DefaultGradOpDescMaker: ' +
                       str(diff_ops))

    # Whatever is left was reported by the build but is not in the spec file.
    ops_created_by_py_func = list(ops_created_by_py_func)
    if len(ops_created_by_py_func) > 0:
        err_msg.append('Remove grad op with DefaultGradOpDescMaker: ' +
                       str(ops_created_by_py_func))

    return err_msg


if len(sys.argv) != 2:
    print('Usage: python diff_use_default_grad_op_maker.py [filepath]')
    sys.exit(1)

file_path = str(sys.argv[1])
err_msg = get_op_diff(file_path)

if len(err_msg) > 0:
    _, filename = os.path.split(file_path)
    print('File `{}` is wrong compared to your PR revision!'.format(filename))
    print('Please use `python generate_op_use_grad_op_desc_maker_spec.py '
          '[filepath]` to generate new `{}` file'.format(filename))
    print('Error message is: ' + '; '.join(err_msg))
    sys.exit(1)
@@ -0,0 +1,29 @@
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

# Run on CPU only; querying the op registry does not need a GPU.
os.environ['CUDA_VISIBLE_DEVICES'] = ''

import paddle.fluid as fluid
import sys

if len(sys.argv) != 2:
    print('Usage: python generate_op_use_grad_op_desc_maker_spec.py [filepath]')
    sys.exit(1)

# Dump the ops registered with DefaultGradOpDescMaker, one name per line.
with open(sys.argv[1], 'w') as f:
    ops = fluid.core._get_use_default_grad_op_desc_maker_ops()
    for op in ops:
        f.write(op + '\n')
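
Taken together, the two scripts form a simple guard: the generator dumps the current set of ops that rely on DefaultGradOpDescMaker into the spec file above, and the checker fails when a revision's build no longer matches that file. Below is a minimal sketch of that workflow, not part of this commit: it assumes both scripts sit in the working directory and uses a hypothetical spec file name.

import subprocess
import sys

SPEC = 'op_use_default_grad_op_maker.spec'  # hypothetical path; adjust to the repo layout

# CI-style check: compare the committed spec against the locally built core.
ret = subprocess.call(
    [sys.executable, 'diff_use_default_grad_op_maker.py', SPEC])

if ret != 0:
    # The set of ops using DefaultGradOpDescMaker changed in this revision.
    # Regenerate the spec so the change shows up explicitly in the diff,
    # then review and commit the updated file.
    subprocess.check_call(
        [sys.executable, 'generate_op_use_grad_op_desc_maker_spec.py', SPEC])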