add_depthwiseConv_op_gpu
commit
e5e206e2b6
@@ -0,0 +1,114 @@
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import argparse
import matplotlib.pyplot as plt


def parse_args():
    parser = argparse.ArgumentParser('Parse Log')
    parser.add_argument(
        '--file_path', '-f', type=str, help='the path of the log file')
    parser.add_argument(
        '--sample_rate',
        '-s',
        type=float,
        default=1.0,
        help='the rate at which to take samples from the log')
    parser.add_argument(
        '--log_period',
        '-p',
        type=int,
        default=1,
        help='the number of batches between two consecutive log lines')

    args = parser.parse_args()
    return args


def parse_file(file_name):
    """Extract the loss and accuracy curves from a training log.

    Only lines that start with 'pass' and contain exactly five
    space-separated fields are parsed; the third and fourth fields are
    expected to be of the form '<name>=<value>,' holding the loss and the
    classification error of that log entry.
    """
    loss = []
    error = []
    with open(file_name) as f:
        for i, line in enumerate(f):
            line = line.strip()
            if not line.startswith('pass'):
                continue
            line_split = line.split(' ')
            if len(line_split) != 5:
                continue

            # Drop the trailing character (typically a comma) and keep the
            # part after '='.
            loss_str = line_split[2][:-1]
            cur_loss = float(loss_str.split('=')[-1])
            loss.append(cur_loss)

            err_str = line_split[3][:-1]
            cur_err = float(err_str.split('=')[-1])
            error.append(cur_err)

    accuracy = [1.0 - err for err in error]

    return loss, accuracy


def sample(metric, sample_rate):
    """Down-sample a metric curve by keeping every interval-th point, where
    interval = int(1.0 / sample_rate). If the curve is shorter than one
    interval, only the first point is returned.
    """
    interval = int(1.0 / sample_rate)
    if interval > len(metric):
        return metric[:1]

    # Use integer division so the result can be passed to range() under
    # Python 3 as well as Python 2.
    num = len(metric) // interval
    idx = [interval * i for i in range(num)]
    metric_sample = [metric[i] for i in idx]
    return metric_sample


def plot_metric(metric,
                batch_id,
                graph_title,
                line_style='b-',
                line_label='y',
                line_num=1):
    """Plot one or more curves against the batch id and save the figure as
    '<graph_title>.jpg'.
    """
    plt.figure()
    plt.title(graph_title)
    if line_num == 1:
        plt.plot(batch_id, metric, line_style, label=line_label)
    else:
        for i in range(line_num):
            plt.plot(batch_id, metric[i], line_style[i], label=line_label[i])
    plt.xlabel('batch')
    plt.ylabel(graph_title)
    plt.legend()
    plt.savefig(graph_title + '.jpg')
    plt.close()


def main():
    args = parse_args()
    assert 0. < args.sample_rate <= 1.0, \
        "The sample rate should be in the range (0, 1]."

    loss, accuracy = parse_file(args.file_path)
    batch = [args.log_period * i for i in range(len(loss))]

    batch_sample = sample(batch, args.sample_rate)
    loss_sample = sample(loss, args.sample_rate)
    accuracy_sample = sample(accuracy, args.sample_rate)

    plot_metric(loss_sample, batch_sample, 'loss', line_label='loss')
    plot_metric(
        accuracy_sample,
        batch_sample,
        'accuracy',
        line_style='g-',
        line_label='accuracy')


if __name__ == '__main__':
    main()
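For reference, a minimal sketch of exercising the script above end to end. The synthetic log line and the script name plot_log.py are assumptions for illustration only (neither appears in this diff); the line merely satisfies what parse_file expects, namely a 'pass' prefix and five space-separated '<name>=<value>,' fields.

# Write a tiny synthetic training log in the assumed format.
with open('train.log', 'w') as f:
    for i in range(20):
        f.write('pass=0, batch=%d, loss=%.3f, error=%.3f, time=1.0\n'
                % (i, 2.0 / (i + 1), 0.9 / (i + 1)))

# Reuse the functions defined above directly.
loss, accuracy = parse_file('train.log')
print(sample(loss, 0.5))       # keeps every 2nd loss value
print(sample(accuracy, 0.25))  # keeps every 4th accuracy value

# Roughly equivalent command line (placeholder script name):
#   python plot_log.py --file_path train.log --sample_rate 0.5 --log_period 1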
@@ -0,0 +1,144 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/math/sequence_padding.h"

namespace paddle {
namespace operators {
namespace math {

template <typename T>
class PaddingLoDTensorFunctor<platform::CPUDeviceContext, T> {
 public:
  void operator()(const platform::CPUDeviceContext& context,
                  const framework::LoDTensor& seq, framework::Tensor& padding,
                  bool norm_by_times) {
    auto lod = seq.lod();
    PADDLE_ENFORCE_GT(lod.size(), 0UL,
                      "The LoD of LoDTensor seq should not be null.");

    const size_t level = 0;
    framework::LoD abs_offset_lod = framework::ToAbsOffset(lod);

    auto seq_dims = seq.dims();
    PADDLE_ENFORCE_EQ(seq_dims[0], abs_offset_lod[level].back(),
                      "The first dimension of LoDTensor seq should be "
                      "equal to the sum of all sequences' lengths.");

    auto padding_dims = padding.dims();
    PADDLE_ENFORCE_EQ(padding_dims.size(), 3UL,
                      "The input padding should be a 3-D Tensor of shape "
                      "[max_sequence_length, num_sequences, sequence_width].");

    const size_t max_sequence_length = MaximumSequenceLength(lod, level);
    PADDLE_ENFORCE_EQ(padding_dims[0], max_sequence_length,
                      "The first dimension of Tensor padding should be the "
                      "maximum length of all sequences in LoDTensor seq.");

    const size_t num_sequences = abs_offset_lod[level].size() - 1;
    PADDLE_ENFORCE_EQ(padding_dims[1], num_sequences,
                      "The second dimension of Tensor padding should be the "
                      "number of sequences in LoDTensor seq.");

    const size_t sequence_width = seq.numel() / seq_dims[0];
    PADDLE_ENFORCE_EQ(padding_dims[2], sequence_width,
                      "The third dimension of Tensor padding should be the "
                      "width of sequence in LoDTensor seq.");

    const T* seq_data = seq.data<T>();
    T* padding_data = padding.data<T>();
    // The padding tensor is time-major: element (i, j, k) holds the k-th
    // feature of step i of sequence j, or zero if sequence j is shorter
    // than i + 1 steps.
    for (size_t i = 0; i < max_sequence_length; ++i) {
      for (size_t j = 0; j < num_sequences; ++j) {
        size_t start_pos = abs_offset_lod[level][j];
        size_t sequence_length = abs_offset_lod[level][j + 1] - start_pos;
        if (i < sequence_length) {
          // i < sequence_length implies sequence_length > 0, so the
          // division below is safe.
          T scale =
              norm_by_times ? (1.0f / static_cast<T>(sequence_length)) : 1.0f;
          for (size_t k = 0; k < sequence_width; ++k) {
            padding_data[(i * num_sequences + j) * sequence_width + k] =
                seq_data[(start_pos + i) * sequence_width + k] * scale;
          }
        } else {
          memset(padding_data + (i * num_sequences + j) * sequence_width, 0,
                 sequence_width * sizeof(T));
        }
      }
    }
  }
};
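// Worked example of the layout above: with lod = {{0, 2, 5}} and
// sequence_width = 2, seq packs 5 steps (rows 0-1 belong to sequence 0,
// rows 2-4 to sequence 1) and padding has shape [3, 2, 2]. Element
// padding(i, j, k) lives at offset (i * 2 + j) * 2 + k and is copied from
// seq row abs_offset_lod[0][j] + i; padding(2, 0, k) is zero-filled
// because sequence 0 has only 2 steps.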

template <typename T>
class UnpaddingLoDTensorFunctor<platform::CPUDeviceContext, T> {
 public:
  void operator()(const platform::CPUDeviceContext& context,
                  framework::LoDTensor& seq, const framework::Tensor& padding,
                  bool norm_by_times) {
    auto lod = seq.lod();
    PADDLE_ENFORCE_GT(lod.size(), 0UL,
                      "The LoD of LoDTensor seq should not be null.");

    const size_t level = 0;
    framework::LoD abs_offset_lod = framework::ToAbsOffset(lod);

    auto seq_dims = seq.dims();
    PADDLE_ENFORCE_EQ(seq_dims[0], abs_offset_lod[level].back(),
                      "The first dimension of LoDTensor seq should be "
                      "equal to the sum of all sequences' lengths.");

    auto padding_dims = padding.dims();
    PADDLE_ENFORCE_EQ(padding_dims.size(), 3UL,
                      "The input padding should be a 3-D Tensor of shape "
                      "[max_sequence_length, num_sequences, sequence_width].");

    const size_t max_sequence_length = MaximumSequenceLength(lod, level);
    PADDLE_ENFORCE_EQ(padding_dims[0], max_sequence_length,
                      "The first dimension of Tensor padding should be "
                      "the maximum length of all sequences in LoDTensor seq.");

    const size_t num_sequences = abs_offset_lod[level].size() - 1;
    PADDLE_ENFORCE_EQ(padding_dims[1], num_sequences,
                      "The second dimension of Tensor padding should be "
                      "the number of sequences in LoDTensor seq.");

    const size_t sequence_width = seq.numel() / seq_dims[0];
    PADDLE_ENFORCE_EQ(padding_dims[2], sequence_width,
                      "The third dimension of Tensor padding should be the "
                      "width of sequence in LoDTensor seq.");

    const T* padding_data = padding.data<T>();
    T* seq_data = seq.data<T>();
    // Copy the valid (non-padded) steps of each sequence back into the
    // packed LoDTensor layout.
    for (size_t i = 0; i < num_sequences; ++i) {
      size_t start_pos = abs_offset_lod[level][i];
      size_t sequence_length = abs_offset_lod[level][i + 1] - start_pos;
      for (size_t j = 0; j < sequence_length; ++j) {
        // j < sequence_length implies sequence_length > 0, so the
        // division below is safe.
        T scale =
            norm_by_times ? (1.0f / static_cast<T>(sequence_length)) : 1.0f;
        for (size_t k = 0; k < sequence_width; ++k) {
          seq_data[(start_pos + j) * sequence_width + k] =
              padding_data[(j * num_sequences + i) * sequence_width + k] *
              scale;
        }
      }
    }
  }
};

template class PaddingLoDTensorFunctor<platform::CPUDeviceContext, float>;
template class UnpaddingLoDTensorFunctor<platform::CPUDeviceContext, float>;

}  // namespace math
}  // namespace operators
}  // namespace paddle
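To make the memory layout handled by the two functors above concrete, here is a small self-contained Python sketch of the same transformation; it is an illustration only, not the Paddle API. pad packs variable-length sequences into the time-major [max_sequence_length, num_sequences, sequence_width] layout used above, optionally scaling each step by 1 / sequence_length as norm_by_times does, and unpad re-packs the valid steps (an exact inverse when no scaling is applied).

def pad(seqs, norm_by_times=False):
    """Pack variable-length sequences into a dense
    [max_sequence_length, num_sequences, sequence_width] list,
    zero-filling the steps past each sequence's end."""
    num_sequences = len(seqs)
    max_len = max(len(s) for s in seqs)
    width = len(seqs[0][0])
    padding = [[[0.0] * width for _ in range(num_sequences)]
               for _ in range(max_len)]
    for j, s in enumerate(seqs):
        scale = 1.0 / len(s) if norm_by_times else 1.0
        for i, step in enumerate(s):
            padding[i][j] = [v * scale for v in step]
    return padding


def unpad(padding, lengths, norm_by_times=False):
    """Re-pack the first lengths[j] steps of each column j of padding."""
    seqs = []
    for j, length in enumerate(lengths):
        scale = 1.0 / length if norm_by_times else 1.0
        seqs.append([[v * scale for v in padding[i][j]]
                     for i in range(length)])
    return seqs


# Two sequences of lengths 2 and 3 with width 2 (LoD offsets {0, 2, 5}).
seqs = [[[1.0, 2.0], [3.0, 4.0]],
        [[5.0, 6.0], [7.0, 8.0], [9.0, 10.0]]]
padded = pad(seqs)
assert padded[2][0] == [0.0, 0.0]     # sequence 0 is zero-padded at step 2
assert unpad(padded, [2, 3]) == seqs  # round trip with no scaling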