Merge pull request #952 from WenmuZhou/dygraph

Use resnet_vd from PaddleClas
release/2.0-rc1-0
zhoujun 5 years ago committed by GitHub
commit 52b40f36e5
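The backbone swap pulls in the ResNet-vd models from PaddleClas. The defining "variant d" tweak is in the downsampling shortcut: a 2x2 average pool followed by a stride-1 1x1 convolution, instead of a single stride-2 1x1 convolution, so downsampling no longer discards activations. A standalone sketch of that shortcut (illustrative only, not the PaddleClas implementation):

import paddle
import paddle.nn as nn

class VdShortcut(nn.Layer):
    """Sketch of the ResNet-vd downsampling shortcut (names are illustrative)."""
    def __init__(self, in_channels, out_channels, stride):
        super().__init__()
        # Downsample with a 2x2 average pool instead of conv stride.
        self.pool = nn.AvgPool2D(2, stride=2, padding=0, ceil_mode=True) if stride == 2 else None
        self.conv = nn.Conv2D(in_channels, out_channels, kernel_size=1, stride=1, bias_attr=False)
        self.bn = nn.BatchNorm2D(out_channels)

    def forward(self, x):
        if self.pool is not None:
            x = self.pool(x)
        return self.bn(self.conv(x))

x = paddle.randn([1, 64, 48, 160])
print(VdShortcut(64, 128, stride=2)(x).shape)  # [1, 128, 24, 80]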

@@ -3,7 +3,7 @@ Global:
epoch_num: 1200
log_smooth_window: 20
print_batch_step: 2
save_model_dir: ./output/20201010/
save_model_dir: ./output/db_mv3/
save_epoch_step: 1200
# evaluation is run every 5000 iterations after the 4000th iteration
eval_batch_step: 8
@@ -66,9 +66,9 @@ Metric:
TRAIN:
dataset:
name: SimpleDataSet
data_dir: /home/zhoujun20/detection/
data_dir: ./detection/
file_list:
- /home/zhoujun20/detection/train_icdar2015_label.txt # dataset1
- ./detection/train_icdar2015_label.txt # dataset1
ratio_list: [1.0]
transforms:
- DecodeImage: # load image
@@ -103,14 +103,14 @@ TRAIN:
shuffle: True
drop_last: False
batch_size: 16
num_workers: 6
num_workers: 8
EVAL:
dataset:
name: SimpleDataSet
data_dir: /home/zhoujun20/detection/
data_dir: ./detection/
file_list:
- /home/zhoujun20/detection/test_icdar2015_label.txt
- ./detection/test_icdar2015_label.txt
transforms:
- DecodeImage: # load image
img_mode: BGR
@@ -130,4 +130,4 @@ EVAL:
shuffle: False
drop_last: False
batch_size: 1 # must be 1
num_workers: 6
num_workers: 8

@@ -3,14 +3,14 @@ Global:
epoch_num: 1200
log_smooth_window: 20
print_batch_step: 2
save_model_dir: ./output/20201010/
save_model_dir: ./output/20201015_r50/
save_epoch_step: 1200
# evaluation is run every 5000 iterations after the 4000th iteration
eval_batch_step: 8
# if pretrained_model is saved in static mode, load_static_weights must set to True
load_static_weights: True
cal_metric_during_train: False
pretrained_model: /home/zhoujun20/pretrain_models/MobileNetV3_large_x0_5_pretrained
pretrained_model: /home/zhoujun20/pretrain_models/ResNet50_vd_ssld_pretrained/
checkpoints: #./output/det_db_0.001_DiceLoss_256_pp_config_2.0b_4gpu/best_accuracy
save_inference_dir:
use_visualdl: True
@@ -102,7 +102,7 @@ TRAIN:
shuffle: True
drop_last: False
batch_size: 16
num_workers: 6
num_workers: 8
EVAL:
dataset:
@@ -129,4 +129,4 @@ EVAL:
shuffle: False
drop_last: False
batch_size: 1 # must be 1
num_workers: 6
num_workers: 8

@@ -84,7 +84,7 @@ TRAIN:
batch_size: 256
shuffle: True
drop_last: True
num_workers: 6
num_workers: 8
EVAL:
dataset:
@@ -105,4 +105,4 @@ EVAL:
shuffle: False
drop_last: False
batch_size: 256
num_workers: 6
num_workers: 8

@@ -83,7 +83,7 @@ TRAIN:
batch_size: 256
shuffle: True
drop_last: True
num_workers: 6
num_workers: 8
EVAL:
dataset:
@@ -103,4 +103,4 @@ EVAL:
shuffle: False
drop_last: False
batch_size: 256
num_workers: 6
num_workers: 8

@@ -0,0 +1,105 @@
Global:
use_gpu: false
epoch_num: 500
log_smooth_window: 20
print_batch_step: 1
save_model_dir: ./output/rec/test/
save_epoch_step: 500
# evaluation is run every 5000 iterations after the 4000th iteration
eval_batch_step: 1016
# if pretrained_model is saved in static mode, load_static_weights must set to True
load_static_weights: True
cal_metric_during_train: True
pretrained_model:
checkpoints: #output/rec/rec_crnn/best_accuracy
save_inference_dir:
use_visualdl: True
infer_img: doc/imgs_words/ch/word_1.jpg
# for data or label process
max_text_length: 80
character_dict_path: /home/zhoujun20/rec/lmdb/dict.txt
character_type: 'en'
use_space_char: True
infer_mode: False
use_tps: False
Optimizer:
name: Adam
beta1: 0.9
beta2: 0.999
learning_rate:
name: Cosine
lr: 0.0005
warmup_epoch: 1
regularizer:
name: 'L2'
factor: 0.00001
Architecture:
type: rec
algorithm: CRNN
Transform:
Backbone:
name: MobileNetV3
scale: 0.5
model_name: small
small_stride: [ 1, 2, 2, 2 ]
Neck:
name: SequenceEncoder
encoder_type: reshape
Head:
name: CTC
fc_decay: 0.00001
Loss:
name: CTCLoss
PostProcess:
name: CTCLabelDecode
Metric:
name: RecMetric
main_indicator: acc
TRAIN:
dataset:
name: LMDBDateSet
file_list:
- /Users/zhoujun20/Downloads/evaluation_new # dataset1
ratio_list: [ 0.4,0.6 ]
transforms:
- DecodeImage: # load image
img_mode: BGR
channel_first: False
- CTCLabelEncode: # Class handling label
- RecAug:
- RecResizeImg:
image_shape: [ 3,32,320 ]
- keepKeys:
keep_keys: [ 'image','label','length' ] # the dataloader returns a list in this order
loader:
batch_size: 256
shuffle: True
drop_last: True
num_workers: 8
EVAL:
dataset:
name: LMDBDateSet
file_list:
- /home/zhoujun20/rec/lmdb/val
transforms:
- DecodeImage: # load image
img_mode: BGR
channel_first: False
- CTCLabelEncode: # Class handling label
- RecResizeImg:
image_shape: [ 3,32,320 ]
- keepKeys:
keep_keys: [ 'image','label','length' ] # the dataloader returns a list in this order
loader:
shuffle: False
drop_last: False
batch_size: 256
num_workers: 8
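The new recognition config above can be inspected with plain PyYAML before wiring it into training; the file name below is a guess (the diff header is not shown), and tools/program.load_config is the project's actual entry point for configs:

import yaml

with open('configs/rec/rec_mv3_none_none_ctc_local.yml') as f:  # hypothetical path
    cfg = yaml.safe_load(f)

print(cfg['Global']['character_type'])   # 'en'
print(cfg['Architecture']['Backbone'])   # MobileNetV3, scale 0.5, model_name small
print(list(cfg['TRAIN'].keys()))         # dataset and loader sections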

@@ -42,7 +42,7 @@ Architecture:
Transform:
Backbone:
name: ResNet
layers: 200
layers: 34
Neck:
name: SequenceEncoder
encoder_type: fc
@@ -82,7 +82,7 @@ TRAIN:
batch_size: 256
shuffle: True
drop_last: True
num_workers: 6
num_workers: 8
EVAL:
dataset:
@@ -103,4 +103,4 @@ EVAL:
shuffle: False
drop_last: False
batch_size: 256
num_workers: 6
num_workers: 8

@@ -94,13 +94,11 @@ def check_static():
from ppocr.utils.logging import get_logger
from tools import program
config = program.load_config('configs/det/det_r50_vd_db.yml')
config = program.load_config('configs/rec/rec_r34_vd_none_bilstm_ctc.yml')
# import cv2
# data = cv2.imread('doc/imgs/1.jpg')
# data = normalize(data)
logger = get_logger()
data = np.zeros((1, 3, 640, 640), dtype=np.float32)
np.random.seed(0)
data = np.random.rand(1, 3, 32, 320).astype(np.float32)
paddle.disable_static()
config['Architecture']['in_channels'] = 3
@@ -110,17 +108,15 @@ def check_static():
load_dygraph_pretrain(
model,
logger,
'/Users/zhoujun20/Desktop/code/PaddleOCR/db/db',
'/Users/zhoujun20/Desktop/code/PaddleOCR/cnn_ctc/cnn_ctc',
load_static_weights=True)
x = paddle.to_variable(data)
x = paddle.to_tensor(data)
y = model(x)
for y1 in y:
print(y1.shape)
#
# # from matplotlib import pyplot as plt
# # plt.imshow(y.numpy())
# # plt.show()
static_out = np.load('/Users/zhoujun20/Desktop/code/PaddleOCR/db/db.npy')
static_out = np.load(
'/Users/zhoujun20/Desktop/code/PaddleOCR/output/conv.npy')
diff = y.numpy() - static_out
print(y.shape, static_out.shape, diff.mean())
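The check_static changes above switch the comparison from the DB detector to the CTC recognizer: a fixed-seed random input is run through the dygraph model and compared against an output dumped by the static-graph code path. A self-contained sketch of that pattern, with a stand-in Conv2D in place of the real model and an assumed .npy path:

import numpy as np
import paddle
import paddle.nn as nn

def compare_with_static(model, static_npy_path, input_shape=(1, 3, 32, 320)):
    np.random.seed(0)                          # same seed as the static-graph run
    data = np.random.rand(*input_shape).astype(np.float32)
    y = model(paddle.to_tensor(data))          # dygraph forward pass
    static_out = np.load(static_npy_path)      # output saved by the static-graph script
    diff = y.numpy() - static_out
    print(y.shape, static_out.shape, np.abs(diff).max())

dummy = nn.Conv2D(3, 8, kernel_size=3, padding=1)   # placeholder for the OCR model
# compare_with_static(dummy, './output/conv.npy')   # path is an assumption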

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -116,7 +116,7 @@ class EncoderWithFC(nn.Layer):
class SequenceEncoder(nn.Layer):
def __init__(self, in_channels, encoder_type, hidden_size, **kwargs):
def __init__(self, in_channels, encoder_type, hidden_size=48, **kwargs):
super(SequenceEncoder, self).__init__()
self.encoder_reshape = EncoderWithReshape(in_channels)
self.out_channels = self.encoder_reshape.out_channels
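Giving SequenceEncoder a default hidden_size lets configs select encoder_type: reshape without supplying one. The "reshape" path just flattens the height-1 feature map into a sequence for the CTC head; a standalone illustration of that reshaping (not the EncoderWithReshape code itself, shapes are illustrative):

import paddle

feat = paddle.randn([8, 288, 1, 80])   # N, C, H=1, W from the recognition backbone
seq = feat.squeeze(axis=2)             # N, C, W
seq = seq.transpose([0, 2, 1])         # N, W, C -> one feature vector per time step
print(seq.shape)                       # [8, 80, 288]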

@@ -88,20 +88,23 @@ def main(config, device, logger, vdl_writer):
best_model_dict, logger, vdl_writer)
def test_reader(config, place, logger):
train_loader = build_dataloader(config['TRAIN'], place)
def test_reader(config, place, logger, global_config):
train_loader, _ = build_dataloader(
config['TRAIN'], place, global_config=global_config)
import time
starttime = time.time()
count = 0
try:
for data in train_loader():
for data in train_loader:
count += 1
if count % 1 == 0:
batch_time = time.time() - starttime
starttime = time.time()
logger.info("reader: {}, {}, {}".format(count,
len(data), batch_time))
logger.info("reader: {}, {}, {}".format(
count, len(data[0]), batch_time))
except Exception as e:
import traceback
traceback.print_exc()
logger.info(e)
logger.info("finish reader: {}, Success!".format(count))
@@ -130,7 +133,7 @@ def dis_main():
device))
main(config, device, logger, vdl_writer)
# test_reader(config, place, logger)
# test_reader(config, device, logger, config['Global'])
if __name__ == '__main__':
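test_reader now unpacks the pair returned by build_dataloader and iterates the loader object directly, logging the batch size via data[0]. A self-contained version of that throughput check, with a dummy generator standing in for build_dataloader:

import time
import numpy as np

def dummy_loader(num_batches=5, batch_size=16):
    # Stand-in for build_dataloader(config['TRAIN'], device, global_config=...).
    for _ in range(num_batches):
        images = np.zeros((batch_size, 3, 32, 320), dtype=np.float32)
        labels = np.zeros((batch_size, 80), dtype=np.int64)
        yield [images, labels]

start = time.time()
count = 0
for data in dummy_loader():
    count += 1
    batch_time = time.time() - start
    start = time.time()
    print("reader: {}, {}, {:.4f}s".format(count, len(data[0]), batch_time))
print("finish reader: {}, Success!".format(count))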
