# svtrv2_cam_tps_on.yml

Global:
  device: gpu
  epoch_num: 20
  log_smooth_window: 20
  print_batch_step: 10
  output_dir: ./output/rec/u14m_filter/svtrv2_cam_tps_on
  eval_epoch_step: [0, 1]
  eval_batch_step: [0, 500]
  cal_metric_during_train: False
  pretrained_model:
  checkpoints:
  use_tensorboard: false
  infer_img:
  # for data or label process
  character_dict_path: ./tools/utils/EN_symbol_dict.txt
  max_text_length: &max_text_length 25
  use_space_char: False
  save_res_path: ./output/rec/u14m_filter/predicts_svtrv2_cam_tps_on.txt
  use_amp: True
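# The peak lr below is tuned for 4 GPUs x 256 images/GPU (an effective batch of 1024,
# per its inline comment); with a different effective batch size it would presumably
# be rescaled, e.g. linearly.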
Optimizer:
  name: AdamW
  lr: 0.00065 # for 4gpus bs256/gpu
  weight_decay: 0.05
  filter_bias_and_bn: True
LRScheduler:
  name: OneCycleLR
  warmup_epoch: 1.5 # pct_start = 0.075, i.e. 0.075 * 20 epochs = 1.5 warmup epochs
  cycle_momentum: False
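# OneCycleLR ramps up to the peak lr over the 1.5 warmup epochs and anneals it over
# the remaining ~18.5 of the 20 epochs; cycle_momentum is left off for the AdamW
# optimizer above.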
Architecture:
  model_type: rec
  algorithm: CAM
  Transform:
    name: Aster_TPS
    tps_inputsize: [32, 64]
    tps_outputsize: &img_shape [32, 128]
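  # In the ASTER-style TPS module, tps_inputsize is presumably the downsampled view fed
  # to the localization network, and tps_outputsize is the rectified image handed to the
  # encoder. The &img_shape anchor reuses this [32, 128] shape for CAMLabelEncode's
  # image_shape in the Train transforms below.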
  Encoder:
    name: CAMEncoder
    encoder_config:
      name: SVTRv2LNConvTwo33
      use_pos_embed: False
      dims: [128, 256, 384]
      depths: [6, 6, 6]
      num_heads: [4, 8, 12]
      mixer: [['Conv','Conv','Conv','Conv','Conv','Conv'],['Conv','Conv','FGlobal','Global','Global','Global'],['Global','Global','Global','Global','Global','Global']]
      local_k: [[5, 5], [5, 5], [-1, -1]]
      sub_k: [[1, 1], [2, 1], [-1, -1]]
      last_stage: false
      feat2d: True
    nb_classes: 97
    strides: [[4, 4], [1, 1], [2, 1], [1, 1]]
    k_size: [[2, 2], [1, 1], [2, 1], [1, 1]]
    q_size: [4, 32]
    deform_stride: 2
    stage_idx: 2
    use_depthwise_unet: True
    use_more_unet: False
    binary_loss_type: BanlanceMultiClassCrossEntropyLoss
    mid_size: True
    d_embedding: 384
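  # The three inner mixer lists map one-to-one onto the stages declared by depths
  # [6, 6, 6]: all-Conv mixing in stage 1, a Conv-to-Global transition in stage 2, and
  # all-Global attention in stage 3. nb_classes and binary_loss_type configure CAM's
  # auxiliary binary character-mask branch; its pairing with loss_weight_binary under
  # Loss below is inferred from the parameter names.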
  Decoder:
    name: CAMDecoder
    num_encoder_layers: -1
    beam_size: 0
    num_decoder_layers: 2
    nhead: 8
    max_len: *max_text_length
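  # max_len reuses the *max_text_length anchor (25) defined in Global; beam_size 0
  # presumably means greedy decoding, and num_encoder_layers -1 presumably skips any
  # extra encoder layers inside the decoder module.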
Loss:
  name: CAMLoss
  loss_weight_binary: 1.5
  label_smoothing: 0.
Metric:
  name: RecMetric
  main_indicator: acc
  is_filter: True
PostProcess:
  name: ARLabelDecode
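# is_filter: True presumably applies the common benchmark normalization (lowercasing
# and dropping non-alphanumeric characters) before word accuracy is computed.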
Train:
  dataset:
    name: LMDBDataSet
    data_dir: ../Union14M-L-LMDB-Filtered
    transforms:
      - DecodeImagePIL: # load image
          img_mode: RGB
      - PARSeqAugPIL:
      - CAMLabelEncode: # Class handling label
          font_path: ./arial.ttf
          image_shape: *img_shape
      - RecTVResize:
          image_shape: [64, 256]
          padding: False
      - KeepKeys:
          keep_keys: ['image', 'label', 'length', 'binary_mask'] # dataloader will return list in this order
  loader:
    shuffle: True
    batch_size_per_card: 256
    drop_last: True
    num_workers: 4
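# CAMLabelEncode presumably renders the label text with arial.ttf to build the
# binary_mask consumed by the encoder's mask branch (inferred from its inputs and the
# keep_keys list); the Eval pipeline below drops the mask and uses ARLabelEncode instead.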
Eval:
  dataset:
    name: LMDBDataSet
    data_dir: ../evaluation
    transforms:
      - DecodeImagePIL: # load image
          img_mode: RGB
      - ARLabelEncode: # Class handling label
      - RecTVResize:
          image_shape: [64, 256]
          padding: False
      - KeepKeys:
          keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order
  loader:
    shuffle: False
    drop_last: False
    batch_size_per_card: 256
    num_workers: 2
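# A typical 4-GPU launch with this file (script name and flags are an assumption;
# check the repository's tools/ directory for the actual entry point):
#   torchrun --nproc_per_node=4 tools/train_rec.py --c path/to/svtrv2_cam_tps_on.yml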