# svtrv2_visionlan_LF_1.yml
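# SVTRv2 encoder + VisionLAN decoder, LF_1 training step.
# Trains on Union14M-L-LMDB-Filtered and evaluates on the LMDB benchmark sets under ../evaluation.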

Global:
  device: gpu
  epoch_num: 10
  log_smooth_window: 20
  print_batch_step: 10
  output_dir: ./output/rec/u14m_filter/svtrv2_visionlan_LF1/
  eval_epoch_step: [0, 1]
  eval_batch_step: [0, 500]
  cal_metric_during_train: True
  pretrained_model:
  checkpoints:
  use_tensorboard: false
  infer_img:
  # for data or label process
  character_dict_path: &character_dict_path ./tools/utils/EN_symbol_dict.txt # 96en
  # ./tools/utils/ppocr_keys_v1.txt # ch
  max_text_length: &max_text_length 25
  use_space_char: &use_space_char False
  save_res_path: ./output/rec/u14m_filter/predicts_svtrv2_visionlan_LF1.txt
  grad_clip_val: 20
  use_amp: True

Optimizer:
  name: AdamW
  lr: 0.00065 # for 4gpus bs256/gpu
  weight_decay: 0.05
  filter_bias_and_bn: True
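  # Note: lr is tuned for 4 GPUs x 256 samples/GPU (global batch 1024), per the comment above.
  # A common heuristic (an assumption here, not stated in this config) is to scale lr linearly
  # with the global batch size if the GPU count or per-card batch size changes.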

LRScheduler:
  name: OneCycleLR
  warmup_epoch: 1.5 # pct_start = warmup_epoch / epoch_num
  cycle_momentum: False

Architecture:
  model_type: rec
  algorithm: VisionLAN
  Transform:
  Encoder:
    name: SVTRv2LNConvTwo33
    use_pos_embed: False
    dims: [128, 256, 384]
    depths: [6, 6, 6]
    num_heads: [4, 8, 12]
    mixer: [['Conv','Conv','Conv','Conv','Conv','Conv'],['Conv','Conv','FGlobal','Global','Global','Global'],['Global','Global','Global','Global','Global','Global']]
    local_k: [[5, 5], [5, 5], [-1, -1]]
    sub_k: [[1, 1], [2, 1], [-1, -1]]
    last_stage: false
    feat2d: True
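    # Encoder note: dims/depths/num_heads/mixer/local_k/sub_k are per-stage lists for the three
    # encoder stages; the mixer schedule uses local 'Conv' mixing early and 'Global' attention later
    # (reading based on the SVTRv2 design; only the lists above are given by this config).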
  Decoder:
    name: VisionLANDecoder
    training_step: &training_step 'LF_1'
    n_position: 128
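    # training_step is anchored (&training_step) so VisionLANLoss below reuses the same value.
    # In VisionLAN, LF_1 is the language-free first stage; LF_2 and LA are trained from separate
    # configs (assumption from the VisionLAN training recipe, not stated in this file).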

Loss:
  name: VisionLANLoss
  training_step: *training_step

PostProcess:
  name: VisionLANLabelDecode
  character_dict_path: *character_dict_path
  use_space_char: *use_space_char

Metric:
  name: RecMetric
  main_indicator: acc
  is_filter: True

Train:
  dataset:
    name: LMDBDataSet
    data_dir: ../Union14M-L-LMDB-Filtered
    transforms:
      - DecodeImagePIL: # load image
          img_mode: RGB
      - PARSeqAugPIL:
      - VisionLANLabelEncode:
          character_dict_path: *character_dict_path
          use_space_char: *use_space_char
          max_text_length: *max_text_length
      - RecTVResize:
          image_shape: [32, 128]
          padding: False
      - KeepKeys:
          keep_keys: ['image', 'label', 'label_res', 'label_sub', 'label_id', 'length'] # dataloader will return list in this order
  loader:
    shuffle: True
    batch_size_per_card: 256
    drop_last: True
    num_workers: 4

Eval:
  dataset:
    name: LMDBDataSet
    data_dir: ../evaluation
    transforms:
      - DecodeImagePIL: # load image
          img_mode: RGB
      - VisionLANLabelEncode:
          character_dict_path: *character_dict_path
          use_space_char: *use_space_char
          max_text_length: *max_text_length
      - RecTVResize:
          image_shape: [32, 128]
          padding: False
      - KeepKeys:
          keep_keys: ['image', 'label', 'label_res', 'label_sub', 'label_id', 'length'] # dataloader will return list in this order
  loader:
    shuffle: False
    drop_last: False
    batch_size_per_card: 256
    num_workers: 2
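
# Launch (assumed OpenOCR-style entry point and flags; verify the script path in your checkout):
#   CUDA_VISIBLE_DEVICES=0,1,2,3 torchrun --nproc_per_node=4 tools/train_rec.py \
#     --c path/to/svtrv2_visionlan_LF_1.yml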