abinet_aug.py

  1. """This code is refer from:
  2. https://github.com/FangShancheng/ABINet/blob/main/transforms.py
  3. """
  4. import math
  5. import numbers
  6. import random
  7. import cv2
  8. import numpy as np
  9. from PIL import Image
  10. from torchvision.transforms import ColorJitter, Compose
  11. def sample_asym(magnitude, size=None):
  12. return np.random.beta(1, 4, size) * magnitude
  13. def sample_sym(magnitude, size=None):
  14. return (np.random.beta(4, 4, size=size) - 0.5) * 2 * magnitude
  15. def sample_uniform(low, high, size=None):
  16. return np.random.uniform(low, high, size=size)
  17. def get_interpolation(type='random'):
  18. if type == 'random':
  19. choice = [
  20. cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC,
  21. cv2.INTER_AREA
  22. ]
  23. interpolation = choice[random.randint(0, len(choice) - 1)]
  24. elif type == 'nearest':
  25. interpolation = cv2.INTER_NEAREST
  26. elif type == 'linear':
  27. interpolation = cv2.INTER_LINEAR
  28. elif type == 'cubic':
  29. interpolation = cv2.INTER_CUBIC
  30. elif type == 'area':
  31. interpolation = cv2.INTER_AREA
  32. else:
  33. raise TypeError(
  34. 'Interpolation types only nearest, linear, cubic, area are supported!'
  35. )
  36. return interpolation
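

# Note (added for clarity, not in the original file): sample_asym draws from
# Beta(1, 4) scaled by `magnitude`, so it yields values in [0, magnitude]
# biased toward 0; sample_sym draws from Beta(4, 4) recentred and rescaled to
# [-magnitude, magnitude], symmetric around 0; sample_uniform is plain
# np.random.uniform. These helpers drive all random parameters below.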


class CVRandomRotation(object):
    def __init__(self, degrees=15):
        assert isinstance(degrees,
                          numbers.Number), 'degrees should be a single number.'
        assert degrees >= 0, 'degrees must be non-negative.'
        self.degrees = degrees

    @staticmethod
    def get_params(degrees):
        return sample_sym(degrees)

    def __call__(self, img):
        angle = self.get_params(self.degrees)
        src_h, src_w = img.shape[:2]
        M = cv2.getRotationMatrix2D(center=(src_w / 2, src_h / 2),
                                    angle=angle,
                                    scale=1.0)
        abs_cos, abs_sin = abs(M[0, 0]), abs(M[0, 1])
        # enlarge the canvas so the rotated image is not cropped
        dst_w = int(src_h * abs_sin + src_w * abs_cos)
        dst_h = int(src_h * abs_cos + src_w * abs_sin)
        M[0, 2] += (dst_w - src_w) / 2
        M[1, 2] += (dst_h - src_h) / 2

        flags = get_interpolation()
        return cv2.warpAffine(img,
                              M, (dst_w, dst_h),
                              flags=flags,
                              borderMode=cv2.BORDER_REPLICATE)


class CVRandomAffine(object):
    def __init__(self, degrees, translate=None, scale=None, shear=None):
        assert isinstance(degrees,
                          numbers.Number), 'degrees should be a single number.'
        assert degrees >= 0, 'degrees must be non-negative.'
        self.degrees = degrees

        if translate is not None:
            assert (
                isinstance(translate, (tuple, list)) and len(translate) == 2
            ), 'translate should be a list or tuple and it must be of length 2.'
            for t in translate:
                if not (0.0 <= t <= 1.0):
                    raise ValueError(
                        'translation values should be between 0 and 1')
        self.translate = translate

        if scale is not None:
            assert (
                isinstance(scale, (tuple, list)) and len(scale) == 2
            ), 'scale should be a list or tuple and it must be of length 2.'
            for s in scale:
                if s <= 0:
                    raise ValueError('scale values should be positive')
        self.scale = scale

        if shear is not None:
            if isinstance(shear, numbers.Number):
                if shear < 0:
                    raise ValueError(
                        'If shear is a single number, it must be positive.')
                self.shear = [shear]
            else:
                assert isinstance(shear, (tuple, list)) and (
                    len(shear) == 2
                ), 'shear should be a list or tuple and it must be of length 2.'
                self.shear = shear
        else:
            self.shear = shear

    def _get_inverse_affine_matrix(self, center, angle, translate, scale,
                                   shear):
        # https://github.com/pytorch/vision/blob/v0.4.0/torchvision/transforms/functional.py#L717
        from numpy import cos, sin, tan

        if isinstance(shear, numbers.Number):
            shear = [shear, 0]

        if not (isinstance(shear, (tuple, list)) and len(shear) == 2):
            raise ValueError(
                'Shear should be a single value or a tuple/list containing ' +
                'two values. Got {}'.format(shear))

        rot = math.radians(angle)
        sx, sy = [math.radians(s) for s in shear]

        cx, cy = center
        tx, ty = translate

        # RSS without scaling
        a = cos(rot - sy) / cos(sy)
        b = -cos(rot - sy) * tan(sx) / cos(sy) - sin(rot)
        c = sin(rot - sy) / cos(sy)
        d = -sin(rot - sy) * tan(sx) / cos(sy) + cos(rot)

        # Inverted rotation matrix with scale and shear
        # det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1
        M = [d, -b, 0, -c, a, 0]
        M = [x / scale for x in M]

        # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1
        M[2] += M[0] * (-cx - tx) + M[1] * (-cy - ty)
        M[5] += M[3] * (-cx - tx) + M[4] * (-cy - ty)

        # Apply center translation: C * RSS^-1 * C^-1 * T^-1
        M[2] += cx
        M[5] += cy
        return M

    @staticmethod
    def get_params(degrees, translate, scale_ranges, shears, height):
        angle = sample_sym(degrees)
        if translate is not None:
            max_dx = translate[0] * height
            max_dy = translate[1] * height
            translations = (np.round(sample_sym(max_dx)),
                            np.round(sample_sym(max_dy)))
        else:
            translations = (0, 0)

        if scale_ranges is not None:
            scale = sample_uniform(scale_ranges[0], scale_ranges[1])
        else:
            scale = 1.0

        if shears is not None:
            if len(shears) == 1:
                shear = [sample_sym(shears[0]), 0.0]
            elif len(shears) == 2:
                shear = [sample_sym(shears[0]), sample_sym(shears[1])]
        else:
            shear = 0.0
        return angle, translations, scale, shear

    def __call__(self, img):
        src_h, src_w = img.shape[:2]
        angle, translate, scale, shear = self.get_params(
            self.degrees, self.translate, self.scale, self.shear, src_h)

        M = self._get_inverse_affine_matrix((src_w / 2, src_h / 2), angle,
                                            (0, 0), scale, shear)
        M = np.array(M).reshape(2, 3)

        startpoints = [(0, 0), (src_w - 1, 0), (src_w - 1, src_h - 1),
                       (0, src_h - 1)]
        project = lambda x, y, a, b, c: int(a * x + b * y + c)
        endpoints = [(project(x, y, *M[0]), project(x, y, *M[1]))
                     for x, y in startpoints]

        rect = cv2.minAreaRect(np.array(endpoints))
        bbox = cv2.boxPoints(rect).astype(dtype=np.int32)
        max_x, max_y = bbox[:, 0].max(), bbox[:, 1].max()
        min_x, min_y = bbox[:, 0].min(), bbox[:, 1].min()

        dst_w = int(max_x - min_x)
        dst_h = int(max_y - min_y)
        M[0, 2] += (dst_w - src_w) / 2
        M[1, 2] += (dst_h - src_h) / 2

        # add translate
        dst_w += int(abs(translate[0]))
        dst_h += int(abs(translate[1]))
        if translate[0] < 0:
            M[0, 2] += abs(translate[0])
        if translate[1] < 0:
            M[1, 2] += abs(translate[1])

        flags = get_interpolation()
        return cv2.warpAffine(img,
                              M, (dst_w, dst_h),
                              flags=flags,
                              borderMode=cv2.BORDER_REPLICATE)


class CVRandomPerspective(object):
    def __init__(self, distortion=0.5):
        self.distortion = distortion

    def get_params(self, width, height, distortion):
        offset_h = sample_asym(distortion * height / 2,
                               size=4).astype(dtype=np.int32)
        offset_w = sample_asym(distortion * width / 2,
                               size=4).astype(dtype=np.int32)
        topleft = (offset_w[0], offset_h[0])
        topright = (width - 1 - offset_w[1], offset_h[1])
        botright = (width - 1 - offset_w[2], height - 1 - offset_h[2])
        botleft = (offset_w[3], height - 1 - offset_h[3])

        startpoints = [(0, 0), (width - 1, 0), (width - 1, height - 1),
                       (0, height - 1)]
        endpoints = [topleft, topright, botright, botleft]
        return np.array(startpoints,
                        dtype=np.float32), np.array(endpoints,
                                                    dtype=np.float32)

    def __call__(self, img):
        height, width = img.shape[:2]
        startpoints, endpoints = self.get_params(width, height,
                                                 self.distortion)
        M = cv2.getPerspectiveTransform(startpoints, endpoints)

        # TODO: more robust way to crop image
        rect = cv2.minAreaRect(endpoints)
        bbox = cv2.boxPoints(rect).astype(dtype=np.int32)
        max_x, max_y = bbox[:, 0].max(), bbox[:, 1].max()
        min_x, min_y = bbox[:, 0].min(), bbox[:, 1].min()
        min_x, min_y = max(min_x, 0), max(min_y, 0)

        flags = get_interpolation()
        img = cv2.warpPerspective(img,
                                  M, (max_x, max_y),
                                  flags=flags,
                                  borderMode=cv2.BORDER_REPLICATE)
        img = img[min_y:, min_x:]
        return img


class CVRescale(object):
    def __init__(self, factor=4, base_size=(128, 512)):
        """Define image scales using a gaussian pyramid and rescale the image
        to the target scale.

        Args:
            factor: the decay factor from the base size; factor=4 keeps the
                target scale by default.
            base_size: base size used to build the bottom layer of the pyramid.
        """
        if isinstance(factor, numbers.Number):
            self.factor = round(sample_uniform(0, factor))
        elif isinstance(factor, (tuple, list)) and len(factor) == 2:
            self.factor = round(sample_uniform(factor[0], factor[1]))
        else:
            raise Exception(
                'factor must be a number or a tuple/list of length 2')
        # assert factor is valid
        self.base_h, self.base_w = base_size[:2]

    def __call__(self, img):
        if self.factor == 0:
            return img
        src_h, src_w = img.shape[:2]
        cur_w, cur_h = self.base_w, self.base_h
        scale_img = cv2.resize(img, (cur_w, cur_h),
                               interpolation=get_interpolation())
        for _ in range(self.factor):
            scale_img = cv2.pyrDown(scale_img)
        scale_img = cv2.resize(scale_img, (src_w, src_h),
                               interpolation=get_interpolation())
        return scale_img
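

# Note (added for clarity, not in the original file): with the default
# base_size=(128, 512) and a sampled factor k, CVRescale resizes the crop to
# 512x128, applies cv2.pyrDown k times (each step blurs and halves the
# resolution, i.e. an effective 1/2**k scale, e.g. 64x16 for k=3), then
# resizes back to the original size to simulate low-resolution text.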


class CVGaussianNoise(object):
    def __init__(self, mean=0, var=20):
        self.mean = mean
        if isinstance(var, numbers.Number):
            self.var = max(int(sample_asym(var)), 1)
        elif isinstance(var, (tuple, list)) and len(var) == 2:
            self.var = int(sample_uniform(var[0], var[1]))
        else:
            raise Exception(
                'var must be a number or a tuple/list of length 2')

    def __call__(self, img):
        noise = np.random.normal(self.mean, self.var**0.5, img.shape)
        img = np.clip(img + noise, 0, 255).astype(np.uint8)
        return img


class CVMotionBlur(object):
    def __init__(self, degrees=12, angle=90):
        if isinstance(degrees, numbers.Number):
            self.degree = max(int(sample_asym(degrees)), 1)
        elif isinstance(degrees, (tuple, list)) and len(degrees) == 2:
            self.degree = int(sample_uniform(degrees[0], degrees[1]))
        else:
            raise Exception(
                'degrees must be a number or a tuple/list of length 2')
        self.angle = sample_uniform(-angle, angle)

    def __call__(self, img):
        # build a horizontal line kernel and rotate it by the sampled angle
        M = cv2.getRotationMatrix2D((self.degree // 2, self.degree // 2),
                                    self.angle, 1)
        motion_blur_kernel = np.zeros((self.degree, self.degree))
        motion_blur_kernel[self.degree // 2, :] = 1
        motion_blur_kernel = cv2.warpAffine(motion_blur_kernel, M,
                                            (self.degree, self.degree))
        motion_blur_kernel = motion_blur_kernel / self.degree
        img = cv2.filter2D(img, -1, motion_blur_kernel)
        img = np.clip(img, 0, 255).astype(np.uint8)
        return img


class CVGeometry(object):
    def __init__(
        self,
        degrees=15,
        translate=(0.3, 0.3),
        scale=(0.5, 2.0),
        shear=(45, 15),
        distortion=0.5,
        p=0.5,
    ):
        self.p = p
        type_p = random.random()
        if type_p < 0.33:
            self.transforms = CVRandomRotation(degrees=degrees)
        elif type_p < 0.66:
            self.transforms = CVRandomAffine(degrees=degrees,
                                             translate=translate,
                                             scale=scale,
                                             shear=shear)
        else:
            self.transforms = CVRandomPerspective(distortion=distortion)

    def __call__(self, img):
        if random.random() < self.p:
            return self.transforms(img)
        else:
            return img


class CVDeterioration(object):
    def __init__(self, var, degrees, factor, p=0.5):
        self.p = p
        transforms = []
        if var is not None:
            transforms.append(CVGaussianNoise(var=var))
        if degrees is not None:
            transforms.append(CVMotionBlur(degrees=degrees))
        if factor is not None:
            transforms.append(CVRescale(factor=factor))

        random.shuffle(transforms)
        transforms = Compose(transforms)
        self.transforms = transforms

    def __call__(self, img):
        if random.random() < self.p:
            return self.transforms(img)
        else:
            return img


class CVColorJitter(object):
    def __init__(self,
                 brightness=0.5,
                 contrast=0.5,
                 saturation=0.5,
                 hue=0.1,
                 p=0.5):
        self.p = p
        self.transforms = ColorJitter(brightness=brightness,
                                      contrast=contrast,
                                      saturation=saturation,
                                      hue=hue)

    def __call__(self, img):
        if random.random() < self.p:
            return np.array(self.transforms(Image.fromarray(img)))
        else:
            return img


class SVTRDeterioration(object):
    def __init__(self, var, degrees, factor, p=0.5):
        self.p = p
        transforms = []
        if var is not None:
            transforms.append(CVGaussianNoise(var=var))
        if degrees is not None:
            transforms.append(CVMotionBlur(degrees=degrees))
        if factor is not None:
            transforms.append(CVRescale(factor=factor))
        self.transforms = transforms

    def __call__(self, img):
        if random.random() < self.p:
            random.shuffle(self.transforms)
            transforms = Compose(self.transforms)
            return transforms(img)
        else:
            return img


class SVTRGeometry(object):
    def __init__(
        self,
        aug_type=0,
        degrees=15,
        translate=(0.3, 0.3),
        scale=(0.5, 2.0),
        shear=(45, 15),
        distortion=0.5,
        p=0.5,
    ):
        self.aug_type = aug_type
        self.p = p
        self.transforms = []
        self.transforms.append(CVRandomRotation(degrees=degrees))
        self.transforms.append(
            CVRandomAffine(degrees=degrees,
                           translate=translate,
                           scale=scale,
                           shear=shear))
        self.transforms.append(CVRandomPerspective(distortion=distortion))

    def __call__(self, img):
        if random.random() < self.p:
            if self.aug_type:
                random.shuffle(self.transforms)
                transforms = Compose(self.transforms[:random.randint(1, 3)])
                img = transforms(img)
            else:
                img = self.transforms[random.randint(0, 2)](img)
            return img
        else:
            return img
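

# --------------------------------------------------------------------------- #
# Minimal usage sketch (added; not part of the original ABINet file). It shows
# how the augmentations above are typically chained for scene-text recognition
# on a dummy 32x100 crop; the parameter values are illustrative only.
# --------------------------------------------------------------------------- #
if __name__ == '__main__':
    # a fake text-line crop standing in for a real dataset image
    dummy = np.random.randint(0, 256, size=(32, 100, 3), dtype=np.uint8)
    augment = Compose([
        CVGeometry(degrees=45,
                   translate=(0.0, 0.0),
                   scale=(0.5, 2.0),
                   shear=(45, 15),
                   distortion=0.5,
                   p=0.5),
        CVDeterioration(var=20, degrees=6, factor=4, p=0.25),
        CVColorJitter(brightness=0.5,
                      contrast=0.5,
                      saturation=0.5,
                      hue=0.1,
                      p=0.25),
    ])
    out = augment(dummy)
    # geometric transforms may change the spatial size, so print the result
    print('augmented image shape:', out.shape, 'dtype:', out.dtype)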