auto_augment.py 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012
  1. """AutoAugment, RandAugment, AugMix, and 3-Augment for PyTorch.
  2. This code implements the searched ImageNet policies with various tweaks and improvements and
  3. does not include any of the search code.
  4. AA and RA Implementation adapted from:
  5. https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py
  6. AugMix adapted from:
  7. https://github.com/google-research/augmix
  8. 3-Augment based on: https://github.com/facebookresearch/deit/blob/main/README_revenge.md
  9. Papers:
  10. AutoAugment: Learning Augmentation Policies from Data - https://arxiv.org/abs/1805.09501
  11. Learning Data Augmentation Strategies for Object Detection - https://arxiv.org/abs/1906.11172
  12. RandAugment: Practical automated data augmentation... - https://arxiv.org/abs/1909.13719
  13. AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty - https://arxiv.org/abs/1912.02781
  14. 3-Augment: DeiT III: Revenge of the ViT - https://arxiv.org/abs/2204.07118
  15. Hacked together by / Copyright 2019, Ross Wightman
  16. """
  17. import math
  18. import random
  19. import re
  20. from functools import partial
  21. from typing import Dict, List, Optional, Union
  22. import numpy as np
  23. import PIL
  24. from PIL import Image, ImageChops, ImageEnhance, ImageFilter, ImageOps
# Parsed Pillow version as a (major, minor) tuple; used for feature gating below.
_PIL_VER = tuple([int(x) for x in PIL.__version__.split('.')[:2]])

# Default fill color (mid-gray) for pixels exposed by geometric transforms.
_FILL = (128, 128, 128)

_LEVEL_DENOM = 10.  # denominator for conversion from 'Mx' magnitude scale to fractional aug level for op arguments

# Default hyper-params consumed by AugmentOp / level->arg functions.
_HPARAMS_DEFAULT = dict(
    translate_const=250,
    img_mean=_FILL,
)

# Pillow >= 9.1 moved resampling filters to the Image.Resampling enum.
if hasattr(Image, 'Resampling'):
    _RANDOM_INTERPOLATION = (Image.Resampling.BILINEAR, Image.Resampling.BICUBIC)
    _DEFAULT_INTERPOLATION = Image.Resampling.BICUBIC
else:
    _RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC)
    _DEFAULT_INTERPOLATION = Image.BICUBIC
  39. def _interpolation(kwargs):
  40. interpolation = kwargs.pop('resample', _DEFAULT_INTERPOLATION)
  41. if isinstance(interpolation, (list, tuple)):
  42. return random.choice(interpolation)
  43. return interpolation
def _check_args_tf(kwargs):
    """Normalize transform kwargs in place for the current Pillow version."""
    # 'fillcolor' was only added to Image.transform in Pillow 5.0; drop it on older versions.
    if 'fillcolor' in kwargs and _PIL_VER < (5, 0):
        kwargs.pop('fillcolor')
    # Resolve a possibly-sequence 'resample' option to a single filter.
    kwargs['resample'] = _interpolation(kwargs)
def shear_x(img, factor, **kwargs):
    """Shear image horizontally by `factor` (affine, TF AutoAugment convention)."""
    _check_args_tf(kwargs)
    return img.transform(img.size, Image.AFFINE, (1, factor, 0, 0, 1, 0), **kwargs)


def shear_y(img, factor, **kwargs):
    """Shear image vertically by `factor`."""
    _check_args_tf(kwargs)
    return img.transform(img.size, Image.AFFINE, (1, 0, 0, factor, 1, 0), **kwargs)


def translate_x_rel(img, pct, **kwargs):
    """Translate image horizontally by `pct` of its width."""
    pixels = pct * img.size[0]
    _check_args_tf(kwargs)
    return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs)


def translate_y_rel(img, pct, **kwargs):
    """Translate image vertically by `pct` of its height."""
    pixels = pct * img.size[1]
    _check_args_tf(kwargs)
    return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs)


def translate_x_abs(img, pixels, **kwargs):
    """Translate image horizontally by an absolute pixel count."""
    _check_args_tf(kwargs)
    return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs)


def translate_y_abs(img, pixels, **kwargs):
    """Translate image vertically by an absolute pixel count."""
    _check_args_tf(kwargs)
    return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs)
def rotate(img, degrees, **kwargs):
    """Rotate image by `degrees`, with version-appropriate Pillow handling."""
    _check_args_tf(kwargs)
    if _PIL_VER >= (5, 2):
        # Modern Pillow: rotate() accepts resample/fillcolor kwargs directly.
        return img.rotate(degrees, **kwargs)
    if _PIL_VER >= (5, 0):
        # Pillow 5.0/5.1: emulate center rotation with an explicit affine matrix
        # so fillcolor/resample kwargs can still be passed through transform().
        w, h = img.size
        post_trans = (0, 0)
        rotn_center = (w / 2.0, h / 2.0)
        angle = -math.radians(degrees)
        matrix = [
            round(math.cos(angle), 15),
            round(math.sin(angle), 15),
            0.0,
            round(-math.sin(angle), 15),
            round(math.cos(angle), 15),
            0.0,
        ]

        def transform(x, y, matrix):
            # Apply the affine matrix to point (x, y).
            (a, b, c, d, e, f) = matrix
            return a * x + b * y + c, d * x + e * y + f

        # Shift the rotation center to the image middle.
        matrix[2], matrix[5] = transform(-rotn_center[0] - post_trans[0],
                                         -rotn_center[1] - post_trans[1],
                                         matrix)
        matrix[2] += rotn_center[0]
        matrix[5] += rotn_center[1]
        return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
    # Very old Pillow: no fillcolor support; honor resample only.
    return img.rotate(degrees, resample=kwargs['resample'])
def auto_contrast(img, **__):
    """Maximize (normalize) image contrast."""
    return ImageOps.autocontrast(img)


def invert(img, **__):
    """Invert (negate) the image."""
    return ImageOps.invert(img)


def equalize(img, **__):
    """Equalize the image histogram."""
    return ImageOps.equalize(img)


def solarize(img, thresh, **__):
    """Invert all pixel values above `thresh`."""
    return ImageOps.solarize(img, thresh)


def solarize_add(img, add, thresh=128, **__):
    """Add `add` to every pixel below `thresh`, clamping at 255."""
    lut = []
    for i in range(256):
        if i < thresh:
            lut.append(min(255, i + add))
        else:
            lut.append(i)
    if img.mode in ('L', 'RGB'):
        if img.mode == 'RGB' and len(lut) == 256:
            # point() needs one 256-entry table per band for RGB.
            lut = lut + lut + lut
        return img.point(lut)
    # Unsupported mode: return the image unchanged.
    return img


def posterize(img, bits_to_keep, **__):
    """Reduce each channel to `bits_to_keep` bits (no-op at >= 8 bits)."""
    if bits_to_keep >= 8:
        return img
    return ImageOps.posterize(img, bits_to_keep)


def contrast(img, factor, **__):
    """Adjust contrast; factor 1.0 leaves the image unchanged."""
    return ImageEnhance.Contrast(img).enhance(factor)


def color(img, factor, **__):
    """Adjust color saturation; factor 1.0 leaves the image unchanged."""
    return ImageEnhance.Color(img).enhance(factor)


def brightness(img, factor, **__):
    """Adjust brightness; factor 1.0 leaves the image unchanged."""
    return ImageEnhance.Brightness(img).enhance(factor)


def sharpness(img, factor, **__):
    """Adjust sharpness; factor 1.0 leaves the image unchanged."""
    return ImageEnhance.Sharpness(img).enhance(factor)


def gaussian_blur(img, factor, **__):
    """Blur with a fixed Gaussian radius of `factor`."""
    img = img.filter(ImageFilter.GaussianBlur(radius=factor))
    return img


def gaussian_blur_rand(img, factor, **__):
    """Blur with a random Gaussian radius drawn from [0.1, 2.0 * factor]."""
    radius_min = 0.1
    radius_max = 2.0
    img = img.filter(
        ImageFilter.GaussianBlur(radius=random.uniform(radius_min, radius_max * factor)))
    return img


def desaturate(img, factor, **_):
    """Desaturate towards grayscale; higher `factor` means stronger desaturation."""
    factor = min(1., max(0., 1. - factor))
    # enhance factor 0 = grayscale, 1.0 = no-change
    return ImageEnhance.Color(img).enhance(factor)
  147. def _randomly_negate(v):
  148. """With 50% prob, negate the value."""
  149. return -v if random.random() > 0.5 else v
  150. def _rotate_level_to_arg(level, _hparams):
  151. # range [-30, 30]
  152. level = (level / _LEVEL_DENOM) * 30.
  153. level = _randomly_negate(level)
  154. return level,
def _enhance_level_to_arg(level, _hparams):
    """Magnitude -> ImageEnhance factor in [0.1, 1.9]."""
    # range [0.1, 1.9]
    return (level / _LEVEL_DENOM) * 1.8 + 0.1,


def _enhance_increasing_level_to_arg(level, _hparams):
    """Magnitude -> enhance factor moving away from 1.0 (no-op) as level grows."""
    # the 'no change' level is 1.0, moving away from that towards 0. or 2.0 increases the enhancement blend
    # range [0.1, 1.9] if level <= _LEVEL_DENOM
    level = (level / _LEVEL_DENOM) * .9
    level = max(0.1, 1.0 + _randomly_negate(level))  # keep it >= 0.1
    return level,


def _minmax_level_to_arg(level, _hparams, min_val=0., max_val=1.0, clamp=True):
    """Linearly map magnitude to [min_val, max_val], optionally clamped to that range."""
    level = (level / _LEVEL_DENOM)
    level = min_val + (max_val - min_val) * level
    if clamp:
        level = max(min_val, min(max_val, level))
    return level,
def _shear_level_to_arg(level, _hparams):
    """Magnitude -> shear factor in [-0.3, 0.3] (sign randomized)."""
    # range [-0.3, 0.3]
    level = (level / _LEVEL_DENOM) * 0.3
    level = _randomly_negate(level)
    return level,


def _translate_abs_level_to_arg(level, hparams):
    """Magnitude -> absolute pixel translation in [-translate_const, translate_const]."""
    translate_const = hparams['translate_const']
    level = (level / _LEVEL_DENOM) * float(translate_const)
    level = _randomly_negate(level)
    return level,


def _translate_rel_level_to_arg(level, hparams):
    """Magnitude -> relative translation fraction (sign randomized)."""
    # default range [-0.45, 0.45]
    translate_pct = hparams.get('translate_pct', 0.45)
    level = (level / _LEVEL_DENOM) * translate_pct
    level = _randomly_negate(level)
    return level,
def _posterize_level_to_arg(level, _hparams):
    """Magnitude -> bits kept, in [0, 4]; severity DEcreases with level."""
    # As per Tensorflow TPU EfficientNet impl
    # range [0, 4], 'keep 0 up to 4 MSB of original image'
    # intensity/severity of augmentation decreases with level
    return int((level / _LEVEL_DENOM) * 4),


def _posterize_increasing_level_to_arg(level, hparams):
    """Magnitude -> bits kept, in [4, 0]; severity INcreases with level."""
    # As per Tensorflow models research and UDA impl
    # range [4, 0], 'keep 4 down to 0 MSB of original image',
    # intensity/severity of augmentation increases with level
    return 4 - _posterize_level_to_arg(level, hparams)[0],


def _posterize_original_level_to_arg(level, _hparams):
    """Magnitude -> bits kept, in [4, 8]; severity decreases with level."""
    # As per original AutoAugment paper description
    # range [4, 8], 'keep 4 up to 8 MSB of image'
    # intensity/severity of augmentation decreases with level
    return int((level / _LEVEL_DENOM) * 4) + 4,


def _solarize_level_to_arg(level, _hparams):
    """Magnitude -> solarize threshold in [0, 256]; severity decreases with level."""
    # range [0, 256]
    # intensity/severity of augmentation decreases with level
    return min(256, int((level / _LEVEL_DENOM) * 256)),


def _solarize_increasing_level_to_arg(level, _hparams):
    """Magnitude -> solarize threshold in [256, 0]; severity increases with level."""
    # range [0, 256]
    # intensity/severity of augmentation increases with level
    return 256 - _solarize_level_to_arg(level, _hparams)[0],


def _solarize_add_level_to_arg(level, _hparams):
    """Magnitude -> solarize_add amount in [0, 110] (clamped at 128)."""
    # range [0, 110]
    return min(128, int((level / _LEVEL_DENOM) * 110)),
# Maps op name -> function converting a magnitude level into op arguments.
# None means the op takes no magnitude-derived arguments.
LEVEL_TO_ARG = {
    'AutoContrast': None,
    'Equalize': None,
    'Invert': None,
    'Rotate': _rotate_level_to_arg,
    # There are several variations of the posterize level scaling in various Tensorflow/Google repositories/papers
    'Posterize': _posterize_level_to_arg,
    'PosterizeIncreasing': _posterize_increasing_level_to_arg,
    'PosterizeOriginal': _posterize_original_level_to_arg,
    'Solarize': _solarize_level_to_arg,
    'SolarizeIncreasing': _solarize_increasing_level_to_arg,
    'SolarizeAdd': _solarize_add_level_to_arg,
    'Color': _enhance_level_to_arg,
    'ColorIncreasing': _enhance_increasing_level_to_arg,
    'Contrast': _enhance_level_to_arg,
    'ContrastIncreasing': _enhance_increasing_level_to_arg,
    'Brightness': _enhance_level_to_arg,
    'BrightnessIncreasing': _enhance_increasing_level_to_arg,
    'Sharpness': _enhance_level_to_arg,
    'SharpnessIncreasing': _enhance_increasing_level_to_arg,
    'ShearX': _shear_level_to_arg,
    'ShearY': _shear_level_to_arg,
    'TranslateX': _translate_abs_level_to_arg,
    'TranslateY': _translate_abs_level_to_arg,
    'TranslateXRel': _translate_rel_level_to_arg,
    'TranslateYRel': _translate_rel_level_to_arg,
    'Desaturate': partial(_minmax_level_to_arg, min_val=0.5, max_val=1.0),
    'GaussianBlur': partial(_minmax_level_to_arg, min_val=0.1, max_val=2.0),
    'GaussianBlurRand': _minmax_level_to_arg,
}
# Maps op name -> the PIL-based image transform function implementing it.
NAME_TO_OP = {
    'AutoContrast': auto_contrast,
    'Equalize': equalize,
    'Invert': invert,
    'Rotate': rotate,
    'Posterize': posterize,
    'PosterizeIncreasing': posterize,
    'PosterizeOriginal': posterize,
    'Solarize': solarize,
    'SolarizeIncreasing': solarize,
    'SolarizeAdd': solarize_add,
    'Color': color,
    'ColorIncreasing': color,
    'Contrast': contrast,
    'ContrastIncreasing': contrast,
    'Brightness': brightness,
    'BrightnessIncreasing': brightness,
    'Sharpness': sharpness,
    'SharpnessIncreasing': sharpness,
    'ShearX': shear_x,
    'ShearY': shear_y,
    'TranslateX': translate_x_abs,
    'TranslateY': translate_y_abs,
    'TranslateXRel': translate_x_rel,
    'TranslateYRel': translate_y_rel,
    'Desaturate': desaturate,
    'GaussianBlur': gaussian_blur,
    'GaussianBlurRand': gaussian_blur_rand,
}
class AugmentOp:
    """A single augmentation op: a named transform, application probability, and magnitude.

    Looks up the PIL transform in NAME_TO_OP and the magnitude->argument mapper
    in LEVEL_TO_ARG; geometric-op kwargs (fill color, resample filter) come from hparams.
    """

    def __init__(self, name, prob=0.5, magnitude=10, hparams=None):
        hparams = hparams or _HPARAMS_DEFAULT
        self.name = name
        self.aug_fn = NAME_TO_OP[name]
        self.level_fn = LEVEL_TO_ARG[name]
        self.prob = prob
        self.magnitude = magnitude
        self.hparams = hparams.copy()
        # kwargs forwarded to the transform fn: fill color for exposed pixels and
        # the resample filter (a sequence here means "choose randomly per call").
        self.kwargs = dict(
            fillcolor=hparams['img_mean'] if 'img_mean' in hparams else _FILL,
            resample=hparams['interpolation'] if 'interpolation' in hparams else _RANDOM_INTERPOLATION,
        )
        # If magnitude_std is > 0, we introduce some randomness
        # in the usually fixed policy and sample magnitude from a normal distribution
        # with mean `magnitude` and std-dev of `magnitude_std`.
        # NOTE This is my own hack, being tested, not in papers or reference impls.
        # If magnitude_std is inf, we sample magnitude from a uniform distribution
        self.magnitude_std = self.hparams.get('magnitude_std', 0)
        self.magnitude_max = self.hparams.get('magnitude_max', None)

    def __call__(self, img):
        # Skip the op entirely with probability (1 - prob).
        if self.prob < 1.0 and random.random() > self.prob:
            return img
        magnitude = self.magnitude
        if self.magnitude_std > 0:
            # magnitude randomization enabled
            if self.magnitude_std == float('inf'):
                # inf == uniform sampling
                magnitude = random.uniform(0, magnitude)
            elif self.magnitude_std > 0:
                # NOTE(review): this elif is redundant (the outer if already ensures > 0)
                magnitude = random.gauss(magnitude, self.magnitude_std)
        # default upper_bound for the timm RA impl is _LEVEL_DENOM (10)
        # setting magnitude_max overrides this to allow M > 10 (behaviour closer to Google TF RA impl)
        upper_bound = self.magnitude_max or _LEVEL_DENOM
        magnitude = max(0., min(magnitude, upper_bound))
        level_args = self.level_fn(magnitude, self.hparams) if self.level_fn is not None else tuple()
        return self.aug_fn(img, *level_args, **self.kwargs)

    def __repr__(self):
        fs = self.__class__.__name__ + f'(name={self.name}, p={self.prob}'
        fs += f', m={self.magnitude}, mstd={self.magnitude_std}'
        if self.magnitude_max is not None:
            fs += f', mmax={self.magnitude_max}'
        fs += ')'
        return fs
def auto_augment_policy_v0(hparams):
    """Return the ImageNet 'v0' policy as a list of sub-policies of AugmentOps."""
    # ImageNet v0 policy from TPU EfficientNet impl, cannot find a paper reference.
    policy = [
        [('Equalize', 0.8, 1), ('ShearY', 0.8, 4)],
        [('Color', 0.4, 9), ('Equalize', 0.6, 3)],
        [('Color', 0.4, 1), ('Rotate', 0.6, 8)],
        [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)],
        [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)],
        [('Color', 0.2, 0), ('Equalize', 0.8, 8)],
        [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)],
        [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)],
        [('Color', 0.6, 1), ('Equalize', 1.0, 2)],
        [('Invert', 0.4, 9), ('Rotate', 0.6, 0)],
        [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)],
        [('Color', 0.4, 7), ('Equalize', 0.6, 0)],
        [('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)],
        [('Solarize', 0.6, 8), ('Color', 0.6, 9)],
        [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)],
        [('Rotate', 1.0, 7), ('TranslateYRel', 0.8, 9)],
        [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)],
        [('ShearY', 0.8, 0), ('Color', 0.6, 4)],
        [('Color', 1.0, 0), ('Rotate', 0.6, 2)],
        [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)],
        [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)],
        [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)],
        [('Posterize', 0.8, 2), ('Solarize', 0.6, 10)],  # This results in black image with Tpu posterize
        [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)],
        [('Color', 0.8, 6), ('Rotate', 0.4, 5)],
    ]
    pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy]
    return pc
def auto_augment_policy_v0r(hparams):
    """Return the 'v0' policy variant using the 'increasing' posterize mapping."""
    # ImageNet v0 policy from TPU EfficientNet impl, with variation of Posterize used
    # in Google research implementation (number of bits discarded increases with magnitude)
    policy = [
        [('Equalize', 0.8, 1), ('ShearY', 0.8, 4)],
        [('Color', 0.4, 9), ('Equalize', 0.6, 3)],
        [('Color', 0.4, 1), ('Rotate', 0.6, 8)],
        [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)],
        [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)],
        [('Color', 0.2, 0), ('Equalize', 0.8, 8)],
        [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)],
        [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)],
        [('Color', 0.6, 1), ('Equalize', 1.0, 2)],
        [('Invert', 0.4, 9), ('Rotate', 0.6, 0)],
        [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)],
        [('Color', 0.4, 7), ('Equalize', 0.6, 0)],
        [('PosterizeIncreasing', 0.4, 6), ('AutoContrast', 0.4, 7)],
        [('Solarize', 0.6, 8), ('Color', 0.6, 9)],
        [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)],
        [('Rotate', 1.0, 7), ('TranslateYRel', 0.8, 9)],
        [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)],
        [('ShearY', 0.8, 0), ('Color', 0.6, 4)],
        [('Color', 1.0, 0), ('Rotate', 0.6, 2)],
        [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)],
        [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)],
        [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)],
        [('PosterizeIncreasing', 0.8, 2), ('Solarize', 0.6, 10)],
        [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)],
        [('Color', 0.8, 6), ('Rotate', 0.4, 5)],
    ]
    pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy]
    return pc
def auto_augment_policy_original(hparams):
    """Return the original AutoAugment paper's ImageNet policy as AugmentOps."""
    # ImageNet policy from https://arxiv.org/abs/1805.09501
    policy = [
        [('PosterizeOriginal', 0.4, 8), ('Rotate', 0.6, 9)],
        [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)],
        [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)],
        [('PosterizeOriginal', 0.6, 7), ('PosterizeOriginal', 0.6, 6)],
        [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)],
        [('Equalize', 0.4, 4), ('Rotate', 0.8, 8)],
        [('Solarize', 0.6, 3), ('Equalize', 0.6, 7)],
        [('PosterizeOriginal', 0.8, 5), ('Equalize', 1.0, 2)],
        [('Rotate', 0.2, 3), ('Solarize', 0.6, 8)],
        [('Equalize', 0.6, 8), ('PosterizeOriginal', 0.4, 6)],
        [('Rotate', 0.8, 8), ('Color', 0.4, 0)],
        [('Rotate', 0.4, 9), ('Equalize', 0.6, 2)],
        [('Equalize', 0.0, 7), ('Equalize', 0.8, 8)],
        [('Invert', 0.6, 4), ('Equalize', 1.0, 8)],
        [('Color', 0.6, 4), ('Contrast', 1.0, 8)],
        [('Rotate', 0.8, 8), ('Color', 1.0, 2)],
        [('Color', 0.8, 8), ('Solarize', 0.8, 7)],
        [('Sharpness', 0.4, 7), ('Invert', 0.6, 8)],
        [('ShearX', 0.6, 5), ('Equalize', 1.0, 9)],
        [('Color', 0.4, 0), ('Equalize', 0.6, 3)],
        [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)],
        [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)],
        [('Invert', 0.6, 4), ('Equalize', 1.0, 8)],
        [('Color', 0.6, 4), ('Contrast', 1.0, 8)],
        [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)],
    ]
    pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy]
    return pc
def auto_augment_policy_originalr(hparams):
    """Return the original policy variant using the 'increasing' posterize mapping."""
    # ImageNet policy from https://arxiv.org/abs/1805.09501 with research posterize variation
    policy = [
        [('PosterizeIncreasing', 0.4, 8), ('Rotate', 0.6, 9)],
        [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)],
        [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)],
        [('PosterizeIncreasing', 0.6, 7), ('PosterizeIncreasing', 0.6, 6)],
        [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)],
        [('Equalize', 0.4, 4), ('Rotate', 0.8, 8)],
        [('Solarize', 0.6, 3), ('Equalize', 0.6, 7)],
        [('PosterizeIncreasing', 0.8, 5), ('Equalize', 1.0, 2)],
        [('Rotate', 0.2, 3), ('Solarize', 0.6, 8)],
        [('Equalize', 0.6, 8), ('PosterizeIncreasing', 0.4, 6)],
        [('Rotate', 0.8, 8), ('Color', 0.4, 0)],
        [('Rotate', 0.4, 9), ('Equalize', 0.6, 2)],
        [('Equalize', 0.0, 7), ('Equalize', 0.8, 8)],
        [('Invert', 0.6, 4), ('Equalize', 1.0, 8)],
        [('Color', 0.6, 4), ('Contrast', 1.0, 8)],
        [('Rotate', 0.8, 8), ('Color', 1.0, 2)],
        [('Color', 0.8, 8), ('Solarize', 0.8, 7)],
        [('Sharpness', 0.4, 7), ('Invert', 0.6, 8)],
        [('ShearX', 0.6, 5), ('Equalize', 1.0, 9)],
        [('Color', 0.4, 0), ('Equalize', 0.6, 3)],
        [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)],
        [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)],
        [('Invert', 0.6, 4), ('Equalize', 1.0, 8)],
        [('Color', 0.6, 4), ('Contrast', 1.0, 8)],
        [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)],
    ]
    pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy]
    return pc
def auto_augment_policy_3a(hparams):
    """Return the 3-Augment (DeiT III) policy: one always-applied op per sub-policy."""
    policy = [
        [('Solarize', 1.0, 5)],  # 128 solarize threshold @ 5 magnitude
        [('Desaturate', 1.0, 10)],  # grayscale at 10 magnitude
        [('GaussianBlurRand', 1.0, 10)],
    ]
    pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy]
    return pc
  451. def auto_augment_policy(name='v0', hparams=None):
  452. hparams = hparams or _HPARAMS_DEFAULT
  453. if name == 'original':
  454. return auto_augment_policy_original(hparams)
  455. if name == 'originalr':
  456. return auto_augment_policy_originalr(hparams)
  457. if name == 'v0':
  458. return auto_augment_policy_v0(hparams)
  459. if name == 'v0r':
  460. return auto_augment_policy_v0r(hparams)
  461. if name == '3a':
  462. return auto_augment_policy_3a(hparams)
  463. assert False, f'Unknown AA policy {name}'
  464. class AutoAugment:
  465. def __init__(self, policy):
  466. self.policy = policy
  467. def __call__(self, img):
  468. sub_policy = random.choice(self.policy)
  469. for op in sub_policy:
  470. img = op(img)
  471. return img
  472. def __repr__(self):
  473. fs = self.__class__.__name__ + '(policy='
  474. for p in self.policy:
  475. fs += '\n\t['
  476. fs += ', '.join([str(op) for op in p])
  477. fs += ']'
  478. fs += ')'
  479. return fs
  480. def auto_augment_transform(config_str: str, hparams: Optional[Dict] = None):
  481. """Create a AutoAugment transform.
  482. Args:
  483. config_str: String defining configuration of auto augmentation. Consists of multiple sections separated by
  484. dashes ('-').
  485. The first section defines the AutoAugment policy (one of 'v0', 'v0r', 'original', 'originalr').
  486. The remaining sections:
  487. 'mstd' - float std deviation of magnitude noise applied
  488. Ex 'original-mstd0.5' results in AutoAugment with original policy, magnitude_std 0.5
  489. hparams: Other hparams (kwargs) for the AutoAugmentation scheme
  490. Returns:
  491. A PyTorch compatible Transform
  492. """
  493. config = config_str.split('-')
  494. policy_name = config[0]
  495. config = config[1:]
  496. for c in config:
  497. cs = re.split(r'(\d.*)', c)
  498. if len(cs) < 2:
  499. continue
  500. key, val = cs[:2]
  501. if key == 'mstd':
  502. # noise param injected via hparams for now
  503. hparams.setdefault('magnitude_std', float(val))
  504. else:
  505. assert False, 'Unknown AutoAugment config section'
  506. aa_policy = auto_augment_policy(policy_name, hparams=hparams)
  507. return AutoAugment(aa_policy)
# Default RandAugment transform set (non-increasing severity variants).
_RAND_TRANSFORMS = [
    'AutoContrast',
    'Equalize',
    'Invert',
    'Rotate',
    'Posterize',
    'Solarize',
    'SolarizeAdd',
    'Color',
    'Contrast',
    'Brightness',
    'Sharpness',
    'ShearX',
    'ShearY',
    'TranslateXRel',
    'TranslateYRel',
    # 'Cutout'  # NOTE I've implement this as random erasing separately
]
# RandAugment transform set where op severity increases with magnitude.
_RAND_INCREASING_TRANSFORMS = [
    'AutoContrast',
    'Equalize',
    'Invert',
    'Rotate',
    'PosterizeIncreasing',
    'SolarizeIncreasing',
    'SolarizeAdd',
    'ColorIncreasing',
    'ContrastIncreasing',
    'BrightnessIncreasing',
    'SharpnessIncreasing',
    'ShearX',
    'ShearY',
    'TranslateXRel',
    'TranslateYRel',
    # 'Cutout'  # NOTE I've implement this as random erasing separately
]
# 3-Augment op set (DeiT III) for use with RandAugment selection.
_RAND_3A = [
    'SolarizeIncreasing',
    'Desaturate',
    'GaussianBlur',
]

# Weighted variant of the 3-Augment set; weights are relative selection frequencies.
_RAND_WEIGHTED_3A = {
    'SolarizeIncreasing': 6,
    'Desaturate': 6,
    'GaussianBlur': 6,
    'Rotate': 3,
    'ShearX': 2,
    'ShearY': 2,
    'PosterizeIncreasing': 1,
    'AutoContrast': 1,
    'ColorIncreasing': 1,
    'SharpnessIncreasing': 1,
    'ContrastIncreasing': 1,
    'BrightnessIncreasing': 1,
    'Equalize': 1,
    'Invert': 1,
}

# These experimental weights are based loosely on the relative improvements mentioned in paper.
# They may not result in increased performance, but could likely be tuned to so.
_RAND_WEIGHTED_0 = {
    'Rotate': 3,
    'ShearX': 2,
    'ShearY': 2,
    'TranslateXRel': 1,
    'TranslateYRel': 1,
    'ColorIncreasing': .25,
    'SharpnessIncreasing': 0.25,
    'AutoContrast': 0.25,
    'SolarizeIncreasing': .05,
    'SolarizeAdd': .05,
    'ContrastIncreasing': .05,
    'BrightnessIncreasing': .05,
    'Equalize': .05,
    'PosterizeIncreasing': 0.05,
    'Invert': 0.05,
}
  584. def _get_weighted_transforms(transforms: Dict):
  585. transforms, probs = list(zip(*transforms.items()))
  586. probs = np.array(probs)
  587. probs = probs / np.sum(probs)
  588. return transforms, probs
  589. def rand_augment_choices(name: str, increasing=True):
  590. if name == 'weights':
  591. return _RAND_WEIGHTED_0
  592. if name == '3aw':
  593. return _RAND_WEIGHTED_3A
  594. if name == '3a':
  595. return _RAND_3A
  596. return _RAND_INCREASING_TRANSFORMS if increasing else _RAND_TRANSFORMS
  597. def rand_augment_ops(
  598. magnitude: Union[int, float] = 10,
  599. prob: float = 0.5,
  600. hparams: Optional[Dict] = None,
  601. transforms: Optional[Union[Dict, List]] = None,
  602. ):
  603. hparams = hparams or _HPARAMS_DEFAULT
  604. transforms = transforms or _RAND_TRANSFORMS
  605. return [
  606. AugmentOp(name, prob=prob, magnitude=magnitude, hparams=hparams)
  607. for name in transforms
  608. ]
  609. class RandAugment:
  610. def __init__(self, ops, num_layers=2, choice_weights=None):
  611. self.ops = ops
  612. self.num_layers = num_layers
  613. self.choice_weights = choice_weights
  614. def __call__(self, img):
  615. # no replacement when using weighted choice
  616. ops = np.random.choice(
  617. self.ops,
  618. self.num_layers,
  619. replace=self.choice_weights is None,
  620. p=self.choice_weights,
  621. )
  622. for op in ops:
  623. img = op(img)
  624. return img
  625. def __repr__(self):
  626. fs = self.__class__.__name__ + f'(n={self.num_layers}, ops='
  627. for op in self.ops:
  628. fs += f'\n\t{op}'
  629. fs += ')'
  630. return fs
  631. def rand_augment_transform(
  632. config_str: str,
  633. hparams: Optional[Dict] = None,
  634. transforms: Optional[Union[str, Dict, List]] = None,
  635. ):
  636. """Create a RandAugment transform.
  637. Args:
  638. config_str (str): String defining configuration of random augmentation. Consists of multiple sections separated
  639. by dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand').
  640. The remaining sections, not order sepecific determine
  641. 'm' - integer magnitude of rand augment
  642. 'n' - integer num layers (number of transform ops selected per image)
  643. 'p' - float probability of applying each layer (default 0.5)
  644. 'mstd' - float std deviation of magnitude noise applied, or uniform sampling if infinity (or > 100)
  645. 'mmax' - set upper bound for magnitude to something other than default of _LEVEL_DENOM (10)
  646. 'inc' - integer (bool), use augmentations that increase in severity with magnitude (default: 0)
  647. 't' - str name of transform set to use
  648. Ex 'rand-m9-n3-mstd0.5' results in RandAugment with magnitude 9, num_layers 3, magnitude_std 0.5
  649. 'rand-mstd1-tweights' results in mag std 1.0, weighted transforms, default mag of 10 and num_layers 2
  650. hparams (dict): Other hparams (kwargs) for the RandAugmentation scheme
  651. Returns:
  652. A PyTorch compatible Transform
  653. """
  654. magnitude = _LEVEL_DENOM # default to _LEVEL_DENOM for magnitude (currently 10)
  655. num_layers = 2 # default to 2 ops per image
  656. increasing = False
  657. prob = 0.5
  658. config = config_str.split('-')
  659. assert config[0] == 'rand'
  660. config = config[1:]
  661. for c in config:
  662. if c.startswith('t'):
  663. # NOTE old 'w' key was removed, 'w0' is not equivalent to 'tweights'
  664. val = str(c[1:])
  665. if transforms is None:
  666. transforms = val
  667. else:
  668. # numeric options
  669. cs = re.split(r'(\d.*)', c)
  670. if len(cs) < 2:
  671. continue
  672. key, val = cs[:2]
  673. if key == 'mstd':
  674. # noise param / randomization of magnitude values
  675. mstd = float(val)
  676. if mstd > 100:
  677. # use uniform sampling in 0 to magnitude if mstd is > 100
  678. mstd = float('inf')
  679. hparams.setdefault('magnitude_std', mstd)
  680. elif key == 'mmax':
  681. # clip magnitude between [0, mmax] instead of default [0, _LEVEL_DENOM]
  682. hparams.setdefault('magnitude_max', int(val))
  683. elif key == 'inc':
  684. if bool(val):
  685. increasing = True
  686. elif key == 'm':
  687. magnitude = int(val)
  688. elif key == 'n':
  689. num_layers = int(val)
  690. elif key == 'p':
  691. prob = float(val)
  692. else:
  693. assert False, 'Unknown RandAugment config section'
  694. if isinstance(transforms, str):
  695. transforms = rand_augment_choices(transforms, increasing=increasing)
  696. elif transforms is None:
  697. transforms = _RAND_INCREASING_TRANSFORMS if increasing else _RAND_TRANSFORMS
  698. choice_weights = None
  699. if isinstance(transforms, Dict):
  700. transforms, choice_weights = _get_weighted_transforms(transforms)
  701. ra_ops = rand_augment_ops(magnitude=magnitude,
  702. prob=prob,
  703. hparams=hparams,
  704. transforms=transforms)
  705. return RandAugment(ra_ops, num_layers, choice_weights=choice_weights)
# Default op-name set for AugMix; each name is passed to AugmentOp in augmix_ops().
# The '*Increasing' variants are additions beyond the original AugMix paper.
_AUGMIX_TRANSFORMS = [
    'AutoContrast',
    'ColorIncreasing',  # not in paper
    'ContrastIncreasing',  # not in paper
    'BrightnessIncreasing',  # not in paper
    'SharpnessIncreasing',  # not in paper
    'Equalize',
    'Rotate',
    'PosterizeIncreasing',
    'SolarizeIncreasing',
    'ShearX',
    'ShearY',
    'TranslateXRel',
    'TranslateYRel',
]
  721. def augmix_ops(
  722. magnitude: Union[int, float] = 10,
  723. hparams: Optional[Dict] = None,
  724. transforms: Optional[Union[str, Dict, List]] = None,
  725. ):
  726. hparams = hparams or _HPARAMS_DEFAULT
  727. transforms = transforms or _AUGMIX_TRANSFORMS
  728. return [
  729. AugmentOp(name, prob=1.0, magnitude=magnitude, hparams=hparams)
  730. for name in transforms
  731. ]
class AugMixAugment:
    """AugMix Transform Adapted and improved from impl here:
    https://github.com/google-research/augmix/blob/master/imagenet.py From
    paper: 'AugMix: A Simple Data Processing Method to Improve Robustness and
    Uncertainty - https://arxiv.org/abs/1912.02781."""

    def __init__(self, ops, alpha=1., width=3, depth=-1, blended=False):
        # ops: pool of augmentation callables (PIL Image -> PIL Image) sampled per chain
        # alpha: concentration for both the Dirichlet mixing weights and the Beta final blend
        # width: number of parallel augmentation chains mixed together
        # depth: ops per chain; <= 0 means sample a random depth in [1, 3] per chain
        self.ops = ops
        self.alpha = alpha
        self.width = width
        self.depth = depth
        self.blended = blended  # blended mode is faster but not well tested

    def _calc_blended_weights(self, ws, m):
        # Convert per-chain Dirichlet weights (scaled by final blend factor m) into a
        # sequence of Image.blend alpha values, intended so that chained pairwise blends
        # approximate the weighted sum computed in _apply_basic (see TODO in _apply_blended
        # about the approximation not matching exactly).
        ws = ws * m
        cump = 1.
        rws = []
        for w in ws[::-1]:
            alpha = w / cump
            cump *= (1 - alpha)
            rws.append(alpha)
        return np.array(rws[::-1], dtype=np.float32)

    def _apply_blended(self, img, mixing_weights, m):
        # This is my first crack and implementing a slightly faster mixed augmentation. Instead
        # of accumulating the mix for each chain in a Numpy array and then blending with original,
        # it recomputes the blending coefficients and applies one PIL image blend per chain.
        # TODO the results appear in the right ballpark but they differ by more than rounding.
        img_orig = img.copy()
        ws = self._calc_blended_weights(mixing_weights, m)
        for w in ws:
            # depth <= 0 -> uniform random chain depth in [1, 3]
            depth = self.depth if self.depth > 0 else np.random.randint(1, 4)
            ops = np.random.choice(self.ops, depth, replace=True)
            img_aug = img_orig  # no ops are in-place, deep copy not necessary
            for op in ops:
                img_aug = op(img_aug)
            img = Image.blend(img, img_aug, w)
        return img

    def _apply_basic(self, img, mixing_weights, m):
        # This is a literal adaptation of the paper/official implementation without normalizations and
        # PIL <-> Numpy conversions between every op. It is still quite CPU compute heavy compared to the
        # typical augmentation transforms, could use a GPU / Kornia implementation.
        # NOTE(review): img.size is (W, H) but np.asarray(img) yields (H, W, C); the shapes
        # only agree for square images — presumably square crops are assumed here, confirm.
        img_shape = img.size[0], img.size[1], len(img.getbands())
        mixed = np.zeros(img_shape, dtype=np.float32)
        for mw in mixing_weights:
            # depth <= 0 -> uniform random chain depth in [1, 3]
            depth = self.depth if self.depth > 0 else np.random.randint(1, 4)
            ops = np.random.choice(self.ops, depth, replace=True)
            img_aug = img  # no ops are in-place, deep copy not necessary
            for op in ops:
                img_aug = op(img_aug)
            # accumulate the Dirichlet-weighted sum of the augmented chains
            mixed += mw * np.asarray(img_aug, dtype=np.float32)
        np.clip(mixed, 0, 255., out=mixed)
        mixed = Image.fromarray(mixed.astype(np.uint8))
        # final Beta-sampled blend between original image and the chain mixture
        return Image.blend(img, mixed, m)

    def __call__(self, img):
        # Per image: mixing weights ~ Dirichlet(alpha, ..., alpha) over `width` chains,
        # overall blend factor m ~ Beta(alpha, alpha).
        mixing_weights = np.float32(
            np.random.dirichlet([self.alpha] * self.width))
        m = np.float32(np.random.beta(self.alpha, self.alpha))
        if self.blended:
            mixed = self._apply_blended(img, mixing_weights, m)
        else:
            mixed = self._apply_basic(img, mixing_weights, m)
        return mixed

    def __repr__(self):
        fs = self.__class__.__name__ + f'(alpha={self.alpha}, width={self.width}, depth={self.depth}, ops='
        for op in self.ops:
            fs += f'\n\t{op}'
        fs += ')'
        return fs
  798. def augment_and_mix_transform(config_str: str, hparams: Optional[Dict] = None):
  799. """Create AugMix PyTorch transform.
  800. Args:
  801. config_str (str): String defining configuration of random augmentation. Consists of multiple sections separated
  802. by dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand').
  803. The remaining sections, not order sepecific determine
  804. 'm' - integer magnitude (severity) of augmentation mix (default: 3)
  805. 'w' - integer width of augmentation chain (default: 3)
  806. 'd' - integer depth of augmentation chain (-1 is random [1, 3], default: -1)
  807. 'b' - integer (bool), blend each branch of chain into end result without a final blend, less CPU (default: 0)
  808. 'mstd' - float std deviation of magnitude noise applied (default: 0)
  809. Ex 'augmix-m5-w4-d2' results in AugMix with severity 5, chain width 4, chain depth 2
  810. hparams: Other hparams (kwargs) for the Augmentation transforms
  811. Returns:
  812. A PyTorch compatible Transform
  813. """
  814. magnitude = 3
  815. width = 3
  816. depth = -1
  817. alpha = 1.
  818. blended = False
  819. config = config_str.split('-')
  820. assert config[0] == 'augmix'
  821. config = config[1:]
  822. for c in config:
  823. cs = re.split(r'(\d.*)', c)
  824. if len(cs) < 2:
  825. continue
  826. key, val = cs[:2]
  827. if key == 'mstd':
  828. # noise param injected via hparams for now
  829. hparams.setdefault('magnitude_std', float(val))
  830. elif key == 'm':
  831. magnitude = int(val)
  832. elif key == 'w':
  833. width = int(val)
  834. elif key == 'd':
  835. depth = int(val)
  836. elif key == 'a':
  837. alpha = float(val)
  838. elif key == 'b':
  839. blended = bool(val)
  840. else:
  841. assert False, 'Unknown AugMix config section'
  842. hparams.setdefault(
  843. 'magnitude_std',
  844. float('inf')) # default to uniform sampling (if not set via mstd arg)
  845. ops = augmix_ops(magnitude=magnitude, hparams=hparams)
  846. return AugMixAugment(ops,
  847. alpha=alpha,
  848. width=width,
  849. depth=depth,
  850. blended=blended)