# aster_tps.py — ASTER-style TPS (thin-plate-spline) rectification module.
  1. import itertools
  2. import math
  3. import numpy as np
  4. import torch
  5. from torch import nn
  6. from torch.nn import functional as F
  7. def conv3x3_block(in_planes, out_planes, stride=1):
  8. """3x3 convolution with padding."""
  9. conv_layer = nn.Conv2d(in_planes,
  10. out_planes,
  11. kernel_size=3,
  12. stride=1,
  13. padding=1)
  14. block = nn.Sequential(
  15. conv_layer,
  16. nn.BatchNorm2d(out_planes),
  17. nn.ReLU(inplace=True),
  18. )
  19. return block
  20. class STNHead(nn.Module):
  21. def __init__(self, in_planes, num_ctrlpoints, activation='none'):
  22. super(STNHead, self).__init__()
  23. self.in_planes = in_planes
  24. self.num_ctrlpoints = num_ctrlpoints
  25. self.activation = activation
  26. self.stn_convnet = nn.Sequential(
  27. conv3x3_block(in_planes, 32), # 32*64
  28. nn.MaxPool2d(kernel_size=2, stride=2),
  29. conv3x3_block(32, 64), # 16*32
  30. nn.MaxPool2d(kernel_size=2, stride=2),
  31. conv3x3_block(64, 128), # 8*16
  32. nn.MaxPool2d(kernel_size=2, stride=2),
  33. conv3x3_block(128, 256), # 4*8
  34. nn.MaxPool2d(kernel_size=2, stride=2),
  35. conv3x3_block(256, 256), # 2*4,
  36. nn.MaxPool2d(kernel_size=2, stride=2),
  37. conv3x3_block(256, 256)) # 1*2
  38. self.stn_fc1 = nn.Sequential(nn.Linear(2 * 256, 512),
  39. nn.BatchNorm1d(512),
  40. nn.ReLU(inplace=True))
  41. self.stn_fc2 = nn.Linear(512, num_ctrlpoints * 2)
  42. self.init_weights(self.stn_convnet)
  43. self.init_weights(self.stn_fc1)
  44. self.init_stn(self.stn_fc2)
  45. def init_weights(self, module):
  46. for m in module.modules():
  47. if isinstance(m, nn.Conv2d):
  48. n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
  49. m.weight.data.normal_(0, math.sqrt(2. / n))
  50. if m.bias is not None:
  51. m.bias.data.zero_()
  52. elif isinstance(m, nn.BatchNorm2d):
  53. m.weight.data.fill_(1)
  54. m.bias.data.zero_()
  55. elif isinstance(m, nn.Linear):
  56. m.weight.data.normal_(0, 0.001)
  57. m.bias.data.zero_()
  58. def init_stn(self, stn_fc2):
  59. margin = 0.01
  60. sampling_num_per_side = int(self.num_ctrlpoints / 2)
  61. ctrl_pts_x = np.linspace(margin, 1. - margin, sampling_num_per_side)
  62. ctrl_pts_y_top = np.ones(sampling_num_per_side) * margin
  63. ctrl_pts_y_bottom = np.ones(sampling_num_per_side) * (1 - margin)
  64. ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1)
  65. ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1)
  66. ctrl_points = np.concatenate([ctrl_pts_top, ctrl_pts_bottom],
  67. axis=0).astype(np.float32)
  68. if self.activation == 'none':
  69. pass
  70. elif self.activation == 'sigmoid':
  71. ctrl_points = -np.log(1. / ctrl_points - 1.)
  72. stn_fc2.weight.data.zero_()
  73. stn_fc2.bias.data = torch.Tensor(ctrl_points).view(-1)
  74. def forward(self, x):
  75. x = self.stn_convnet(x)
  76. batch_size, _, h, w = x.size()
  77. x = x.view(batch_size, -1)
  78. img_feat = self.stn_fc1(x)
  79. x = self.stn_fc2(0.1 * img_feat)
  80. if self.activation == 'sigmoid':
  81. x = F.sigmoid(x)
  82. x = x.view(-1, self.num_ctrlpoints, 2)
  83. return x
  84. def grid_sample(input, grid, canvas=None):
  85. output = F.grid_sample(input, grid)
  86. if canvas is None:
  87. return output
  88. else:
  89. input_mask = input.data.new(input.size()).fill_(1)
  90. output_mask = F.grid_sample(input_mask, grid)
  91. padded_output = output * output_mask + canvas * (1 - output_mask)
  92. return padded_output
  93. # phi(x1, x2) = r^2 * log(r), where r = ||x1 - x2||_2
  94. def compute_partial_repr(input_points, control_points):
  95. N = input_points.size(0)
  96. M = control_points.size(0)
  97. pairwise_diff = input_points.view(N, 1, 2) - control_points.view(1, M, 2)
  98. # original implementation, very slow
  99. # pairwise_dist = torch.sum(pairwise_diff ** 2, dim = 2) # square of distance
  100. pairwise_diff_square = pairwise_diff * pairwise_diff
  101. pairwise_dist = pairwise_diff_square[:, :, 0] + pairwise_diff_square[:, :,
  102. 1]
  103. repr_matrix = 0.5 * pairwise_dist * torch.log(pairwise_dist)
  104. # fix numerical error for 0 * log(0), substitute all nan with 0
  105. mask = repr_matrix != repr_matrix
  106. repr_matrix.masked_fill_(mask, 0)
  107. return repr_matrix
  108. # output_ctrl_pts are specified, according to our task.
  109. def build_output_control_points(num_control_points, margins):
  110. margin_x, margin_y = margins
  111. num_ctrl_pts_per_side = num_control_points // 2
  112. ctrl_pts_x = np.linspace(margin_x, 1.0 - margin_x, num_ctrl_pts_per_side)
  113. ctrl_pts_y_top = np.ones(num_ctrl_pts_per_side) * margin_y
  114. ctrl_pts_y_bottom = np.ones(num_ctrl_pts_per_side) * (1.0 - margin_y)
  115. ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1)
  116. ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1)
  117. # ctrl_pts_top = ctrl_pts_top[1:-1,:]
  118. # ctrl_pts_bottom = ctrl_pts_bottom[1:-1,:]
  119. output_ctrl_pts_arr = np.concatenate([ctrl_pts_top, ctrl_pts_bottom],
  120. axis=0)
  121. output_ctrl_pts = torch.Tensor(output_ctrl_pts_arr)
  122. return output_ctrl_pts
class TPSSpatialTransformer(nn.Module):
    """Thin-plate-spline image warper.

    For a fixed output grid and fixed target control points, precomputes the
    inverse TPS kernel and the per-pixel basis representation; at forward
    time only two batched matmuls are needed to turn predicted source
    control points into a sampling grid for ``grid_sample``.
    """

    def __init__(
        self,
        output_image_size,
        num_control_points,
        margins,
    ):
        # output_image_size: (height, width) of the rectified output image.
        # num_control_points: total control points, split top/bottom edge.
        # margins: (margin_x, margin_y) inset of the target control points.
        super(TPSSpatialTransformer, self).__init__()
        self.output_image_size = output_image_size
        self.num_control_points = num_control_points
        self.margins = margins
        self.target_height, self.target_width = output_image_size
        target_control_points = build_output_control_points(
            num_control_points, margins)
        N = num_control_points
        # N = N - 4
        # create padded kernel matrix: the standard (N+3)x(N+3) TPS system
        # [[K, 1, P], [1^T, 0, 0], [P^T, 0, 0]] with K the radial-basis
        # matrix and P the control-point coordinates (affine part).
        forward_kernel = torch.zeros(N + 3, N + 3)
        target_control_partial_repr = compute_partial_repr(
            target_control_points, target_control_points)
        forward_kernel[:N, :N].copy_(target_control_partial_repr)
        forward_kernel[:N, -3].fill_(1)
        forward_kernel[-3, :N].fill_(1)
        forward_kernel[:N, -2:].copy_(target_control_points)
        forward_kernel[-2:, :N].copy_(target_control_points.transpose(0, 1))
        # compute inverse matrix once; forward() solves the TPS system by
        # multiplying with this inverse instead of calling a solver per batch.
        inverse_kernel = torch.inverse(forward_kernel)
        # create target cordinate matrix: every output pixel as (x, y) in
        # [0, 1], row-major over (height, width).
        HW = self.target_height * self.target_width
        target_coordinate = list(
            itertools.product(range(self.target_height),
                              range(self.target_width)))
        target_coordinate = torch.Tensor(target_coordinate)  # HW x 2
        Y, X = target_coordinate.split(1, dim=1)
        Y = Y / (self.target_height - 1)
        X = X / (self.target_width - 1)
        target_coordinate = torch.cat([X, Y],
                                      dim=1)  # convert from (y, x) to (x, y)
        # Per-pixel TPS basis: [phi(pixel, ctrl_1..N) | 1 | x | y], HW x (N+3).
        target_coordinate_partial_repr = compute_partial_repr(
            target_coordinate, target_control_points)
        target_coordinate_repr = torch.cat([
            target_coordinate_partial_repr,
            torch.ones(HW, 1), target_coordinate
        ],
                                           dim=1)
        # register precomputed matrices as buffers so they move with the
        # module across devices and are saved in state_dict.
        self.register_buffer('inverse_kernel', inverse_kernel)
        self.register_buffer('padding_matrix', torch.zeros(3, 2))
        self.register_buffer('target_coordinate_repr', target_coordinate_repr)
        self.register_buffer('target_control_points', target_control_points)

    def forward(self, input, source_control_points):
        # input: image batch to warp.
        # source_control_points: (batch, num_control_points, 2), expected in
        # [0, 1] image coordinates (values outside are clamped below).
        assert source_control_points.ndimension() == 3
        assert source_control_points.size(1) == self.num_control_points
        assert source_control_points.size(2) == 2
        batch_size = source_control_points.size(0)
        # Pad with three zero rows: right-hand side of the TPS system
        # (the affine constraints have zero targets).
        Y = torch.cat([
            source_control_points,
            self.padding_matrix.expand(batch_size, 3, 2)
        ], 1)
        # Solve for the TPS coefficients, then evaluate the spline at every
        # output pixel to get its source-image coordinate.
        mapping_matrix = torch.matmul(self.inverse_kernel, Y)
        source_coordinate = torch.matmul(self.target_coordinate_repr,
                                         mapping_matrix)
        grid = source_coordinate.view(-1, self.target_height,
                                      self.target_width, 2)
        grid = torch.clamp(
            grid, 0, 1)  # the source_control_points may be out of [0, 1].
        # the input to grid_sample is normalized [-1, 1], but what we get is [0, 1]
        grid = 2.0 * grid - 1.0
        output_maps = grid_sample(input, grid, canvas=None)
        return output_maps
  193. class Aster_TPS(nn.Module):
  194. def __init__(
  195. self,
  196. in_channels,
  197. tps_inputsize=[32, 64],
  198. tps_outputsize=[32, 100],
  199. num_control_points=20,
  200. tps_margins=[0.05, 0.05],
  201. ) -> None:
  202. super().__init__()
  203. self.in_channels = in_channels
  204. #TODO
  205. self.out_channels = in_channels
  206. self.tps_inputsize = tps_inputsize
  207. self.num_control_points = num_control_points
  208. self.stn_head = STNHead(
  209. in_planes=3,
  210. num_ctrlpoints=num_control_points,
  211. )
  212. self.tps = TPSSpatialTransformer(
  213. output_image_size=tps_outputsize,
  214. num_control_points=num_control_points,
  215. margins=tps_margins,
  216. )
  217. def forward(self, img):
  218. stn_input = F.interpolate(img,
  219. self.tps_inputsize,
  220. mode='bilinear',
  221. align_corners=True)
  222. ctrl_points = self.stn_head(stn_input)
  223. img = self.tps(img, ctrl_points)
  224. return img