import numpy as np
import torch
import torch.nn as nn
from torch.nn.init import kaiming_normal_, ones_, trunc_normal_, zeros_

from openrec.modeling.common import DropPath, Identity, Mlp


class ConvBNLayer(nn.Module):

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size=3,
        stride=1,
        padding=0,
        bias=False,
        groups=1,
        act=nn.GELU,
    ):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
        )
        self.norm = nn.BatchNorm2d(out_channels)
        self.act = act()

    def forward(self, inputs):
        out = self.conv(inputs)
        out = self.norm(out)
        out = self.act(out)
        return out


class Attention(nn.Module):

    def __init__(
        self,
        dim,
        num_heads=8,
        qkv_bias=False,
        qk_scale=None,
        attn_drop=0.0,
        proj_drop=0.0,
    ):
        super().__init__()
        self.num_heads = num_heads
        self.dim = dim
        self.head_dim = dim // num_heads
        self.scale = qk_scale or self.head_dim**-0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, N, _ = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads,
                                  self.head_dim).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        x = attn @ v
        x = x.transpose(1, 2).reshape(B, N, self.dim)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class Block(nn.Module):

    def __init__(
        self,
        dim,
        num_heads,
        mlp_ratio=4.0,
        qkv_bias=False,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
        eps=1e-6,
    ):
        super().__init__()
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.norm1 = norm_layer(dim, eps=eps)
        self.mixer = Attention(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            attn_drop=attn_drop,
            proj_drop=drop,
        )
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else Identity()
        self.norm2 = norm_layer(dim, eps=eps)
        self.mlp = Mlp(
            in_features=dim,
            hidden_features=mlp_hidden_dim,
            act_layer=act_layer,
            drop=drop,
        )

    def forward(self, x):
        x = self.norm1(x + self.drop_path(self.mixer(x)))
        x = self.norm2(x + self.drop_path(self.mlp(x)))
        return x


class ConvBlock(nn.Module):
    """Mixing block whose token mixer is a 5x5 grouped convolution applied to
    the 2D feature map; both LayerNorms operate on the flattened sequence."""

    def __init__(
        self,
        dim,
        num_heads,
        mlp_ratio=4.0,
        drop=0.0,
        drop_path=0.0,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
        eps=1e-6,
    ):
        super().__init__()
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.norm1 = norm_layer(dim, eps=eps)
        self.mixer = nn.Conv2d(dim, dim, 5, 1, 2, groups=num_heads)
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else Identity()
        self.norm2 = norm_layer(dim, eps=eps)
        self.mlp = Mlp(
            in_features=dim,
            hidden_features=mlp_hidden_dim,
            act_layer=act_layer,
            drop=drop,
        )

    def forward(self, x):
        C, H, W = x.shape[1:]
        x = x + self.drop_path(self.mixer(x))
        # Flatten to (B, H*W, C) for the LayerNorm/MLP part ...
        x = self.norm1(x.flatten(2).transpose(1, 2))
        x = self.norm2(x + self.drop_path(self.mlp(x)))
        # ... then restore the (B, C, H, W) feature map.
        x = x.transpose(1, 2).reshape(-1, C, H, W)
        return x


class FlattenTranspose(nn.Module):

    def forward(self, x):
        return x.flatten(2).transpose(1, 2)


class SubSample2D(nn.Module):
    """Strided 3x3 conv down-sampling applied to a (B, C, H, W) feature map."""

    def __init__(
        self,
        in_channels,
        out_channels,
        stride=[2, 1],
    ):
        super().__init__()
        self.conv = nn.Conv2d(in_channels,
                              out_channels,
                              kernel_size=3,
                              stride=stride,
                              padding=1)
        self.norm = nn.LayerNorm(out_channels)

    def forward(self, x, sz):
        x = self.conv(x)
        C, H, W = x.shape[1:]
        x = self.norm(x.flatten(2).transpose(1, 2))
        x = x.transpose(1, 2).reshape(-1, C, H, W)
        return x, [H, W]


class SubSample1D(nn.Module):
    """Strided 3x3 conv down-sampling applied to a flattened (B, H*W, C) sequence."""

    def __init__(
        self,
        in_channels,
        out_channels,
        stride=[2, 1],
    ):
        super().__init__()
        self.conv = nn.Conv2d(in_channels,
                              out_channels,
                              kernel_size=3,
                              stride=stride,
                              padding=1)
        self.norm = nn.LayerNorm(out_channels)

    def forward(self, x, sz):
        C = x.shape[-1]
        x = x.transpose(1, 2).reshape(-1, C, sz[0], sz[1])
        x = self.conv(x)
        C, H, W = x.shape[1:]
        x = self.norm(x.flatten(2).transpose(1, 2))
        return x, [H, W]


class IdentitySize(nn.Module):

    def forward(self, x, sz):
        return x, sz


class SVTRStage(nn.Module):
    """A stage of mixing blocks (convolutional and/or global attention),
    optionally followed by a down-sampling layer."""

    def __init__(self,
                 dim=64,
                 out_dim=256,
                 depth=3,
                 mixer=['Local'] * 3,
                 sub_k=[2, 1],
                 num_heads=2,
                 mlp_ratio=4,
                 qkv_bias=True,
                 qk_scale=None,
                 drop_rate=0.0,
                 attn_drop_rate=0.0,
                 drop_path=[0.1] * 3,
                 norm_layer=nn.LayerNorm,
                 act=nn.GELU,
                 eps=1e-6,
                 downsample=None,
                 **kwargs):
        super().__init__()
        self.dim = dim

        # Number of leading 'Conv' mixers; the stage switches from the 2D
        # feature map to a flattened sequence right after the last conv block.
        conv_block_num = sum([1 if mix == 'Conv' else 0 for mix in mixer])
        self.blocks = nn.Sequential()
        for i in range(depth):
            if mixer[i] == 'Conv':
                self.blocks.append(
                    ConvBlock(
                        dim=dim,
                        num_heads=num_heads,
                        mlp_ratio=mlp_ratio,
                        drop=drop_rate,
                        act_layer=act,
                        drop_path=drop_path[i],
                        norm_layer=norm_layer,
                        eps=eps,
                    ))
            else:
                self.blocks.append(
                    Block(
                        dim=dim,
                        num_heads=num_heads,
                        mlp_ratio=mlp_ratio,
                        qkv_bias=qkv_bias,
                        qk_scale=qk_scale,
                        drop=drop_rate,
                        act_layer=act,
                        attn_drop=attn_drop_rate,
                        drop_path=drop_path[i],
                        norm_layer=norm_layer,
                        eps=eps,
                    ))
            if i == conv_block_num - 1 and mixer[-1] != 'Conv':
                self.blocks.append(FlattenTranspose())

        if downsample:
            if mixer[-1] == 'Conv':
                self.downsample = SubSample2D(dim, out_dim, stride=sub_k)
            elif mixer[-1] == 'Global':
                self.downsample = SubSample1D(dim, out_dim, stride=sub_k)
        else:
            self.downsample = IdentitySize()

    def forward(self, x, sz):
        for blk in self.blocks:
            x = blk(x)
        x, sz = self.downsample(x, sz)
        return x, sz


class ADDPosEmbed(nn.Module):
    """Adds a learnable 2D positional embedding, cropped to the input size."""

    def __init__(self, feat_max_size=[8, 32], embed_dim=768):
        super().__init__()
        pos_embed = torch.zeros(
            [1, feat_max_size[0] * feat_max_size[1], embed_dim],
            dtype=torch.float32)
        trunc_normal_(pos_embed, mean=0, std=0.02)
        self.pos_embed = nn.Parameter(
            pos_embed.transpose(1, 2).reshape(1, embed_dim, feat_max_size[0],
                                              feat_max_size[1]),
            requires_grad=True,
        )

    def forward(self, x):
        sz = x.shape[2:]
        x = x + self.pos_embed[:, :, :sz[0], :sz[1]]
        return x


class POPatchEmbed(nn.Module):
    """Image to Patch Embedding: two stride-2 conv layers (overall stride 4),
    with optional positional embedding and optional flattening to a sequence."""

    def __init__(
        self,
        in_channels=3,
        feat_max_size=[8, 32],
        embed_dim=768,
        use_pos_embed=False,
        flatten=False,
    ):
        super().__init__()
        self.patch_embed = nn.Sequential(
            ConvBNLayer(
                in_channels=in_channels,
                out_channels=embed_dim // 2,
                kernel_size=3,
                stride=2,
                padding=1,
                act=nn.GELU,
                bias=False,
            ),
            ConvBNLayer(
                in_channels=embed_dim // 2,
                out_channels=embed_dim,
                kernel_size=3,
                stride=2,
                padding=1,
                act=nn.GELU,
                bias=False,
            ),
        )
        if use_pos_embed:
            self.patch_embed.append(ADDPosEmbed(feat_max_size, embed_dim))
        if flatten:
            self.patch_embed.append(FlattenTranspose())

    def forward(self, x):
        sz = x.shape[2:]
        x = self.patch_embed(x)
        return x, [sz[0] // 4, sz[1] // 4]


class LastStage(nn.Module):
    """Averages over the height dimension and projects to the output channels."""

    def __init__(self, in_channels, out_channels, last_drop, out_char_num):
        super().__init__()
        self.last_conv = nn.Linear(in_channels, out_channels, bias=False)
        self.hardswish = nn.Hardswish()
        self.dropout = nn.Dropout(p=last_drop)

    def forward(self, x, sz):
        x = x.reshape(-1, sz[0], sz[1], x.shape[-1])
        x = x.mean(1)
        x = self.last_conv(x)
        x = self.hardswish(x)
        x = self.dropout(x)
        return x, [1, sz[1]]


class Feat2D(nn.Module):
    """Reshapes a (B, H*W, C) sequence back into a (B, C, H, W) feature map."""

    def __init__(self):
        super().__init__()

    def forward(self, x, sz):
        C = x.shape[-1]
        x = x.transpose(1, 2).reshape(-1, C, sz[0], sz[1])
        return x, sz


class SVTRv2LNConv(nn.Module):
    """SVTRv2 encoder built from conv-mixing and global-attention stages."""

    def __init__(self,
                 max_sz=[32, 128],
                 in_channels=3,
                 out_channels=192,
                 out_char_num=25,
                 depths=[3, 6, 3],
                 dims=[64, 128, 256],
                 mixer=[['Conv'] * 3, ['Conv'] * 3 + ['Global'] * 3,
                        ['Global'] * 3],
                 use_pos_embed=True,
                 sub_k=[[1, 1], [2, 1], [1, 1]],
                 num_heads=[2, 4, 8],
                 mlp_ratio=4,
                 qkv_bias=True,
                 qk_scale=None,
                 drop_rate=0.0,
                 last_drop=0.1,
                 attn_drop_rate=0.0,
                 drop_path_rate=0.1,
                 norm_layer=nn.LayerNorm,
                 act=nn.GELU,
                 last_stage=False,
                 feat2d=False,
                 eps=1e-6,
                 **kwargs):
        super().__init__()
        num_stages = len(depths)
        self.num_features = dims[-1]

        feat_max_size = [max_sz[0] // 4, max_sz[1] // 4]
        self.pope = POPatchEmbed(
            in_channels=in_channels,
            feat_max_size=feat_max_size,
            embed_dim=dims[0],
            use_pos_embed=use_pos_embed,
            flatten=mixer[0][0] != 'Conv',
        )

        dpr = np.linspace(0, drop_path_rate,
                          sum(depths))  # stochastic depth decay rule

        self.stages = nn.ModuleList()
        for i_stage in range(num_stages):
            stage = SVTRStage(
                dim=dims[i_stage],
                out_dim=dims[i_stage + 1] if i_stage < num_stages - 1 else 0,
                depth=depths[i_stage],
                mixer=mixer[i_stage],
                sub_k=sub_k[i_stage],
                num_heads=num_heads[i_stage],
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                qk_scale=qk_scale,
                drop_rate=drop_rate,
                attn_drop_rate=attn_drop_rate,
                drop_path=dpr[sum(depths[:i_stage]):sum(depths[:i_stage + 1])],
                norm_layer=norm_layer,
                act=act,
                downsample=i_stage < num_stages - 1,
                eps=eps,
            )
            self.stages.append(stage)

        self.out_channels = self.num_features
        self.last_stage = last_stage
        if last_stage:
            self.out_channels = out_channels
            self.stages.append(
                LastStage(self.num_features, out_channels, last_drop,
                          out_char_num))
        if feat2d:
            self.stages.append(Feat2D())
        self.apply(self._init_weights)

    def _init_weights(self, m: nn.Module):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, mean=0, std=0.02)
            if m.bias is not None:
                zeros_(m.bias)
        elif isinstance(m, nn.LayerNorm):
            zeros_(m.bias)
            ones_(m.weight)
        elif isinstance(m, nn.Conv2d):
            kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'patch_embed', 'downsample', 'pos_embed'}

    def forward(self, x):
        x, sz = self.pope(x)
        for stage in self.stages:
            x, sz = stage(x, sz)
        return x
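

# The block below is not part of the original module: it is a minimal smoke-test
# sketch showing how the encoder can be instantiated and called. The 32x128 input
# size matches the default `max_sz`; the expected output shape is an assumption
# derived from the default configuration (stride-4 patch embedding plus a single
# [2, 1] sub-sampling step between stages 1 and 2).
if __name__ == '__main__':
    model = SVTRv2LNConv()
    model.eval()

    # Dummy batch of two 32x128 RGB text-line images.
    dummy = torch.randn(2, 3, 32, 128)
    with torch.no_grad():
        feats = model(dummy)

    # With the defaults (last_stage=False, feat2d=False) the output is a
    # sequence of shape (batch, H/8 * W/4, dims[-1]) = (2, 128, 256).
    print(feats.shape)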