# rtmdet_v1_backbone.py

import torch
import torch.nn as nn

# support both package-relative and standalone execution
try:
    from .rtmdet_v1_basic import Conv, ELANBlock, DownSample
except ImportError:
    from rtmdet_v1_basic import Conv, ELANBlock, DownSample

model_urls = {
    'elannet_pico': "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_pico.pth",
    'elannet_nano': "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_nano.pth",
    'elannet_tiny': "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_tiny.pth",
    'elannet_small': "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_small.pth",
    'elannet_medium': "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_medium.pth",
    'elannet_large': "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_large.pth",
    'elannet_huge': "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_huge.pth",
}
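
# NOTE: these are ImageNet classification checkpoints from the repository
# linked above; load_weight() below filters them against the detection
# backbone's state dict before loading.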

# ---------------------------- Backbones ----------------------------
# ELANNet-P5
class ELANNet(nn.Module):
    def __init__(self, width=1.0, depth=1.0, act_type='silu', norm_type='BN', depthwise=False):
        super(ELANNet, self).__init__()
        # channel dims of the C3 / C4 / C5 features returned by forward()
        self.feat_dims = [int(512 * width), int(1024 * width), int(1024 * width)]

        # P1/2
        self.layer_1 = nn.Sequential(
            Conv(3, int(64*width), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type),
            Conv(int(64*width), int(64*width), k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
        )
        # P2/4
        self.layer_2 = nn.Sequential(
            Conv(int(64*width), int(128*width), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise),
            ELANBlock(in_dim=int(128*width), out_dim=int(256*width), expand_ratio=0.5, depth=depth,
                      act_type=act_type, norm_type=norm_type, depthwise=depthwise)
        )
        # P3/8
        self.layer_3 = nn.Sequential(
            DownSample(in_dim=int(256*width), out_dim=int(256*width), act_type=act_type, norm_type=norm_type),
            ELANBlock(in_dim=int(256*width), out_dim=int(512*width), expand_ratio=0.5, depth=depth,
                      act_type=act_type, norm_type=norm_type, depthwise=depthwise)
        )
        # P4/16
        self.layer_4 = nn.Sequential(
            DownSample(in_dim=int(512*width), out_dim=int(512*width), act_type=act_type, norm_type=norm_type),
            ELANBlock(in_dim=int(512*width), out_dim=int(1024*width), expand_ratio=0.5, depth=depth,
                      act_type=act_type, norm_type=norm_type, depthwise=depthwise)
        )
        # P5/32
        self.layer_5 = nn.Sequential(
            DownSample(in_dim=int(1024*width), out_dim=int(1024*width), act_type=act_type, norm_type=norm_type),
            ELANBlock(in_dim=int(1024*width), out_dim=int(1024*width), expand_ratio=0.25, depth=depth,
                      act_type=act_type, norm_type=norm_type, depthwise=depthwise)
        )

    def forward(self, x):
        c1 = self.layer_1(x)
        c2 = self.layer_2(c1)
        c3 = self.layer_3(c2)
        c4 = self.layer_4(c3)
        c5 = self.layer_5(c4)
        # multi-level features at strides 8, 16 and 32
        outputs = [c3, c4, c5]

        return outputs
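
# A minimal usage sketch (shapes assume each DownSample / stride-2 Conv above
# halves the resolution, as their names suggest): a 640x640 input yields
# features at strides 8/16/32, so with width=1.0 they come out as
# [1, 512, 80, 80], [1, 1024, 40, 40] and [1, 1024, 20, 20], matching
# self.feat_dims.
#
#   net = ELANNet(width=1.0, depth=1.0)
#   c3, c4, c5 = net(torch.randn(1, 3, 640, 640))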

# ---------------------------- Functions ----------------------------
## load pretrained weight
def load_weight(model, model_name):
    # load weight
    print('Loading pretrained weight ...')
    url = model_urls[model_name]
    if url is not None:
        checkpoint = torch.hub.load_state_dict_from_url(
            url=url, map_location="cpu", check_hash=True)
        # checkpoint state dict
        checkpoint_state_dict = checkpoint.pop("model")
        # model state dict
        model_state_dict = model.state_dict()
        # keep only the checkpoint entries whose name and shape match the model
        for k in list(checkpoint_state_dict.keys()):
            if k in model_state_dict:
                shape_model = tuple(model_state_dict[k].shape)
                shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
                if shape_model != shape_checkpoint:
                    checkpoint_state_dict.pop(k)
                    print('Shape mismatch, dropping: {}'.format(k))
            else:
                checkpoint_state_dict.pop(k)
                print('Unused key, dropping: {}'.format(k))
        # keys may have been filtered out above, so load non-strictly
        model.load_state_dict(checkpoint_state_dict, strict=False)
    else:
        print('No pretrained weight for {}'.format(model_name))

    return model
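
# Example (assumes the release assets above are reachable and store a dict
# with a "model" entry, as load_weight expects):
#
#   net = ELANNet(width=1.0, depth=1.0)
#   net = load_weight(net, model_name='elannet_large')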

## build ELAN-Net
def build_backbone(cfg, pretrained=False):
    # model
    backbone = ELANNet(
        width=cfg['width'],
        depth=cfg['depth'],
        act_type=cfg['bk_act'],
        norm_type=cfg['bk_norm'],
        depthwise=cfg['bk_dpw']
        )
    # check whether to load imagenet pretrained weight
    if pretrained:
        if cfg['width'] == 0.25 and cfg['depth'] == 0.34 and cfg['bk_dpw']:
            backbone = load_weight(backbone, model_name='elannet_pico')
        elif cfg['width'] == 0.25 and cfg['depth'] == 0.34:
            backbone = load_weight(backbone, model_name='elannet_nano')
        elif cfg['width'] == 0.375 and cfg['depth'] == 0.34:
            backbone = load_weight(backbone, model_name='elannet_tiny')
        elif cfg['width'] == 0.5 and cfg['depth'] == 0.34:
            backbone = load_weight(backbone, model_name='elannet_small')
        elif cfg['width'] == 0.75 and cfg['depth'] == 0.67:
            backbone = load_weight(backbone, model_name='elannet_medium')
        elif cfg['width'] == 1.0 and cfg['depth'] == 1.0:
            backbone = load_weight(backbone, model_name='elannet_large')
        elif cfg['width'] == 1.25 and cfg['depth'] == 1.34:
            backbone = load_weight(backbone, model_name='elannet_huge')
    feat_dims = backbone.feat_dims

    return backbone, feat_dims
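
# For reference, the scale-to-checkpoint mapping encoded above (only the
# pico/nano pair is disambiguated by 'bk_dpw'; note the exact float
# comparisons, so cfg values must match these literals):
#
#   width   depth   bk_dpw   checkpoint
#   0.25    0.34    True     elannet_pico
#   0.25    0.34    False    elannet_nano
#   0.375   0.34    -        elannet_tiny
#   0.5     0.34    -        elannet_small
#   0.75    0.67    -        elannet_medium
#   1.0     1.0     -        elannet_large
#   1.25    1.34    -        elannet_huge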

if __name__ == '__main__':
    import time
    from thop import profile

    cfg = {
        'pretrained': True,
        'bk_act': 'silu',
        'bk_norm': 'BN',
        'bk_dpw': True,
        'width': 0.25,
        'depth': 0.34,
    }
    # pass the flag through, otherwise cfg['pretrained'] is silently ignored
    model, feats = build_backbone(cfg, pretrained=cfg['pretrained'])

    x = torch.randn(1, 3, 640, 640)
    t0 = time.time()
    outputs = model(x)
    t1 = time.time()
    print('Time: ', t1 - t0)
    for out in outputs:
        print(out.shape)

    print('==============================')
    # thop counts multiply-accumulates (MACs), so double them to report FLOPs
    flops, params = profile(model, inputs=(x, ), verbose=False)
    print('==============================')
    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
    print('Params : {:.2f} M'.format(params / 1e6))