# lodet_backbone.py
  1. import torch
  2. import torch.nn as nn
  3. try:
  4. from .lodet_basic import Conv, SMBlock, DSBlock
  5. except:
  6. from lodet_basic import Conv, SMBlock, DSBlock
  7. model_urls = {
  8. 'smnet': None,
  9. }
# ---------------------------- Backbones ----------------------------
  11. class ScaleModulationNet(nn.Module):
  12. def __init__(self, act_type='silu', norm_type='BN', depthwise=False):
  13. super(ScaleModulationNet, self).__init__()
  14. self.feat_dims = [64, 128, 256]
  15. # P1/2
  16. self.layer_1 = nn.Sequential(
  17. Conv(3, 16, k=3, p=1, s=2, act_type=act_type, norm_type=norm_type),
  18. Conv(16, 16, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise),
  19. )
  20. # P2/4
  21. self.layer_2 = nn.Sequential(
  22. DSBlock(16, act_type, norm_type, depthwise),
  23. SMBlock(32, 32, act_type, norm_type, depthwise)
  24. )
  25. # P3/8
  26. self.layer_3 = nn.Sequential(
  27. DSBlock(32, act_type, norm_type, depthwise),
  28. SMBlock(64, 64, act_type, norm_type, depthwise)
  29. )
  30. # P4/16
  31. self.layer_4 = nn.Sequential(
  32. DSBlock(64, act_type, norm_type, depthwise),
  33. SMBlock(128, 128, act_type, norm_type, depthwise)
  34. )
  35. # P5/32
  36. self.layer_5 = nn.Sequential(
  37. DSBlock(128, act_type, norm_type, depthwise),
  38. SMBlock(256, 256, act_type, norm_type, depthwise)
  39. )
  40. def forward(self, x):
  41. c1 = self.layer_1(x)
  42. c2 = self.layer_2(c1)
  43. c3 = self.layer_3(c2)
  44. c4 = self.layer_4(c3)
  45. c5 = self.layer_5(c4)
  46. outputs = [c3, c4, c5]
  47. return outputs
# ---------------------------- Functions ----------------------------
## load pretrained weight
  50. def load_weight(model, model_name):
  51. # load weight
  52. print('Loading pretrained weight ...')
  53. url = model_urls[model_name]
  54. if url is not None:
  55. checkpoint = torch.hub.load_state_dict_from_url(
  56. url=url, map_location="cpu", check_hash=True)
  57. # checkpoint state dict
  58. checkpoint_state_dict = checkpoint.pop("model")
  59. # model state dict
  60. model_state_dict = model.state_dict()
  61. # check
  62. for k in list(checkpoint_state_dict.keys()):
  63. if k in model_state_dict:
  64. shape_model = tuple(model_state_dict[k].shape)
  65. shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
  66. if shape_model != shape_checkpoint:
  67. checkpoint_state_dict.pop(k)
  68. else:
  69. checkpoint_state_dict.pop(k)
  70. print(k)
  71. model.load_state_dict(checkpoint_state_dict)
  72. else:
  73. print('No pretrained for {}'.format(model_name))
  74. return model
## build SMnet
  76. def build_backbone(cfg, pretrained=False):
  77. # model
  78. backbone = ScaleModulationNet(
  79. act_type=cfg['bk_act'],
  80. norm_type=cfg['bk_norm'],
  81. depthwise=cfg['bk_dpw']
  82. )
  83. # check whether to load imagenet pretrained weight
  84. if pretrained:
  85. backbone = load_weight(backbone, model_name='smnet')
  86. feat_dims = backbone.feat_dims
  87. return backbone, feat_dims
  88. if __name__ == '__main__':
  89. import time
  90. from thop import profile
  91. cfg = {
  92. 'pretrained': True,
  93. 'bk_act': 'silu',
  94. 'bk_norm': 'BN',
  95. 'bk_dpw': True,
  96. }
  97. model, feats = build_backbone(cfg)
  98. x = torch.randn(1, 3, 640, 640)
  99. t0 = time.time()
  100. outputs = model(x)
  101. t1 = time.time()
  102. print('Time: ', t1 - t0)
  103. for out in outputs:
  104. print(out.shape)
  105. print('==============================')
  106. flops, params = profile(model, inputs=(x, ), verbose=False)
  107. print('==============================')
  108. print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
  109. print('Params : {:.2f} M'.format(params / 1e6))