# lodet_backbone.py — SMNet backbone for LODet.
  1. import torch
  2. import torch.nn as nn
  3. try:
  4. from .lodet_basic import Conv, SMBlock
  5. except:
  6. from lodet_basic import Conv, SMBlock
  7. model_urls = {
  8. 'smnet': None,
  9. }
  10. # ---------------------------- Backbones ----------------------------
  11. class ScaleModulationNet(nn.Module):
  12. def __init__(self, act_type='silu', norm_type='BN', depthwise=False):
  13. super(ScaleModulationNet, self).__init__()
  14. self.feat_dims = [128, 256, 256]
  15. # P1/2
  16. self.layer_1 = Conv(3, 32, k=3, p=1, s=2, act_type=act_type, norm_type=norm_type)
  17. # P2/4
  18. self.layer_2 = nn.Sequential(
  19. nn.MaxPool2d((2, 2), stride=2),
  20. SMBlock(32, 64, 0.5, act_type, norm_type, depthwise)
  21. )
  22. # P3/8
  23. self.layer_3 = nn.Sequential(
  24. nn.MaxPool2d((2, 2), stride=2),
  25. SMBlock(64, 128, 0.5, act_type, norm_type, depthwise)
  26. )
  27. # P4/16
  28. self.layer_4 = nn.Sequential(
  29. nn.MaxPool2d((2, 2), stride=2),
  30. SMBlock(128, 256, 0.5, act_type, norm_type, depthwise)
  31. )
  32. # P5/32
  33. self.layer_5 = nn.Sequential(
  34. nn.MaxPool2d((2, 2), stride=2),
  35. SMBlock(256, 256, 0.25, act_type, norm_type, depthwise)
  36. )
  37. def forward(self, x):
  38. c1 = self.layer_1(x)
  39. c2 = self.layer_2(c1)
  40. c3 = self.layer_3(c2)
  41. c4 = self.layer_4(c3)
  42. c5 = self.layer_5(c4)
  43. outputs = [c3, c4, c5]
  44. return outputs
  45. # ---------------------------- Functions ----------------------------
  46. ## load pretrained weight
  47. def load_weight(model, model_name):
  48. # load weight
  49. print('Loading pretrained weight ...')
  50. url = model_urls[model_name]
  51. if url is not None:
  52. checkpoint = torch.hub.load_state_dict_from_url(
  53. url=url, map_location="cpu", check_hash=True)
  54. # checkpoint state dict
  55. checkpoint_state_dict = checkpoint.pop("model")
  56. # model state dict
  57. model_state_dict = model.state_dict()
  58. # check
  59. for k in list(checkpoint_state_dict.keys()):
  60. if k in model_state_dict:
  61. shape_model = tuple(model_state_dict[k].shape)
  62. shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
  63. if shape_model != shape_checkpoint:
  64. checkpoint_state_dict.pop(k)
  65. else:
  66. checkpoint_state_dict.pop(k)
  67. print(k)
  68. model.load_state_dict(checkpoint_state_dict)
  69. else:
  70. print('No pretrained for {}'.format(model_name))
  71. return model
  72. ## build SMnet
  73. def build_backbone(cfg, pretrained=False):
  74. # model
  75. backbone = ScaleModulationNet(
  76. act_type=cfg['bk_act'],
  77. norm_type=cfg['bk_norm'],
  78. depthwise=cfg['bk_dpw']
  79. )
  80. # check whether to load imagenet pretrained weight
  81. if pretrained:
  82. backbone = load_weight(backbone, model_name='smnet')
  83. feat_dims = backbone.feat_dims
  84. return backbone, feat_dims
  85. if __name__ == '__main__':
  86. import time
  87. from thop import profile
  88. cfg = {
  89. 'pretrained': True,
  90. 'bk_act': 'silu',
  91. 'bk_norm': 'BN',
  92. 'bk_dpw': True,
  93. }
  94. model, feats = build_backbone(cfg)
  95. x = torch.randn(1, 3, 640, 640)
  96. t0 = time.time()
  97. outputs = model(x)
  98. t1 = time.time()
  99. print('Time: ', t1 - t0)
  100. for out in outputs:
  101. print(out.shape)
  102. print('==============================')
  103. flops, params = profile(model, inputs=(x, ), verbose=False)
  104. print('==============================')
  105. print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
  106. print('Params : {:.2f} M'.format(params / 1e6))