# yolov4_pafpn.py
  1. import torch
  2. import torch.nn as nn
  3. import torch.nn.functional as F
  4. from typing import List
  5. try:
  6. from .modules import ConvModule, CSPBlock
  7. except:
  8. from modules import ConvModule, CSPBlock
  9. # PaFPN-CSP
  10. class Yolov4PaFPN(nn.Module):
  11. def __init__(self,
  12. in_dims: List = [256, 512, 1024],
  13. head_dim: int = 256,
  14. ):
  15. super(Yolov4PaFPN, self).__init__()
  16. self.in_dims = in_dims
  17. self.head_dim = head_dim
  18. self.fpn_out_dims = [head_dim] * 3
  19. c3, c4, c5 = in_dims
  20. # top down
  21. ## P5 -> P4
  22. self.reduce_layer_1 = ConvModule(c5, 512, kernel_size=1)
  23. self.top_down_layer_1 = CSPBlock(in_dim = c4 + 512,
  24. out_dim = 512,
  25. expand_ratio = 0.5,
  26. num_blocks = 3,
  27. shortcut = False,
  28. )
  29. ## P4 -> P3
  30. self.reduce_layer_2 = ConvModule(512, 256, kernel_size=1)
  31. self.top_down_layer_2 = CSPBlock(in_dim = c3 + 256,
  32. out_dim = 256,
  33. expand_ratio = 0.5,
  34. num_blocks = 3,
  35. shortcut = False,
  36. )
  37. # bottom up
  38. ## P3 -> P4
  39. self.reduce_layer_3 = ConvModule(256, 256, kernel_size=3, stride=2)
  40. self.bottom_up_layer_1 = CSPBlock(in_dim = 256 + 256,
  41. out_dim = 512,
  42. expand_ratio = 0.5,
  43. num_blocks = 3,
  44. shortcut = False,
  45. )
  46. ## P4 -> P5
  47. self.reduce_layer_4 = ConvModule(512, 512, kernel_size=3, stride=2)
  48. self.bottom_up_layer_2 = CSPBlock(in_dim = 512 + 512,
  49. out_dim = 1024,
  50. expand_ratio = 0.5,
  51. num_blocks = 3,
  52. shortcut = False,
  53. )
  54. # output proj layers
  55. self.out_layers = nn.ModuleList([ConvModule(in_dim, head_dim, kernel_size=1)
  56. for in_dim in [256, 512, 1024]
  57. ])
  58. def forward(self, features):
  59. c3, c4, c5 = features
  60. # P5 -> P4
  61. c6 = self.reduce_layer_1(c5)
  62. c7 = F.interpolate(c6, scale_factor=2.0) # s32->s16
  63. c8 = torch.cat([c7, c4], dim=1)
  64. c9 = self.top_down_layer_1(c8)
  65. # P4 -> P3
  66. c10 = self.reduce_layer_2(c9)
  67. c11 = F.interpolate(c10, scale_factor=2.0) # s16->s8
  68. c12 = torch.cat([c11, c3], dim=1)
  69. c13 = self.top_down_layer_2(c12) # to det
  70. # P3 -> P4
  71. c14 = self.reduce_layer_3(c13)
  72. c15 = torch.cat([c14, c10], dim=1)
  73. c16 = self.bottom_up_layer_1(c15) # to det
  74. # P4 -> P5
  75. c17 = self.reduce_layer_4(c16)
  76. c18 = torch.cat([c17, c6], dim=1)
  77. c19 = self.bottom_up_layer_2(c18) # to det
  78. out_feats = [c13, c16, c19] # [P3, P4, P5]
  79. # output proj layers
  80. out_feats_proj = []
  81. for feat, layer in zip(out_feats, self.out_layers):
  82. out_feats_proj.append(layer(feat))
  83. return out_feats_proj
  84. if __name__=='__main__':
  85. import time
  86. from thop import profile
  87. # Model config
  88. # Build a head
  89. in_dims = [128, 256, 512]
  90. fpn = Yolov4PaFPN(in_dims, head_dim=256)
  91. # Randomly generate a input data
  92. x = [torch.randn(1, in_dims[0], 80, 80),
  93. torch.randn(1, in_dims[1], 40, 40),
  94. torch.randn(1, in_dims[2], 20, 20)]
  95. # Inference
  96. t0 = time.time()
  97. output = fpn(x)
  98. t1 = time.time()
  99. print('Time: ', t1 - t0)
  100. print('====== FPN output ====== ')
  101. for level, feat in enumerate(output):
  102. print("- Level-{} : ".format(level), feat.shape)
  103. flops, params = profile(fpn, inputs=(x, ), verbose=False)
  104. print('==============================')
  105. print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
  106. print('Params : {:.2f} M'.format(params / 1e6))