yolov5_pafpn.py

from typing import List

import torch
import torch.nn as nn
import torch.nn.functional as F

from .yolov5_basic import BasicConv, CSPBlock


# Yolov5 PaFPN
class Yolov5PaFPN(nn.Module):
    def __init__(self, cfg, in_dims: List = [256, 512, 1024]):
        super(Yolov5PaFPN, self).__init__()
        self.in_dims = in_dims
        c3, c4, c5 = in_dims

        # ---------------------- Yolov5's Top down FPN ----------------------
        ## P5 -> P4
        self.reduce_layer_1 = BasicConv(c5, round(512*cfg.width),
                                        kernel_size=1, padding=0, stride=1,
                                        act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)
        self.top_down_layer_1 = CSPBlock(in_dim = c4 + round(512*cfg.width),
                                         out_dim = round(512*cfg.width),
                                         num_blocks = round(3*cfg.depth),
                                         expansion = 0.5,
                                         shortcut = False,
                                         act_type = cfg.fpn_act,
                                         norm_type = cfg.fpn_norm,
                                         depthwise = cfg.fpn_depthwise)
        ## P4 -> P3
        self.reduce_layer_2 = BasicConv(round(512*cfg.width), round(256*cfg.width),
                                        kernel_size=1, padding=0, stride=1,
                                        act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)
        self.top_down_layer_2 = CSPBlock(in_dim = c3 + round(256*cfg.width),
                                         out_dim = round(256*cfg.width),
                                         num_blocks = round(3*cfg.depth),
                                         expansion = 0.5,
                                         shortcut = False,
                                         act_type = cfg.fpn_act,
                                         norm_type = cfg.fpn_norm,
                                         depthwise = cfg.fpn_depthwise)

        # ---------------------- Yolov5's Bottom up PAN ----------------------
        ## P3 -> P4
        self.downsample_layer_1 = BasicConv(round(256*cfg.width), round(256*cfg.width),
                                            kernel_size=3, padding=1, stride=2,
                                            act_type=cfg.fpn_act, norm_type=cfg.fpn_norm, depthwise=cfg.fpn_depthwise)
        self.bottom_up_layer_1 = CSPBlock(in_dim = round(256*cfg.width) + round(256*cfg.width),
                                          out_dim = round(512*cfg.width),
                                          num_blocks = round(3*cfg.depth),
                                          expansion = 0.5,
                                          shortcut = False,
                                          act_type = cfg.fpn_act,
                                          norm_type = cfg.fpn_norm,
                                          depthwise = cfg.fpn_depthwise)
        ## P4 -> P5
        self.downsample_layer_2 = BasicConv(round(512*cfg.width), round(512*cfg.width),
                                            kernel_size=3, padding=1, stride=2,
                                            act_type=cfg.fpn_act, norm_type=cfg.fpn_norm, depthwise=cfg.fpn_depthwise)
        self.bottom_up_layer_2 = CSPBlock(in_dim = round(512*cfg.width) + round(512*cfg.width),
                                          out_dim = round(1024*cfg.width),
                                          num_blocks = round(3*cfg.depth),
                                          expansion = 0.5,
                                          shortcut = False,
                                          act_type = cfg.fpn_act,
                                          norm_type = cfg.fpn_norm,
                                          depthwise = cfg.fpn_depthwise)

        # ---------------------- Yolov5's output projection ----------------------
        self.out_layers = nn.ModuleList([
            BasicConv(in_dim, round(cfg.head_dim*cfg.width), kernel_size=1,
                      act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)
            for in_dim in [round(256*cfg.width), round(512*cfg.width), round(1024*cfg.width)]
        ])
        self.out_dims = [round(cfg.head_dim*cfg.width)] * 3

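    # Forward data flow: the top-down FPN fuses C5 -> C4 -> C3 (1x1 reduce conv,
    # 2x nearest upsample, concat, CSPBlock); the bottom-up PAN then fuses
    # P3 -> P4 -> P5 (stride-2 conv, concat, CSPBlock), and each level is
    # finally projected to head_dim channels for the detection head.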
    def forward(self, features):
        c3, c4, c5 = features

        # ------------------ Top down FPN ------------------
        ## P5 -> P4
        p5 = self.reduce_layer_1(c5)
        p5_up = F.interpolate(p5, scale_factor=2.0)
        p4 = self.top_down_layer_1(torch.cat([c4, p5_up], dim=1))

        ## P4 -> P3
        p4 = self.reduce_layer_2(p4)
        p4_up = F.interpolate(p4, scale_factor=2.0)
        p3 = self.top_down_layer_2(torch.cat([c3, p4_up], dim=1))

        # ------------------ Bottom up PAN ------------------
        ## P3 -> P4
        p3_ds = self.downsample_layer_1(p3)
        p4 = self.bottom_up_layer_1(torch.cat([p4, p3_ds], dim=1))

        ## P4 -> P5
        p4_ds = self.downsample_layer_2(p4)
        p5 = self.bottom_up_layer_2(torch.cat([p5, p4_ds], dim=1))

        out_feats = [p3, p4, p5]

        # output proj layers
        out_feats_proj = []
        for feat, layer in zip(out_feats, self.out_layers):
            out_feats_proj.append(layer(feat))

        return out_feats_proj
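

# ----------------------------------------------------------------------------
# Minimal smoke test (a sketch, not part of the original module). It assumes a
# config object that exposes the fields used above (width, depth, head_dim,
# fpn_act, fpn_norm, fpn_depthwise) and that BasicConv / CSPBlock accept the
# keyword arguments shown in __init__; the concrete values below are only
# illustrative. Because of the relative import at the top, this block has to
# run with the file loaded as part of its package.
# ----------------------------------------------------------------------------
if __name__ == '__main__':
    from types import SimpleNamespace

    # Hypothetical config with YOLOv5-L style multipliers (width=1.0, depth=1.0).
    cfg = SimpleNamespace(width=1.0, depth=1.0, head_dim=256,
                          fpn_act='silu', fpn_norm='BN', fpn_depthwise=False)

    fpn = Yolov5PaFPN(cfg, in_dims=[256, 512, 1024])

    # Dummy C3 / C4 / C5 features for a 640x640 input (strides 8, 16, 32).
    c3 = torch.randn(1, 256, 80, 80)
    c4 = torch.randn(1, 512, 40, 40)
    c5 = torch.randn(1, 1024, 20, 20)

    # Every output level should come back with head_dim (= 256 here) channels.
    outs = fpn([c3, c4, c5])
    for level, out in enumerate(outs):
        print(f"P{level + 3}: {tuple(out.shape)}")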