# yolov4_pafpn.py

from typing import List

import torch
import torch.nn as nn
import torch.nn.functional as F

from .yolov4_basic import BasicConv, CSPBlock


# Yolov4 PaFPN
class Yolov4PaFPN(nn.Module):
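    """YOLOv4 PaFPN neck.

    Runs a top-down FPN pass (P5 -> P4 -> P3) followed by a bottom-up PAN
    pass (P3 -> P4 -> P5), then projects every merged level to a common
    channel width for the detection head.
    """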
    def __init__(self, cfg, in_dims: List = [256, 512, 1024]):
        super(Yolov4PaFPN, self).__init__()
        self.in_dims = in_dims
        c3, c4, c5 = in_dims

        # ---------------------- Yolov4's Top-down FPN ----------------------
        ## P5 -> P4
        self.reduce_layer_1 = BasicConv(c5, round(512*cfg.width), kernel_size=1,
                                        act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)
        self.top_down_layer_1 = CSPBlock(in_dim     = c4 + round(512*cfg.width),
                                         out_dim    = round(512*cfg.width),
                                         num_blocks = round(3*cfg.depth),
                                         expansion  = 0.5,
                                         shortcut   = False,
                                         act_type   = cfg.fpn_act,
                                         norm_type  = cfg.fpn_norm,
                                         depthwise  = cfg.fpn_depthwise)
        ## P4 -> P3
        self.reduce_layer_2 = BasicConv(round(512*cfg.width), round(256*cfg.width), kernel_size=1,
                                        act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)
        self.top_down_layer_2 = CSPBlock(in_dim     = c3 + round(256*cfg.width),
                                         out_dim    = round(256*cfg.width),
                                         num_blocks = round(3*cfg.depth),
                                         expansion  = 0.5,
                                         shortcut   = False,
                                         act_type   = cfg.fpn_act,
                                         norm_type  = cfg.fpn_norm,
                                         depthwise  = cfg.fpn_depthwise)

        # ---------------------- Yolov4's Bottom-up FPN ----------------------
        ## P3 -> P4
        self.downsample_layer_1 = BasicConv(round(256*cfg.width), round(256*cfg.width),
                                            kernel_size=3, padding=1, stride=2,
                                            act_type=cfg.fpn_act, norm_type=cfg.fpn_norm, depthwise=cfg.fpn_depthwise)
        self.bottom_up_layer_1 = CSPBlock(in_dim     = round(256*cfg.width) + round(256*cfg.width),
                                          out_dim    = round(512*cfg.width),
                                          num_blocks = round(3*cfg.depth),
                                          shortcut   = False,
                                          act_type   = cfg.fpn_act,
                                          norm_type  = cfg.fpn_norm,
                                          depthwise  = cfg.fpn_depthwise)
        ## P4 -> P5
        self.downsample_layer_2 = BasicConv(round(512*cfg.width), round(512*cfg.width),
                                            kernel_size=3, padding=1, stride=2,
                                            act_type=cfg.fpn_act, norm_type=cfg.fpn_norm, depthwise=cfg.fpn_depthwise)
        self.bottom_up_layer_2 = CSPBlock(in_dim     = round(512*cfg.width) + round(512*cfg.width),
                                          out_dim    = round(1024*cfg.width),
                                          num_blocks = round(3*cfg.depth),
                                          shortcut   = False,
                                          act_type   = cfg.fpn_act,
                                          norm_type  = cfg.fpn_norm,
                                          depthwise  = cfg.fpn_depthwise)

        # ---------------------- Yolov4's output projection ----------------------
        self.out_layers = nn.ModuleList([
            BasicConv(in_dim, round(cfg.head_dim*cfg.width), kernel_size=1,
                      act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)
            for in_dim in [round(256*cfg.width), round(512*cfg.width), round(1024*cfg.width)]
        ])
        self.out_dims = [round(cfg.head_dim*cfg.width)] * 3

    def forward(self, features):
        c3, c4, c5 = features

        # P5 -> P4
        p5 = self.reduce_layer_1(c5)
        p5_up = F.interpolate(p5, scale_factor=2.0)
        p4 = self.top_down_layer_1(torch.cat([c4, p5_up], dim=1))

        # P4 -> P3
        p4 = self.reduce_layer_2(p4)
        p4_up = F.interpolate(p4, scale_factor=2.0)
        p3 = self.top_down_layer_2(torch.cat([c3, p4_up], dim=1))

        # P3 -> P4
        p3_ds = self.downsample_layer_1(p3)
        p4 = self.bottom_up_layer_1(torch.cat([p4, p3_ds], dim=1))

        # P4 -> P5
        p4_ds = self.downsample_layer_2(p4)
        p5 = self.bottom_up_layer_2(torch.cat([p5, p4_ds], dim=1))

        out_feats = [p3, p4, p5]

        # output proj layers
        out_feats_proj = []
        for feat, layer in zip(out_feats, self.out_layers):
            out_feats_proj.append(layer(feat))

        return out_feats_proj
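

if __name__ == "__main__":
    # ------------------------------------------------------------------
    # Minimal smoke test (a sketch, not part of the original module).
    # The cfg below is a hypothetical stand-in exposing only the fields
    # this file reads (width, depth, head_dim, fpn_act, fpn_norm,
    # fpn_depthwise); the act/norm strings are assumptions about what
    # BasicConv / CSPBlock accept. Run it from the package context
    # (e.g. `python -m <package>.yolov4_pafpn`) so the relative import
    # of yolov4_basic resolves.
    # ------------------------------------------------------------------
    from types import SimpleNamespace

    cfg = SimpleNamespace(width=0.5, depth=0.34, head_dim=256,
                          fpn_act='silu', fpn_norm='BN', fpn_depthwise=False)
    fpn = Yolov4PaFPN(cfg, in_dims=[256, 512, 1024])

    # Dummy C3/C4/C5 backbone features at strides 8/16/32 for a 256x256 input.
    c3 = torch.randn(1, 256, 32, 32)
    c4 = torch.randn(1, 512, 16, 16)
    c5 = torch.randn(1, 1024, 8, 8)

    for level, feat in zip(["P3", "P4", "P5"], fpn([c3, c4, c5])):
        # Each output level is projected to round(head_dim * width) channels.
        print(level, tuple(feat.shape))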