# yolox2_pafpn.py
  1. from typing import List
  2. import torch
  3. import torch.nn as nn
  4. import torch.nn.functional as F
  5. from .yolox2_basic import BasicConv, CSPBlock
  6. # Yolov5FPN
  7. class Yolov5PaFPN(nn.Module):
  8. def __init__(self, cfg, in_dims: List = [256, 512, 1024],
  9. ):
  10. super(Yolov5PaFPN, self).__init__()
  11. self.in_dims = in_dims
  12. c3, c4, c5 = in_dims
  13. # ---------------------- Yolox's Top down FPN ----------------------
  14. ## P5 -> P4
  15. self.reduce_layer_1 = BasicConv(c5, round(512*cfg.width),
  16. kernel_size=1, padding=0, stride=1,
  17. act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)
  18. self.top_down_layer_1 = CSPBlock(in_dim = c4 + round(512*cfg.width),
  19. out_dim = round(512*cfg.width),
  20. num_blocks = round(3*cfg.depth),
  21. expansion = 0.5,
  22. shortcut = False,
  23. act_type = cfg.fpn_act,
  24. norm_type = cfg.fpn_norm,
  25. depthwise = cfg.fpn_depthwise)
  26. ## P4 -> P3
  27. self.reduce_layer_2 = BasicConv(round(512*cfg.width), round(256*cfg.width),
  28. kernel_size=1, padding=0, stride=1,
  29. act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)
  30. self.top_down_layer_2 = CSPBlock(in_dim = c3 + round(256*cfg.width),
  31. out_dim = round(256*cfg.width),
  32. num_blocks = round(3*cfg.depth),
  33. expansion = 0.5,
  34. shortcut = False,
  35. act_type = cfg.fpn_act,
  36. norm_type = cfg.fpn_norm,
  37. depthwise = cfg.fpn_depthwise)
  38. # ---------------------- Yolox's Bottom up PAN ----------------------
  39. ## P3 -> P4
  40. self.downsample_layer_1 = BasicConv(round(256*cfg.width), round(256*cfg.width),
  41. kernel_size=3, padding=1, stride=2,
  42. act_type=cfg.fpn_act, norm_type=cfg.fpn_norm, depthwise=cfg.fpn_depthwise)
  43. self.bottom_up_layer_1 = CSPBlock(in_dim = round(256*cfg.width) + round(256*cfg.width),
  44. out_dim = round(512*cfg.width),
  45. num_blocks = round(3*cfg.depth),
  46. expansion = 0.5,
  47. shortcut = False,
  48. act_type = cfg.fpn_act,
  49. norm_type = cfg.fpn_norm,
  50. depthwise = cfg.fpn_depthwise)
  51. ## P4 -> P5
  52. self.downsample_layer_2 = BasicConv(round(512*cfg.width), round(512*cfg.width),
  53. kernel_size=3, padding=1, stride=2,
  54. act_type=cfg.fpn_act, norm_type=cfg.fpn_norm, depthwise=cfg.fpn_depthwise)
  55. self.bottom_up_layer_2 = CSPBlock(in_dim = round(512*cfg.width) + round(512*cfg.width),
  56. out_dim = round(1024*cfg.width),
  57. num_blocks = round(3*cfg.depth),
  58. expansion = 0.5,
  59. shortcut = False,
  60. act_type = cfg.fpn_act,
  61. norm_type = cfg.fpn_norm,
  62. depthwise = cfg.fpn_depthwise)
  63. # ---------------------- Yolox's output projection ----------------------
  64. self.out_layers = nn.ModuleList([
  65. BasicConv(in_dim, round(cfg.head_dim*cfg.width), kernel_size=1,
  66. act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)
  67. for in_dim in [round(256*cfg.width), round(512*cfg.width), round(1024*cfg.width)]
  68. ])
  69. self.out_dims = [round(cfg.head_dim*cfg.width)] * 3
  70. def forward(self, features):
  71. c3, c4, c5 = features
  72. # ------------------ Top down FPN ------------------
  73. ## P5 -> P4
  74. p5 = self.reduce_layer_1(c5)
  75. p5_up = F.interpolate(p5, scale_factor=2.0)
  76. p4 = self.top_down_layer_1(torch.cat([c4, p5_up], dim=1))
  77. ## P4 -> P3
  78. p4 = self.reduce_layer_2(p4)
  79. p4_up = F.interpolate(p4, scale_factor=2.0)
  80. p3 = self.top_down_layer_2(torch.cat([c3, p4_up], dim=1))
  81. # ------------------ Bottom up PAN ------------------
  82. ## P3 -> P4
  83. p3_ds = self.downsample_layer_1(p3)
  84. p4 = self.bottom_up_layer_1(torch.cat([p4, p3_ds], dim=1))
  85. ## P4 -> P5
  86. p4_ds = self.downsample_layer_2(p4)
  87. p5 = self.bottom_up_layer_2(torch.cat([p5, p4_ds], dim=1))
  88. out_feats = [p3, p4, p5]
  89. # output proj layers
  90. out_feats_proj = []
  91. for feat, layer in zip(out_feats, self.out_layers):
  92. out_feats_proj.append(layer(feat))
  93. return out_feats_proj