# yolov3_fpn.py
from typing import List, Optional

import torch
import torch.nn as nn
import torch.nn.functional as F

from .yolov3_basic import BasicConv, ResBlock
  6. # Yolov3FPN
  7. class Yolov3FPN(nn.Module):
  8. def __init__(self, cfg, in_dims: List = [256, 512, 1024],
  9. ):
  10. super(Yolov3FPN, self).__init__()
  11. self.in_dims = in_dims
  12. c3, c4, c5 = in_dims
  13. # ---------------------- Yolov3's Top down FPN ----------------------
  14. ## P5 -> P4
  15. self.top_down_layer_1 = ResBlock(in_dim = c5,
  16. out_dim = round(512*cfg.width),
  17. num_blocks = round(3*cfg.depth),
  18. expansion = 0.5,
  19. shortcut = False,
  20. act_type = cfg.fpn_act,
  21. norm_type = cfg.fpn_norm,
  22. depthwise = cfg.fpn_depthwise)
  23. self.reduce_layer_1 = BasicConv(round(512*cfg.width), round(256*cfg.width), kernel_size=1, act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)
  24. ## P4 -> P3
  25. self.top_down_layer_2 = ResBlock(in_dim = c4 + round(256*cfg.width),
  26. out_dim = round(256*cfg.width),
  27. num_blocks = round(3*cfg.depth),
  28. expansion = 0.5,
  29. shortcut = False,
  30. act_type = cfg.fpn_act,
  31. norm_type = cfg.fpn_norm,
  32. depthwise = cfg.fpn_depthwise)
  33. self.reduce_layer_2 = BasicConv(round(256*cfg.width), round(128*cfg.width), kernel_size=1, act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)
  34. ## P3
  35. self.top_down_layer_3 = ResBlock(in_dim = c3 + round(128*cfg.width),
  36. out_dim = round(128*cfg.width),
  37. num_blocks = round(3*cfg.depth),
  38. expansion = 0.5,
  39. shortcut = False,
  40. act_type = cfg.fpn_act,
  41. norm_type = cfg.fpn_norm,
  42. depthwise = cfg.fpn_depthwise)
  43. # ---------------------- Yolov3's output projection ----------------------
  44. self.out_layers = nn.ModuleList([
  45. BasicConv(in_dim, round(cfg.head_dim*cfg.width), kernel_size=1,
  46. act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)
  47. for in_dim in [round(128*cfg.width), round(256*cfg.width), round(512*cfg.width)]
  48. ])
  49. self.out_dims = [round(cfg.head_dim*cfg.width)] * 3
  50. def forward(self, features):
  51. c3, c4, c5 = features
  52. # p5/32
  53. p5 = self.top_down_layer_1(c5)
  54. # p4/16
  55. p5_up = F.interpolate(self.reduce_layer_1(p5), scale_factor=2.0)
  56. p4 = self.top_down_layer_2(torch.cat([c4, p5_up], dim=1))
  57. # P3/8
  58. p4_up = F.interpolate(self.reduce_layer_2(p4), scale_factor=2.0)
  59. p3 = self.top_down_layer_3(torch.cat([c3, p4_up], dim=1))
  60. out_feats = [p3, p4, p5]
  61. # output proj layers
  62. out_feats_proj = []
  63. for feat, layer in zip(out_feats, self.out_layers):
  64. out_feats_proj.append(layer(feat))
  65. return out_feats_proj