# yolov3_fpn.py

from typing import List

import torch
import torch.nn as nn
import torch.nn.functional as F

from .yolov3_basic import BasicConv, ResBlock


# Yolov3FPN
class Yolov3FPN(nn.Module):
    def __init__(self, cfg, in_dims: List = [256, 512, 1024]):
        super(Yolov3FPN, self).__init__()
        self.in_dims = in_dims
        c3, c4, c5 = in_dims

        # ---------------------- Yolov3's Top down FPN ----------------------
        ## P5 -> P4
        self.top_down_layer_1 = ResBlock(in_dim     = c5,
                                         out_dim    = round(512*cfg.width),
                                         num_blocks = round(3*cfg.depth),
                                         shortcut   = False,
                                         act_type   = cfg.fpn_act,
                                         norm_type  = cfg.fpn_norm,
                                         depthwise  = cfg.fpn_depthwise)
        self.reduce_layer_1 = BasicConv(round(512*cfg.width), round(256*cfg.width),
                                        kernel_size=1, act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)

        ## P4 -> P3
        self.top_down_layer_2 = ResBlock(in_dim     = c4 + round(256*cfg.width),
                                         out_dim    = round(256*cfg.width),
                                         num_blocks = round(3*cfg.depth),
                                         shortcut   = False,
                                         act_type   = cfg.fpn_act,
                                         norm_type  = cfg.fpn_norm,
                                         depthwise  = cfg.fpn_depthwise)
        self.reduce_layer_2 = BasicConv(round(256*cfg.width), round(128*cfg.width),
                                        kernel_size=1, act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)

        ## P3
        self.top_down_layer_3 = ResBlock(in_dim     = c3 + round(128*cfg.width),
                                         out_dim    = round(128*cfg.width),
                                         num_blocks = round(3*cfg.depth),
                                         shortcut   = False,
                                         act_type   = cfg.fpn_act,
                                         norm_type  = cfg.fpn_norm,
                                         depthwise  = cfg.fpn_depthwise)

        # ---------------------- Yolov3's output projection ----------------------
        self.out_layers = nn.ModuleList([
            BasicConv(in_dim, round(cfg.head_dim*cfg.width), kernel_size=1,
                      act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)
            for in_dim in [round(128*cfg.width), round(256*cfg.width), round(512*cfg.width)]
        ])
        self.out_dims = [round(cfg.head_dim*cfg.width)] * 3

    def forward(self, features):
        c3, c4, c5 = features

        # p5/32
        p5 = self.top_down_layer_1(c5)

        # p4/16
        p5_up = F.interpolate(self.reduce_layer_1(p5), scale_factor=2.0)
        p4 = self.top_down_layer_2(torch.cat([c4, p5_up], dim=1))

        # p3/8
        p4_up = F.interpolate(self.reduce_layer_2(p4), scale_factor=2.0)
        p3 = self.top_down_layer_3(torch.cat([c3, p4_up], dim=1))

        out_feats = [p3, p4, p5]

        # output projection layers
        out_feats_proj = []
        for feat, layer in zip(out_feats, self.out_layers):
            out_feats_proj.append(layer(feat))

        return out_feats_proj
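

# -------------------------- Usage sketch (illustrative only) --------------------------
# A minimal shape sanity check for the FPN, not part of the original module. The `cfg`
# namespace below is a hypothetical stand-in for the project's config object; the exact
# act/norm strings ('silu', 'BN', ...) depend on what BasicConv/ResBlock actually accept.
if __name__ == '__main__':
    from types import SimpleNamespace

    # Hypothetical config: width/depth scaling of 1.0, no depthwise convs (assumed values).
    cfg = SimpleNamespace(width=1.0, depth=1.0, head_dim=256,
                          fpn_act='silu', fpn_norm='BN', fpn_depthwise=False)

    fpn = Yolov3FPN(cfg, in_dims=[256, 512, 1024])

    # Dummy backbone pyramid for a 640x640 input: strides 8, 16, 32.
    c3 = torch.randn(1, 256,  80, 80)
    c4 = torch.randn(1, 512,  40, 40)
    c5 = torch.randn(1, 1024, 20, 20)

    outputs = fpn([c3, c4, c5])
    for level, out in zip(['p3', 'p4', 'p5'], outputs):
        # Each level is projected to round(head_dim * width) channels.
        print(level, out.shape)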