# ctrnet_encoder.py

import torch
import torch.nn as nn

try:
    from .ctrnet_basic import Conv, RTCBlock
except ImportError:
    # fall back to a plain import when the file is run as a script
    from ctrnet_basic import Conv, RTCBlock

# MIM-pretrained weights
model_urls = {
    "rtcnet_n": None,
    "rtcnet_t": None,
    "rtcnet_s": None,
    "rtcnet_m": None,
    "rtcnet_l": None,
    "rtcnet_x": None,
}
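# Note: all entries above are placeholders (None), so load_pretrained_weight()
# will only print the "No backbone pretrained ..." message until real checkpoint
# URLs are filled in.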


# ---------------------------- Basic functions ----------------------------
## Real-time Convolutional Backbone
class CTREncoder(nn.Module):
    def __init__(self, width=1.0, depth=1.0, ratio=1.0, act_type='silu', norm_type='BN', depthwise=False):
        super(CTREncoder, self).__init__()
        # ---------------- Basic parameters ----------------
        self.width_factor = width
        self.depth_factor = depth
        self.last_stage_factor = ratio
        self.feat_dims = [round(64 * width), round(128 * width), round(256 * width),
                          round(512 * width), round(512 * width * ratio)]
        # ---------------- Network parameters ----------------
        ## P1/2
        self.layer_1 = Conv(3, self.feat_dims[0], k=3, p=1, s=2, act_type=act_type, norm_type=norm_type)
        ## P2/4
        self.layer_2 = nn.Sequential(
            Conv(self.feat_dims[0], self.feat_dims[1], k=3, p=1, s=2, act_type=act_type, norm_type=norm_type),
            RTCBlock(in_dim=self.feat_dims[1],
                     out_dim=self.feat_dims[1],
                     num_blocks=round(3 * depth),
                     shortcut=True,
                     act_type=act_type,
                     norm_type=norm_type,
                     depthwise=depthwise)
        )
        ## P3/8
        self.layer_3 = nn.Sequential(
            Conv(self.feat_dims[1], self.feat_dims[2], k=3, p=1, s=2, act_type=act_type, norm_type=norm_type),
            RTCBlock(in_dim=self.feat_dims[2],
                     out_dim=self.feat_dims[2],
                     num_blocks=round(6 * depth),
                     shortcut=True,
                     act_type=act_type,
                     norm_type=norm_type,
                     depthwise=depthwise)
        )
        ## P4/16
        self.layer_4 = nn.Sequential(
            Conv(self.feat_dims[2], self.feat_dims[3], k=3, p=1, s=2, act_type=act_type, norm_type=norm_type),
            RTCBlock(in_dim=self.feat_dims[3],
                     out_dim=self.feat_dims[3],
                     num_blocks=round(6 * depth),
                     shortcut=True,
                     act_type=act_type,
                     norm_type=norm_type,
                     depthwise=depthwise)
        )
        ## P5/32
        self.layer_5 = nn.Sequential(
            Conv(self.feat_dims[3], self.feat_dims[4], k=3, p=1, s=2, act_type=act_type, norm_type=norm_type),
            RTCBlock(in_dim=self.feat_dims[4],
                     out_dim=self.feat_dims[4],
                     num_blocks=round(3 * depth),
                     shortcut=True,
                     act_type=act_type,
                     norm_type=norm_type,
                     depthwise=depthwise)
        )

    def forward(self, x):
        c1 = self.layer_1(x)
        c2 = self.layer_2(c1)
        c3 = self.layer_3(c2)
        c4 = self.layer_4(c3)
        c5 = self.layer_5(c4)
        outputs = [c3, c4, c5]

        return outputs
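
# Note: the returned [c3, c4, c5] are the stride-8/16/32 feature maps, e.g.
# 80x80, 40x40 and 20x20 for the 640x640 input used in the test block below.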


# ---------------------------- Functions ----------------------------
## build Backbone
def build_encoder(cfg, pretrained=False):
    # build backbone model
    backbone = CTREncoder(width=cfg['width'],
                          depth=cfg['depth'],
                          ratio=cfg['ratio'],
                          act_type=cfg['bk_act'],
                          norm_type=cfg['bk_norm'],
                          depthwise=cfg['bk_depthwise']
                          )
    feat_dims = backbone.feat_dims[-3:]

    # load pretrained weight
    if pretrained:
        backbone = load_pretrained_weight(backbone)

    return backbone, feat_dims
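
# For example, with the default width=1.0 and ratio=1.0 the full feat_dims are
# [64, 128, 256, 512, 512], so build_encoder() returns [256, 512, 512] as the
# channel widths of the [c3, c4, c5] outputs.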

## load pretrained weight
def load_pretrained_weight(model):
    # Model name
    width, depth, ratio = model.width_factor, model.depth_factor, model.last_stage_factor
    if width == 0.25 and depth == 0.34 and ratio == 2.0:
        model_name = "rtcnet_n"
    elif width == 0.375 and depth == 0.34 and ratio == 2.0:
        model_name = "rtcnet_t"
    elif width == 0.50 and depth == 0.34 and ratio == 2.0:
        model_name = "rtcnet_s"
    elif width == 0.75 and depth == 0.67 and ratio == 1.5:
        model_name = "rtcnet_m"
    elif width == 1.0 and depth == 1.0 and ratio == 1.0:
        model_name = "rtcnet_l"
    elif width == 1.25 and depth == 1.34 and ratio == 1.0:
        model_name = "rtcnet_x"
    else:
        # Unknown scale: no matching pretrained checkpoint, return the model as-is.
        print('No pretrained weight for width={}, depth={}, ratio={}.'.format(width, depth, ratio))
        return model

    # Load pretrained weight
    url = model_urls[model_name]
    if url is not None:
        print('Loading pretrained weight ...')
        checkpoint = torch.hub.load_state_dict_from_url(
            url=url, map_location="cpu", check_hash=True)
        # checkpoint state dict
        checkpoint_state_dict = checkpoint.pop("model")
        # model state dict
        model_state_dict = model.state_dict()
        # keep only checkpoint entries whose names and shapes match the current model
        for k in list(checkpoint_state_dict.keys()):
            if k in model_state_dict:
                shape_model = tuple(model_state_dict[k].shape)
                shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
                if shape_model != shape_checkpoint:
                    checkpoint_state_dict.pop(k)
            else:
                checkpoint_state_dict.pop(k)
                print('Unexpected key in checkpoint: ', k)
        # load the (possibly partial) weight; strict=False tolerates the keys filtered out above
        model.load_state_dict(checkpoint_state_dict, strict=False)
    else:
        print('No backbone pretrained for {}.'.format(model_name))

    return model


if __name__ == '__main__':
    import time
    from thop import profile

    cfg = {
        'bk_act': 'silu',
        'bk_norm': 'BN',
        'bk_depthwise': False,
        'width': 1.0,
        'depth': 1.0,
        'ratio': 1.0,
    }
    model, feats = build_encoder(cfg)
    x = torch.randn(1, 3, 640, 640)

    t0 = time.time()
    outputs = model(x)
    t1 = time.time()
    print('Time: ', t1 - t0)
    for out in outputs:
        print(out.shape)

    x = torch.randn(1, 3, 640, 640)
    print('==============================')
    flops, params = profile(model, inputs=(x, ), verbose=False)
    print('==============================')
    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
    print('Params : {:.2f} M'.format(params / 1e6))
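
    # A minimal sketch (not part of the original test): build the smaller 'rtcnet_s'
    # scale, using the width/depth/ratio values from load_pretrained_weight's lookup
    # table, and report the channel widths of its [c3, c4, c5] outputs.
    cfg_s = {
        'bk_act': 'silu',
        'bk_norm': 'BN',
        'bk_depthwise': False,
        'width': 0.50,
        'depth': 0.34,
        'ratio': 2.0,
    }
    model_s, feats_s = build_encoder(cfg_s)
    print('rtcnet_s feat dims: ', feats_s)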