hardnet.py

# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import paddle.nn as nn

from ppdet.core.workspace import register
from ..shape_spec import ShapeSpec

__all__ = ['HarDNet']


def ConvLayer(in_channels,
              out_channels,
              kernel_size=3,
              stride=1,
              bias_attr=False):
    # Standard Conv-BN-ReLU6 block; the padding keeps the spatial size
    # unchanged for odd kernel sizes.
    layer = nn.Sequential(
        ('conv', nn.Conv2D(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=1,
            bias_attr=bias_attr)),
        ('norm', nn.BatchNorm2D(out_channels)),
        ('relu', nn.ReLU6()))
    return layer


def DWConvLayer(in_channels,
                out_channels,
                kernel_size=3,
                stride=1,
                bias_attr=False):
    # Depthwise Conv-BN block with no activation. groups == out_channels,
    # so callers are expected to pass in_channels == out_channels.
    layer = nn.Sequential(
        ('dwconv', nn.Conv2D(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=1,
            groups=out_channels,
            bias_attr=bias_attr)),
        ('norm', nn.BatchNorm2D(out_channels)))
    return layer


def CombConvLayer(in_channels, out_channels, kernel_size=1, stride=1):
    # Pointwise Conv-BN-ReLU6 followed by a 3x3 depthwise Conv-BN; used in
    # place of a plain ConvLayer when HarDBlock is built with dwconv=True.
    layer = nn.Sequential(
        ('layer1', ConvLayer(
            in_channels, out_channels, kernel_size=kernel_size)),
        ('layer2', DWConvLayer(
            out_channels, out_channels, stride=stride)))
    return layer
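
# A quick shape check for the helpers above (an illustrative sketch, not part
# of the model definition; assumes a working Paddle runtime):
#
#     y = CombConvLayer(32, 64)(paddle.rand([1, 32, 56, 56]))
#     print(y.shape)  # [1, 64, 56, 56]: 1x1 Conv-BN-ReLU6, then 3x3 depthwise

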
class HarDBlock(nn.Layer):
    def __init__(self,
                 in_channels,
                 growth_rate,
                 grmul,
                 n_layers,
                 keepBase=False,
                 residual_out=False,
                 dwconv=False):
        super().__init__()
        self.keepBase = keepBase
        self.links = []
        layers_ = []
        self.out_channels = 0
        for i in range(n_layers):
            outch, inch, link = self.get_link(i + 1, in_channels, growth_rate,
                                              grmul)
            self.links.append(link)
            if dwconv:
                layers_.append(CombConvLayer(inch, outch))
            else:
                layers_.append(ConvLayer(inch, outch))
            # Only even-indexed layers and the last layer contribute to the
            # block output (mirrored by the selection in forward()).
            if (i % 2 == 0) or (i == n_layers - 1):
                self.out_channels += outch
        self.layers = nn.LayerList(layers_)

    def get_out_ch(self):
        return self.out_channels

    def get_link(self, layer, base_ch, growth_rate, grmul):
        # Harmonic wiring: layer `layer` reads from layers `layer - 2**i` for
        # every power of two dividing `layer`; its output width is multiplied
        # by grmul once per extra link, then rounded up to an even count.
        if layer == 0:
            return base_ch, 0, []
        out_channels = growth_rate
        link = []
        for i in range(10):
            dv = 2**i
            if layer % dv == 0:
                k = layer - dv
                link.append(k)
                if i > 0:
                    out_channels *= grmul
        out_channels = int(int(out_channels + 1) / 2) * 2
        in_channels = 0
        for i in link:
            ch, _, _ = self.get_link(i, base_ch, growth_rate, grmul)
            in_channels += ch
        return out_channels, in_channels, link

    def forward(self, x):
        layers_ = [x]
        for layer in range(len(self.layers)):
            link = self.links[layer]
            tin = []
            for i in link:
                tin.append(layers_[i])
            if len(tin) > 1:
                x = paddle.concat(tin, 1)
            else:
                x = tin[0]
            out = self.layers[layer](x)
            layers_.append(out)

        t = len(layers_)
        out_ = []
        for i in range(t):
            if (i == 0 and self.keepBase) or (i == t - 1) or (i % 2 == 1):
                out_.append(layers_[i])
        out = paddle.concat(out_, 1)
        return out
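
# Illustration of the harmonic connection pattern (a sketch, not part of the
# model): with 8 layers, layer 8 links back to layers 7, 6, 4 and 0, since
# those are `8 - 2**i` for each power of two dividing 8. For example:
#
#     blk = HarDBlock(in_channels=96, growth_rate=24, grmul=1.7, n_layers=8)
#     print(blk.links[-1])      # [7, 6, 4, 0]
#     print(blk.out_channels)   # 214 for this configuration

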
@register
class HarDNet(nn.Layer):
    def __init__(self, depth_wise=False, return_idx=[1, 3, 8, 13], arch=85):
        super(HarDNet, self).__init__()
        # Only the 68- and 85-layer variants are configured below; other
        # values of arch have no parameter set and are rejected here.
        assert arch in [68, 85], "HarDNet-{} is not supported.".format(arch)
        if arch == 85:
            first_ch = [48, 96]
            second_kernel = 3
            ch_list = [192, 256, 320, 480, 720]
            grmul = 1.7
            gr = [24, 24, 28, 36, 48]
            n_layers = [8, 16, 16, 16, 16]
        elif arch == 68:
            first_ch = [32, 64]
            second_kernel = 3
            ch_list = [128, 256, 320, 640]
            grmul = 1.7
            gr = [14, 16, 20, 40]
            n_layers = [8, 16, 16, 16]

        self.return_idx = return_idx
        # Channel counts at the default return_idx for the arch-85 setting.
        self._out_channels = [96, 214, 458, 784]

        avg_pool = True
        if depth_wise:
            second_kernel = 1
            avg_pool = False

        blks = len(n_layers)
        self.base = nn.LayerList([])

        # First layer: standard conv 3x3, stride=2
        self.base.append(
            ConvLayer(
                in_channels=3,
                out_channels=first_ch[0],
                kernel_size=3,
                stride=2,
                bias_attr=False))

        # Second layer
        self.base.append(
            ConvLayer(
                first_ch[0], first_ch[1], kernel_size=second_kernel))

        # Avgpooling or DWConv3x3 downsampling
        if avg_pool:
            self.base.append(nn.AvgPool2D(kernel_size=3, stride=2, padding=1))
        else:
            self.base.append(DWConvLayer(first_ch[1], first_ch[1], stride=2))

        # Build all HarDNet blocks, each followed by a 1x1 transition conv
        # (except the last) and, at selected stages, a downsampling pool.
        ch = first_ch[1]
        for i in range(blks):
            blk = HarDBlock(ch, gr[i], grmul, n_layers[i], dwconv=depth_wise)
            ch = blk.out_channels
            self.base.append(blk)

            if i != blks - 1:
                self.base.append(ConvLayer(ch, ch_list[i], kernel_size=1))
            ch = ch_list[i]
            if i == 0:
                self.base.append(
                    nn.AvgPool2D(
                        kernel_size=2, stride=2, ceil_mode=True))
            elif i != blks - 1 and i != 1 and i != 3:
                self.base.append(nn.AvgPool2D(kernel_size=2, stride=2))
    def forward(self, inputs):
        x = inputs['image']
        outs = []
        for i, layer in enumerate(self.base):
            x = layer(x)
            if i in self.return_idx:
                outs.append(x)
        return outs

    @property
    def out_shape(self):
        return [ShapeSpec(channels=self._out_channels[i]) for i in range(4)]
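

# Usage sketch (illustrative only; assumes PaddlePaddle and ppdet are
# installed, and mirrors the dict input expected by forward() above):
if __name__ == '__main__':
    model = HarDNet(arch=85)
    feats = model({'image': paddle.rand([1, 3, 512, 512])})
    for spec, feat in zip(model.out_shape, feats):
        print(spec.channels, feat.shape)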