lite_hrnet.py

# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on
https://github.com/HRNet/Lite-HRNet/blob/hrnet/models/backbones/litehrnet.py
"""
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from numbers import Integral
from paddle import ParamAttr
from paddle.regularizer import L2Decay
from paddle.nn.initializer import Normal, Constant
from ppdet.core.workspace import register
from ppdet.modeling.shape_spec import ShapeSpec
from ppdet.modeling.ops import channel_shuffle
from .. import layers as L

__all__ = ['LiteHRNet']
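
# channel_shuffle (imported from ppdet.modeling.ops above) interleaves channels
# across groups so the two halves of a shuffle block exchange information. The
# reference sketch below is for illustration only and is not used by the model;
# the helper name is not part of the original file.
def _channel_shuffle_sketch(x, groups):
    # [N, C, H, W] -> [N, groups, C // groups, H, W]
    n, c, h, w = x.shape
    x = paddle.reshape(x, [n, groups, c // groups, h, w])
    # swap the group axis with the per-group channel axis, then flatten back
    x = paddle.transpose(x, [0, 2, 1, 3, 4])
    return paddle.reshape(x, [n, c, h, w])
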
class ConvNormLayer(nn.Layer):
    """Conv2D optionally followed by a normalization layer ('bn', 'sync_bn' or
    'gn') and an activation ('relu' or 'sigmoid')."""

    def __init__(self,
                 ch_in,
                 ch_out,
                 filter_size,
                 stride=1,
                 groups=1,
                 norm_type=None,
                 norm_groups=32,
                 norm_decay=0.,
                 freeze_norm=False,
                 act=None):
        super(ConvNormLayer, self).__init__()
        self.act = act
        norm_lr = 0. if freeze_norm else 1.
        if norm_type is not None:
            assert norm_type in ['bn', 'sync_bn', 'gn'], \
                "norm_type should be one of ['bn', 'sync_bn', 'gn'], but got {}".format(norm_type)
            param_attr = ParamAttr(
                initializer=Constant(1.0),
                learning_rate=norm_lr,
                regularizer=L2Decay(norm_decay), )
            bias_attr = ParamAttr(
                learning_rate=norm_lr, regularizer=L2Decay(norm_decay))
            global_stats = True if freeze_norm else None
            if norm_type in ['bn', 'sync_bn']:
                self.norm = nn.BatchNorm2D(
                    ch_out,
                    weight_attr=param_attr,
                    bias_attr=bias_attr,
                    use_global_stats=global_stats, )
            elif norm_type == 'gn':
                self.norm = nn.GroupNorm(
                    num_groups=norm_groups,
                    num_channels=ch_out,
                    weight_attr=param_attr,
                    bias_attr=bias_attr)
            norm_params = self.norm.parameters()
            if freeze_norm:
                for param in norm_params:
                    param.stop_gradient = True
            conv_bias_attr = False
        else:
            conv_bias_attr = True
            self.norm = None

        self.conv = nn.Conv2D(
            in_channels=ch_in,
            out_channels=ch_out,
            kernel_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            weight_attr=ParamAttr(initializer=Normal(
                mean=0., std=0.001)),
            bias_attr=conv_bias_attr)

    def forward(self, inputs):
        out = self.conv(inputs)
        if self.norm is not None:
            out = self.norm(out)
        if self.act == 'relu':
            out = F.relu(out)
        elif self.act == 'sigmoid':
            out = F.sigmoid(out)
        return out


class DepthWiseSeparableConvNormLayer(nn.Layer):
    def __init__(self,
                 ch_in,
                 ch_out,
                 filter_size,
                 stride=1,
                 dw_norm_type=None,
                 pw_norm_type=None,
                 norm_decay=0.,
                 freeze_norm=False,
                 dw_act=None,
                 pw_act=None):
        super(DepthWiseSeparableConvNormLayer, self).__init__()
        self.depthwise_conv = ConvNormLayer(
            ch_in=ch_in,
            ch_out=ch_in,
            filter_size=filter_size,
            stride=stride,
            groups=ch_in,
            norm_type=dw_norm_type,
            act=dw_act,
            norm_decay=norm_decay,
            freeze_norm=freeze_norm, )
        self.pointwise_conv = ConvNormLayer(
            ch_in=ch_in,
            ch_out=ch_out,
            filter_size=1,
            stride=1,
            norm_type=pw_norm_type,
            act=pw_act,
            norm_decay=norm_decay,
            freeze_norm=freeze_norm, )

    def forward(self, x):
        x = self.depthwise_conv(x)
        x = self.pointwise_conv(x)
        return x


class CrossResolutionWeightingModule(nn.Layer):
    """Cross-resolution channel weighting: every branch is pooled to the
    smallest spatial size, channel weights are computed jointly with a
    squeeze/excite pair of 1x1 convs, and the per-branch weights are upsampled
    back and applied to the original features."""

    def __init__(self,
                 channels,
                 ratio=16,
                 norm_type='bn',
                 freeze_norm=False,
                 norm_decay=0.):
        super(CrossResolutionWeightingModule, self).__init__()
        self.channels = channels
        total_channel = sum(channels)
        self.conv1 = ConvNormLayer(
            ch_in=total_channel,
            ch_out=total_channel // ratio,
            filter_size=1,
            stride=1,
            norm_type=norm_type,
            act='relu',
            freeze_norm=freeze_norm,
            norm_decay=norm_decay)
        self.conv2 = ConvNormLayer(
            ch_in=total_channel // ratio,
            ch_out=total_channel,
            filter_size=1,
            stride=1,
            norm_type=norm_type,
            act='sigmoid',
            freeze_norm=freeze_norm,
            norm_decay=norm_decay)

    def forward(self, x):
        mini_size = x[-1].shape[-2:]
        out = [F.adaptive_avg_pool2d(s, mini_size) for s in x[:-1]] + [x[-1]]
        out = paddle.concat(out, 1)
        out = self.conv1(out)
        out = self.conv2(out)
        out = paddle.split(out, self.channels, 1)
        out = [
            s * F.interpolate(
                a, s.shape[-2:], mode='nearest') for s, a in zip(x, out)
        ]
        return out


class SpatialWeightingModule(nn.Layer):
    """Squeeze-and-excitation style weighting computed from a globally
    average-pooled descriptor."""

    def __init__(self, in_channel, ratio=16, freeze_norm=False, norm_decay=0.):
        super(SpatialWeightingModule, self).__init__()
        self.global_avgpooling = nn.AdaptiveAvgPool2D(1)
        self.conv1 = ConvNormLayer(
            ch_in=in_channel,
            ch_out=in_channel // ratio,
            filter_size=1,
            stride=1,
            act='relu',
            freeze_norm=freeze_norm,
            norm_decay=norm_decay)
        self.conv2 = ConvNormLayer(
            ch_in=in_channel // ratio,
            ch_out=in_channel,
            filter_size=1,
            stride=1,
            act='sigmoid',
            freeze_norm=freeze_norm,
            norm_decay=norm_decay)

    def forward(self, x):
        out = self.global_avgpooling(x)
        out = self.conv1(out)
        out = self.conv2(out)
        return x * out


class ConditionalChannelWeightingBlock(nn.Layer):
    """Conditional channel weighting block: each branch is split in half; one
    half goes through cross-resolution weighting, a depthwise conv and spatial
    weighting, is concatenated back with the untouched half, and the result is
    channel-shuffled."""

    def __init__(self,
                 in_channels,
                 stride,
                 reduce_ratio,
                 norm_type='bn',
                 freeze_norm=False,
                 norm_decay=0.):
        super(ConditionalChannelWeightingBlock, self).__init__()
        assert stride in [1, 2]
        branch_channels = [channel // 2 for channel in in_channels]

        self.cross_resolution_weighting = CrossResolutionWeightingModule(
            branch_channels,
            ratio=reduce_ratio,
            norm_type=norm_type,
            freeze_norm=freeze_norm,
            norm_decay=norm_decay)
        self.depthwise_convs = nn.LayerList([
            ConvNormLayer(
                channel,
                channel,
                filter_size=3,
                stride=stride,
                groups=channel,
                norm_type=norm_type,
                freeze_norm=freeze_norm,
                norm_decay=norm_decay) for channel in branch_channels
        ])
        self.spatial_weighting = nn.LayerList([
            SpatialWeightingModule(
                channel,
                ratio=4,
                freeze_norm=freeze_norm,
                norm_decay=norm_decay) for channel in branch_channels
        ])

    def forward(self, x):
        x = [s.chunk(2, axis=1) for s in x]
        x1 = [s[0] for s in x]
        x2 = [s[1] for s in x]

        x2 = self.cross_resolution_weighting(x2)
        x2 = [dw(s) for s, dw in zip(x2, self.depthwise_convs)]
        x2 = [sw(s) for s, sw in zip(x2, self.spatial_weighting)]

        out = [paddle.concat([s1, s2], axis=1) for s1, s2 in zip(x1, x2)]
        out = [channel_shuffle(s, groups=2) for s in out]
        return out


class ShuffleUnit(nn.Layer):
    """ShuffleNetV2 unit: channel split, a 1x1 -> depthwise 3x3 -> 1x1 branch,
    concatenation and channel shuffle; the strided variant processes both
    branches to downsample."""

    def __init__(self,
                 in_channel,
                 out_channel,
                 stride,
                 norm_type='bn',
                 freeze_norm=False,
                 norm_decay=0.):
        super(ShuffleUnit, self).__init__()
        branch_channel = out_channel // 2
        self.stride = stride
        if self.stride == 1:
            assert in_channel == branch_channel * 2, \
                "when stride=1, in_channel {} should equal branch_channel * 2 ({})".format(in_channel, branch_channel * 2)
        if stride > 1:
            self.branch1 = nn.Sequential(
                ConvNormLayer(
                    ch_in=in_channel,
                    ch_out=in_channel,
                    filter_size=3,
                    stride=self.stride,
                    groups=in_channel,
                    norm_type=norm_type,
                    freeze_norm=freeze_norm,
                    norm_decay=norm_decay),
                ConvNormLayer(
                    ch_in=in_channel,
                    ch_out=branch_channel,
                    filter_size=1,
                    stride=1,
                    norm_type=norm_type,
                    act='relu',
                    freeze_norm=freeze_norm,
                    norm_decay=norm_decay), )
        self.branch2 = nn.Sequential(
            ConvNormLayer(
                ch_in=branch_channel if stride == 1 else in_channel,
                ch_out=branch_channel,
                filter_size=1,
                stride=1,
                norm_type=norm_type,
                act='relu',
                freeze_norm=freeze_norm,
                norm_decay=norm_decay),
            ConvNormLayer(
                ch_in=branch_channel,
                ch_out=branch_channel,
                filter_size=3,
                stride=self.stride,
                groups=branch_channel,
                norm_type=norm_type,
                freeze_norm=freeze_norm,
                norm_decay=norm_decay),
            ConvNormLayer(
                ch_in=branch_channel,
                ch_out=branch_channel,
                filter_size=1,
                stride=1,
                norm_type=norm_type,
                act='relu',
                freeze_norm=freeze_norm,
                norm_decay=norm_decay), )

    def forward(self, x):
        if self.stride > 1:
            x1 = self.branch1(x)
            x2 = self.branch2(x)
        else:
            x1, x2 = x.chunk(2, axis=1)
            x2 = self.branch2(x2)
        out = paddle.concat([x1, x2], axis=1)
        out = channel_shuffle(out, groups=2)
        return out


class IterativeHead(nn.Layer):
    """Iterative head: processes features from lowest to highest resolution,
    upsampling and adding the previous output before a depthwise separable
    projection, and returns the refined features in the original order."""

    def __init__(self,
                 in_channels,
                 norm_type='bn',
                 freeze_norm=False,
                 norm_decay=0.):
        super(IterativeHead, self).__init__()
        num_branches = len(in_channels)
        self.in_channels = in_channels[::-1]

        projects = []
        for i in range(num_branches):
            if i != num_branches - 1:
                projects.append(
                    DepthWiseSeparableConvNormLayer(
                        ch_in=self.in_channels[i],
                        ch_out=self.in_channels[i + 1],
                        filter_size=3,
                        stride=1,
                        dw_act=None,
                        pw_act='relu',
                        dw_norm_type=norm_type,
                        pw_norm_type=norm_type,
                        freeze_norm=freeze_norm,
                        norm_decay=norm_decay))
            else:
                projects.append(
                    DepthWiseSeparableConvNormLayer(
                        ch_in=self.in_channels[i],
                        ch_out=self.in_channels[i],
                        filter_size=3,
                        stride=1,
                        dw_act=None,
                        pw_act='relu',
                        dw_norm_type=norm_type,
                        pw_norm_type=norm_type,
                        freeze_norm=freeze_norm,
                        norm_decay=norm_decay))
        self.projects = nn.LayerList(projects)

    def forward(self, x):
        x = x[::-1]
        y = []
        last_x = None
        for i, s in enumerate(x):
            if last_x is not None:
                last_x = F.interpolate(
                    last_x,
                    size=s.shape[-2:],
                    mode='bilinear',
                    align_corners=True)
                s = s + last_x
            s = self.projects[i](s)
            y.append(s)
            last_x = s
        return y[::-1]


class Stem(nn.Layer):
    """Lite-HRNet stem: a stride-2 3x3 conv followed by a ShuffleNetV2-style
    unit, reducing the input resolution by 4x overall."""

    def __init__(self,
                 in_channel,
                 stem_channel,
                 out_channel,
                 expand_ratio,
                 norm_type='bn',
                 freeze_norm=False,
                 norm_decay=0.):
        super(Stem, self).__init__()
        self.conv1 = ConvNormLayer(
            in_channel,
            stem_channel,
            filter_size=3,
            stride=2,
            norm_type=norm_type,
            act='relu',
            freeze_norm=freeze_norm,
            norm_decay=norm_decay)
        mid_channel = int(round(stem_channel * expand_ratio))
        branch_channel = stem_channel // 2
        if stem_channel == out_channel:
            inc_channel = out_channel - branch_channel
        else:
            inc_channel = out_channel - stem_channel
        self.branch1 = nn.Sequential(
            ConvNormLayer(
                ch_in=branch_channel,
                ch_out=branch_channel,
                filter_size=3,
                stride=2,
                groups=branch_channel,
                norm_type=norm_type,
                freeze_norm=freeze_norm,
                norm_decay=norm_decay),
            ConvNormLayer(
                ch_in=branch_channel,
                ch_out=inc_channel,
                filter_size=1,
                stride=1,
                norm_type=norm_type,
                act='relu',
                freeze_norm=freeze_norm,
                norm_decay=norm_decay), )
        self.expand_conv = ConvNormLayer(
            ch_in=branch_channel,
            ch_out=mid_channel,
            filter_size=1,
            stride=1,
            norm_type=norm_type,
            act='relu',
            freeze_norm=freeze_norm,
            norm_decay=norm_decay)
        self.depthwise_conv = ConvNormLayer(
            ch_in=mid_channel,
            ch_out=mid_channel,
            filter_size=3,
            stride=2,
            groups=mid_channel,
            norm_type=norm_type,
            freeze_norm=freeze_norm,
            norm_decay=norm_decay)
        self.linear_conv = ConvNormLayer(
            ch_in=mid_channel,
            ch_out=branch_channel
            if stem_channel == out_channel else stem_channel,
            filter_size=1,
            stride=1,
            norm_type=norm_type,
            act='relu',
            freeze_norm=freeze_norm,
            norm_decay=norm_decay)

    def forward(self, x):
        x = self.conv1(x)
        x1, x2 = x.chunk(2, axis=1)
        x1 = self.branch1(x1)
        x2 = self.expand_conv(x2)
        x2 = self.depthwise_conv(x2)
        x2 = self.linear_conv(x2)
        out = paddle.concat([x1, x2], axis=1)
        out = channel_shuffle(out, groups=2)
        return out


class LiteHRNetModule(nn.Layer):
    """One multi-branch Lite-HRNet module: every branch is processed either by
    conditional channel weighting blocks ('LITE') or by naive shuffle units
    ('NAIVE'), optionally followed by multi-resolution fusion."""

    def __init__(self,
                 num_branches,
                 num_blocks,
                 in_channels,
                 reduce_ratio,
                 module_type,
                 multiscale_output=False,
                 with_fuse=True,
                 norm_type='bn',
                 freeze_norm=False,
                 norm_decay=0.):
        super(LiteHRNetModule, self).__init__()
        assert num_branches == len(in_channels), \
            "num_branches {} should equal the number of in_channels {}".format(num_branches, len(in_channels))
        assert module_type in [
            'LITE', 'NAIVE'
        ], "module_type should be one of ['LITE', 'NAIVE']"
        self.num_branches = num_branches
        self.in_channels = in_channels
        self.multiscale_output = multiscale_output
        self.with_fuse = with_fuse
        self.norm_type = 'bn'
        self.module_type = module_type

        if self.module_type == 'LITE':
            self.layers = self._make_weighting_blocks(
                num_blocks,
                reduce_ratio,
                freeze_norm=freeze_norm,
                norm_decay=norm_decay)
        elif self.module_type == 'NAIVE':
            self.layers = self._make_naive_branches(
                num_branches,
                num_blocks,
                freeze_norm=freeze_norm,
                norm_decay=norm_decay)
        if self.with_fuse:
            self.fuse_layers = self._make_fuse_layers(
                freeze_norm=freeze_norm, norm_decay=norm_decay)
            self.relu = nn.ReLU()

    def _make_weighting_blocks(self,
                               num_blocks,
                               reduce_ratio,
                               stride=1,
                               freeze_norm=False,
                               norm_decay=0.):
        layers = []
        for i in range(num_blocks):
            layers.append(
                ConditionalChannelWeightingBlock(
                    self.in_channels,
                    stride=stride,
                    reduce_ratio=reduce_ratio,
                    norm_type=self.norm_type,
                    freeze_norm=freeze_norm,
                    norm_decay=norm_decay))
        return nn.Sequential(*layers)

    def _make_naive_branches(self,
                             num_branches,
                             num_blocks,
                             freeze_norm=False,
                             norm_decay=0.):
        branches = []
        for branch_idx in range(num_branches):
            layers = []
            for i in range(num_blocks):
                layers.append(
                    ShuffleUnit(
                        self.in_channels[branch_idx],
                        self.in_channels[branch_idx],
                        stride=1,
                        norm_type=self.norm_type,
                        freeze_norm=freeze_norm,
                        norm_decay=norm_decay))
            branches.append(nn.Sequential(*layers))
        return nn.LayerList(branches)

    def _make_fuse_layers(self, freeze_norm=False, norm_decay=0.):
        if self.num_branches == 1:
            return None
        fuse_layers = []
        num_out_branches = self.num_branches if self.multiscale_output else 1
        for i in range(num_out_branches):
            fuse_layer = []
            for j in range(self.num_branches):
                if j > i:
                    fuse_layer.append(
                        nn.Sequential(
                            L.Conv2d(
                                self.in_channels[j],
                                self.in_channels[i],
                                kernel_size=1,
                                stride=1,
                                padding=0,
                                bias=False, ),
                            nn.BatchNorm2D(self.in_channels[i]),
                            nn.Upsample(
                                scale_factor=2**(j - i), mode='nearest')))
                elif j == i:
                    fuse_layer.append(None)
                else:
                    conv_downsamples = []
                    for k in range(i - j):
                        if k == i - j - 1:
                            conv_downsamples.append(
                                nn.Sequential(
                                    L.Conv2d(
                                        self.in_channels[j],
                                        self.in_channels[j],
                                        kernel_size=3,
                                        stride=2,
                                        padding=1,
                                        groups=self.in_channels[j],
                                        bias=False, ),
                                    nn.BatchNorm2D(self.in_channels[j]),
                                    L.Conv2d(
                                        self.in_channels[j],
                                        self.in_channels[i],
                                        kernel_size=1,
                                        stride=1,
                                        padding=0,
                                        bias=False, ),
                                    nn.BatchNorm2D(self.in_channels[i])))
                        else:
                            conv_downsamples.append(
                                nn.Sequential(
                                    L.Conv2d(
                                        self.in_channels[j],
                                        self.in_channels[j],
                                        kernel_size=3,
                                        stride=2,
                                        padding=1,
                                        groups=self.in_channels[j],
                                        bias=False, ),
                                    nn.BatchNorm2D(self.in_channels[j]),
                                    L.Conv2d(
                                        self.in_channels[j],
                                        self.in_channels[j],
                                        kernel_size=1,
                                        stride=1,
                                        padding=0,
                                        bias=False, ),
                                    nn.BatchNorm2D(self.in_channels[j]),
                                    nn.ReLU()))
                    fuse_layer.append(nn.Sequential(*conv_downsamples))
            fuse_layers.append(nn.LayerList(fuse_layer))
        return nn.LayerList(fuse_layers)

    def forward(self, x):
        if self.num_branches == 1:
            return [self.layers[0](x[0])]
        if self.module_type == 'LITE':
            out = self.layers(x)
        elif self.module_type == 'NAIVE':
            for i in range(self.num_branches):
                x[i] = self.layers[i](x[i])
            out = x
        if self.with_fuse:
            out_fuse = []
            for i in range(len(self.fuse_layers)):
                y = out[0] if i == 0 else self.fuse_layers[i][0](out[0])
                for j in range(self.num_branches):
                    if j == 0:
                        y += y
                    elif i == j:
                        y += out[j]
                    else:
                        y += self.fuse_layers[i][j](out[j])
                if i == 0:
                    out[i] = y
                out_fuse.append(self.relu(y))
            out = out_fuse
        elif not self.multiscale_output:
            out = [out[0]]
        return out


@register
class LiteHRNet(nn.Layer):
    """
    @inproceedings{Yulitehrnet21,
        title={Lite-HRNet: A Lightweight High-Resolution Network},
        author={Yu, Changqian and Xiao, Bin and Gao, Changxin and Yuan, Lu and Zhang, Lei and Sang, Nong and Wang, Jingdong},
        booktitle={CVPR},
        year={2021}
    }

    Args:
        network_type (str): one of ["lite_18", "lite_30", "naive", "wider_naive"].
            "naive": simply combines the shuffle block from ShuffleNet with the high-resolution design pattern of HRNet.
            "wider_naive": the naive network with wider channels in each block.
            "lite_18": Lite-HRNet-18, which replaces the pointwise convolution in a shuffle block with conditional channel weighting.
            "lite_30": Lite-HRNet-30, with more blocks than Lite-HRNet-18.
        freeze_at (int): index of the stage output to freeze (stop_gradient is set on it)
        freeze_norm (bool): whether to freeze the normalization layers
        norm_decay (float): weight decay for normalization layer weights
        return_idx (List): indices of the stages to return
    """
    def __init__(self,
                 network_type,
                 freeze_at=0,
                 freeze_norm=True,
                 norm_decay=0.,
                 return_idx=[0, 1, 2, 3]):
        super(LiteHRNet, self).__init__()
        if isinstance(return_idx, Integral):
            return_idx = [return_idx]
        assert network_type in ["lite_18", "lite_30", "naive", "wider_naive"], \
            "the network_type should be one of [lite_18, lite_30, naive, wider_naive]"
        assert len(return_idx) > 0, "need one or more return index"
        self.freeze_at = freeze_at
        self.freeze_norm = freeze_norm
        self.norm_decay = norm_decay
        self.return_idx = return_idx
        self.norm_type = 'bn'

        self.module_configs = {
            "lite_18": {
                "num_modules": [2, 4, 2],
                "num_branches": [2, 3, 4],
                "num_blocks": [2, 2, 2],
                "module_type": ["LITE", "LITE", "LITE"],
                "reduce_ratios": [8, 8, 8],
                "num_channels": [[40, 80], [40, 80, 160], [40, 80, 160, 320]],
            },
            "lite_30": {
                "num_modules": [3, 8, 3],
                "num_branches": [2, 3, 4],
                "num_blocks": [2, 2, 2],
                "module_type": ["LITE", "LITE", "LITE"],
                "reduce_ratios": [8, 8, 8],
                "num_channels": [[40, 80], [40, 80, 160], [40, 80, 160, 320]],
            },
            "naive": {
                "num_modules": [2, 4, 2],
                "num_branches": [2, 3, 4],
                "num_blocks": [2, 2, 2],
                "module_type": ["NAIVE", "NAIVE", "NAIVE"],
                "reduce_ratios": [1, 1, 1],
                "num_channels": [[30, 60], [30, 60, 120], [30, 60, 120, 240]],
            },
            "wider_naive": {
                "num_modules": [2, 4, 2],
                "num_branches": [2, 3, 4],
                "num_blocks": [2, 2, 2],
                "module_type": ["NAIVE", "NAIVE", "NAIVE"],
                "reduce_ratios": [1, 1, 1],
                "num_channels": [[40, 80], [40, 80, 160], [40, 80, 160, 320]],
            },
        }

        self.stages_config = self.module_configs[network_type]

        self.stem = Stem(3, 32, 32, 1)
        num_channels_pre_layer = [32]
        for stage_idx in range(3):
            num_channels = self.stages_config["num_channels"][stage_idx]
            setattr(self, 'transition{}'.format(stage_idx),
                    self._make_transition_layer(num_channels_pre_layer,
                                                num_channels, self.freeze_norm,
                                                self.norm_decay))
            stage, num_channels_pre_layer = self._make_stage(
                self.stages_config, stage_idx, num_channels, True,
                self.freeze_norm, self.norm_decay)
            setattr(self, 'stage{}'.format(stage_idx), stage)
        self.head_layer = IterativeHead(num_channels_pre_layer, 'bn',
                                        self.freeze_norm, self.norm_decay)

    def _make_transition_layer(self,
                               num_channels_pre_layer,
                               num_channels_cur_layer,
                               freeze_norm=False,
                               norm_decay=0.):
        num_branches_pre = len(num_channels_pre_layer)
        num_branches_cur = len(num_channels_cur_layer)
        transition_layers = []
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    transition_layers.append(
                        nn.Sequential(
                            L.Conv2d(
                                num_channels_pre_layer[i],
                                num_channels_pre_layer[i],
                                kernel_size=3,
                                stride=1,
                                padding=1,
                                groups=num_channels_pre_layer[i],
                                bias=False),
                            nn.BatchNorm2D(num_channels_pre_layer[i]),
                            L.Conv2d(
                                num_channels_pre_layer[i],
                                num_channels_cur_layer[i],
                                kernel_size=1,
                                stride=1,
                                padding=0,
                                bias=False, ),
                            nn.BatchNorm2D(num_channels_cur_layer[i]),
                            nn.ReLU()))
                else:
                    transition_layers.append(None)
            else:
                conv_downsamples = []
                for j in range(i + 1 - num_branches_pre):
                    conv_downsamples.append(
                        nn.Sequential(
                            L.Conv2d(
                                num_channels_pre_layer[-1],
                                num_channels_pre_layer[-1],
                                groups=num_channels_pre_layer[-1],
                                kernel_size=3,
                                stride=2,
                                padding=1,
                                bias=False, ),
                            nn.BatchNorm2D(num_channels_pre_layer[-1]),
                            L.Conv2d(
                                num_channels_pre_layer[-1],
                                num_channels_cur_layer[i]
                                if j == i - num_branches_pre else
                                num_channels_pre_layer[-1],
                                kernel_size=1,
                                stride=1,
                                padding=0,
                                bias=False, ),
                            nn.BatchNorm2D(num_channels_cur_layer[i]
                                           if j == i - num_branches_pre else
                                           num_channels_pre_layer[-1]),
                            nn.ReLU()))
                transition_layers.append(nn.Sequential(*conv_downsamples))
        return nn.LayerList(transition_layers)

    def _make_stage(self,
                    stages_config,
                    stage_idx,
                    in_channels,
                    multiscale_output,
                    freeze_norm=False,
                    norm_decay=0.):
        num_modules = stages_config["num_modules"][stage_idx]
        num_branches = stages_config["num_branches"][stage_idx]
        num_blocks = stages_config["num_blocks"][stage_idx]
        reduce_ratio = stages_config['reduce_ratios'][stage_idx]
        module_type = stages_config['module_type'][stage_idx]

        modules = []
        for i in range(num_modules):
            if not multiscale_output and i == num_modules - 1:
                reset_multiscale_output = False
            else:
                reset_multiscale_output = True
            modules.append(
                LiteHRNetModule(
                    num_branches,
                    num_blocks,
                    in_channels,
                    reduce_ratio,
                    module_type,
                    multiscale_output=reset_multiscale_output,
                    with_fuse=True,
                    freeze_norm=freeze_norm,
                    norm_decay=norm_decay))
            in_channels = modules[-1].in_channels
        return nn.Sequential(*modules), in_channels

    def forward(self, inputs):
        x = inputs['image']
        x = self.stem(x)
        y_list = [x]
        for stage_idx in range(3):
            x_list = []
            transition = getattr(self, 'transition{}'.format(stage_idx))
            for j in range(self.stages_config["num_branches"][stage_idx]):
                if transition[j] is not None:
                    if j >= len(y_list):
                        x_list.append(transition[j](y_list[-1]))
                    else:
                        x_list.append(transition[j](y_list[j]))
                else:
                    x_list.append(y_list[j])
            y_list = getattr(self, 'stage{}'.format(stage_idx))(x_list)
        x = self.head_layer(y_list)
        res = []
        for i, layer in enumerate(x):
            if i == self.freeze_at:
                layer.stop_gradient = True
            if i in self.return_idx:
                res.append(layer)
        return res

    @property
    def out_shape(self):
        return [
            ShapeSpec(
                channels=self._out_channels[i], stride=self._out_strides[i])
            for i in self.return_idx
        ]
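

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). A minimal smoke
# test of the backbone, assuming PaddlePaddle and PaddleDetection are
# installed; the input size and network_type are arbitrary examples. Because
# of the relative import above, call this helper from code that imports the
# module through the ppdet package rather than running the file directly.
# ---------------------------------------------------------------------------
def _smoke_test_lite_hrnet():
    model = LiteHRNet(network_type='lite_18', freeze_norm=False)
    model.eval()
    # the backbone expects a dict with an 'image' tensor of shape [N, 3, H, W]
    feats = model({'image': paddle.randn([1, 3, 256, 192])})
    for i, feat in enumerate(feats):
        print('output {}: {}'.format(i, feat.shape))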