# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import six
import numpy as np
from numbers import Integral

import paddle
import paddle.nn as nn
from paddle import ParamAttr
from paddle import to_tensor
import paddle.nn.functional as F
from paddle.nn.initializer import Normal, Constant, XavierUniform
from paddle.regularizer import L2Decay
# NOTE: LayerHelper and NumpyArrayInitializer are referenced by
# AnchorGrid.__call__ below but were missing from the original imports.
# These fluid paths are assumed to be available in Paddle 2.x.
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.initializer import NumpyArrayInitializer

from ppdet.core.workspace import register, serializable
from ppdet.modeling.bbox_utils import delta2bbox
from . import ops
from .initializer import xavier_uniform_, constant_

from paddle.vision.ops import DeformConv2D


def _to_list(l):
    if isinstance(l, (list, tuple)):
        return list(l)
    return [l]


class DeformableConvV2(nn.Layer):
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 weight_attr=None,
                 bias_attr=None,
                 lr_scale=1,
                 regularizer=None,
                 skip_quant=False,
                 dcn_bias_regularizer=L2Decay(0.),
                 dcn_bias_lr_scale=2.):
        super(DeformableConvV2, self).__init__()
        self.offset_channel = 2 * kernel_size**2
        self.mask_channel = kernel_size**2

        if lr_scale == 1 and regularizer is None:
            offset_bias_attr = ParamAttr(initializer=Constant(0.))
        else:
            offset_bias_attr = ParamAttr(
                initializer=Constant(0.),
                learning_rate=lr_scale,
                regularizer=regularizer)
        self.conv_offset = nn.Conv2D(
            in_channels,
            3 * kernel_size**2,
            kernel_size,
            stride=stride,
            padding=(kernel_size - 1) // 2,
            weight_attr=ParamAttr(initializer=Constant(0.0)),
            bias_attr=offset_bias_attr)
        if skip_quant:
            self.conv_offset.skip_quant = True

        if bias_attr:
            # in FCOS-DCN head, specifically need learning_rate and regularizer
            dcn_bias_attr = ParamAttr(
                initializer=Constant(value=0),
                regularizer=dcn_bias_regularizer,
                learning_rate=dcn_bias_lr_scale)
        else:
            # in ResNet backbone, do not need bias
            dcn_bias_attr = False
        self.conv_dcn = DeformConv2D(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=(kernel_size - 1) // 2 * dilation,
            dilation=dilation,
            groups=groups,
            weight_attr=weight_attr,
            bias_attr=dcn_bias_attr)

    def forward(self, x):
        offset_mask = self.conv_offset(x)
        offset, mask = paddle.split(
            offset_mask,
            num_or_sections=[self.offset_channel, self.mask_channel],
            axis=1)
        mask = F.sigmoid(mask)
        y = self.conv_dcn(x, offset, mask=mask)
        return y


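# Usage sketch (illustrative, not from the original source): the plain conv
# predicts 3*K*K channels per position, split into 2*K*K offsets and K*K
# modulation masks for DeformConv2D. Shapes below are assumptions for
# demonstration only.
#
#     conv = DeformableConvV2(in_channels=16, out_channels=32, kernel_size=3)
#     x = paddle.rand([2, 16, 64, 64])
#     y = conv(x)  # [2, 32, 64, 64]; stride=1 with 'same'-style padding

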
class ConvNormLayer(nn.Layer):
    def __init__(self,
                 ch_in,
                 ch_out,
                 filter_size,
                 stride,
                 groups=1,
                 norm_type='bn',
                 norm_decay=0.,
                 norm_groups=32,
                 use_dcn=False,
                 bias_on=False,
                 lr_scale=1.,
                 freeze_norm=False,
                 initializer=Normal(
                     mean=0., std=0.01),
                 skip_quant=False,
                 dcn_lr_scale=2.,
                 dcn_regularizer=L2Decay(0.)):
        super(ConvNormLayer, self).__init__()
        assert norm_type in ['bn', 'sync_bn', 'gn', None]

        if bias_on:
            bias_attr = ParamAttr(
                initializer=Constant(value=0.), learning_rate=lr_scale)
        else:
            bias_attr = False

        if not use_dcn:
            self.conv = nn.Conv2D(
                in_channels=ch_in,
                out_channels=ch_out,
                kernel_size=filter_size,
                stride=stride,
                padding=(filter_size - 1) // 2,
                groups=groups,
                weight_attr=ParamAttr(
                    initializer=initializer, learning_rate=1.),
                bias_attr=bias_attr)
            if skip_quant:
                self.conv.skip_quant = True
        else:
            # in FCOS-DCN head, specifically need learning_rate and regularizer
            self.conv = DeformableConvV2(
                in_channels=ch_in,
                out_channels=ch_out,
                kernel_size=filter_size,
                stride=stride,
                padding=(filter_size - 1) // 2,
                groups=groups,
                weight_attr=ParamAttr(
                    initializer=initializer, learning_rate=1.),
                bias_attr=True,
                lr_scale=dcn_lr_scale,
                regularizer=dcn_regularizer,
                dcn_bias_regularizer=dcn_regularizer,
                dcn_bias_lr_scale=dcn_lr_scale,
                skip_quant=skip_quant)

        norm_lr = 0. if freeze_norm else 1.
        param_attr = ParamAttr(
            learning_rate=norm_lr,
            regularizer=L2Decay(norm_decay) if norm_decay is not None else None)
        bias_attr = ParamAttr(
            learning_rate=norm_lr,
            regularizer=L2Decay(norm_decay) if norm_decay is not None else None)
        if norm_type in ['bn', 'sync_bn']:
            self.norm = nn.BatchNorm2D(
                ch_out, weight_attr=param_attr, bias_attr=bias_attr)
        elif norm_type == 'gn':
            self.norm = nn.GroupNorm(
                num_groups=norm_groups,
                num_channels=ch_out,
                weight_attr=param_attr,
                bias_attr=bias_attr)
        else:
            self.norm = None

    def forward(self, inputs):
        out = self.conv(inputs)
        if self.norm is not None:
            out = self.norm(out)
        return out


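# Usage sketch (illustrative): a 3x3 conv + BN block of the kind the heads in
# this file are built from. Channel sizes here are assumptions.
#
#     block = ConvNormLayer(ch_in=64, ch_out=128, filter_size=3, stride=2,
#                           norm_type='bn')
#     feat = paddle.rand([2, 64, 32, 32])
#     out = block(feat)  # [2, 128, 16, 16]

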
class LiteConv(nn.Layer):
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride=1,
                 with_act=True,
                 norm_type='sync_bn',
                 name=None):
        super(LiteConv, self).__init__()
        self.lite_conv = nn.Sequential()
        conv1 = ConvNormLayer(
            in_channels,
            in_channels,
            filter_size=5,
            stride=stride,
            groups=in_channels,
            norm_type=norm_type,
            initializer=XavierUniform())
        conv2 = ConvNormLayer(
            in_channels,
            out_channels,
            filter_size=1,
            stride=stride,
            norm_type=norm_type,
            initializer=XavierUniform())
        conv3 = ConvNormLayer(
            out_channels,
            out_channels,
            filter_size=1,
            stride=stride,
            norm_type=norm_type,
            initializer=XavierUniform())
        conv4 = ConvNormLayer(
            out_channels,
            out_channels,
            filter_size=5,
            stride=stride,
            groups=out_channels,
            norm_type=norm_type,
            initializer=XavierUniform())
        conv_list = [conv1, conv2, conv3, conv4]
        self.lite_conv.add_sublayer('conv1', conv1)
        self.lite_conv.add_sublayer('relu6_1', nn.ReLU6())
        self.lite_conv.add_sublayer('conv2', conv2)
        if with_act:
            self.lite_conv.add_sublayer('relu6_2', nn.ReLU6())
        self.lite_conv.add_sublayer('conv3', conv3)
        self.lite_conv.add_sublayer('relu6_3', nn.ReLU6())
        self.lite_conv.add_sublayer('conv4', conv4)
        if with_act:
            self.lite_conv.add_sublayer('relu6_4', nn.ReLU6())

    def forward(self, inputs):
        out = self.lite_conv(inputs)
        return out


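# Usage sketch (illustrative): LiteConv stacks depthwise 5x5 -> pointwise 1x1
# -> pointwise 1x1 -> depthwise 5x5, each followed by ReLU6 (the activations
# after conv2 and conv4 are gated by with_act).
#
#     lite = LiteConv(in_channels=32, out_channels=64)
#     y = lite(paddle.rand([2, 32, 28, 28]))  # [2, 64, 28, 28] when stride=1

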
class DropBlock(nn.Layer):
    def __init__(self, block_size, keep_prob, name=None, data_format='NCHW'):
        """
        DropBlock layer, see https://arxiv.org/abs/1810.12890

        Args:
            block_size (int): block size
            keep_prob (float): keep probability
            name (str): layer name
            data_format (str): data format, NCHW or NHWC
        """
        super(DropBlock, self).__init__()
        self.block_size = block_size
        self.keep_prob = keep_prob
        self.name = name
        self.data_format = data_format

    def forward(self, x):
        if not self.training or self.keep_prob == 1:
            return x
        else:
            gamma = (1. - self.keep_prob) / (self.block_size**2)
            if self.data_format == 'NCHW':
                shape = x.shape[2:]
            else:
                shape = x.shape[1:3]
            for s in shape:
                gamma *= s / (s - self.block_size + 1)

            matrix = paddle.cast(paddle.rand(x.shape) < gamma, x.dtype)
            mask_inv = F.max_pool2d(
                matrix,
                self.block_size,
                stride=1,
                padding=self.block_size // 2,
                data_format=self.data_format)
            mask = 1. - mask_inv
            y = x * mask * (mask.numel() / mask.sum())
            return y


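# Usage sketch (illustrative): at train time DropBlock zeroes contiguous
# block_size x block_size regions seeded with probability gamma, then rescales
# the survivors by numel/sum so the expected activation is preserved; at eval
# time it is the identity.
#
#     drop = DropBlock(block_size=3, keep_prob=0.9)
#     drop.train()
#     y = drop(paddle.rand([2, 8, 16, 16]))  # randomly masked, rescaled
#     drop.eval()
#     z = drop(paddle.rand([2, 8, 16, 16]))  # returned unchanged

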
@register
@serializable
class AnchorGeneratorSSD(object):
    def __init__(self,
                 steps=[8, 16, 32, 64, 100, 300],
                 aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
                 min_ratio=15,
                 max_ratio=90,
                 base_size=300,
                 min_sizes=[30.0, 60.0, 111.0, 162.0, 213.0, 264.0],
                 max_sizes=[60.0, 111.0, 162.0, 213.0, 264.0, 315.0],
                 offset=0.5,
                 flip=True,
                 clip=False,
                 min_max_aspect_ratios_order=False):
        self.steps = steps
        self.aspect_ratios = aspect_ratios
        self.min_ratio = min_ratio
        self.max_ratio = max_ratio
        self.base_size = base_size
        self.min_sizes = min_sizes
        self.max_sizes = max_sizes
        self.offset = offset
        self.flip = flip
        self.clip = clip
        self.min_max_aspect_ratios_order = min_max_aspect_ratios_order

        if self.min_sizes == [] and self.max_sizes == []:
            num_layer = len(aspect_ratios)
            step = int(
                math.floor(((self.max_ratio - self.min_ratio)) / (num_layer - 2
                                                                  )))
            for ratio in six.moves.range(self.min_ratio, self.max_ratio + 1,
                                         step):
                self.min_sizes.append(self.base_size * ratio / 100.)
                self.max_sizes.append(self.base_size * (ratio + step) / 100.)
            self.min_sizes = [self.base_size * .10] + self.min_sizes
            self.max_sizes = [self.base_size * .20] + self.max_sizes

        self.num_priors = []
        for aspect_ratio, min_size, max_size in zip(
                aspect_ratios, self.min_sizes, self.max_sizes):
            if isinstance(min_size, (list, tuple)):
                self.num_priors.append(
                    len(_to_list(min_size)) + len(_to_list(max_size)))
            else:
                self.num_priors.append((len(aspect_ratio) * 2 + 1) * len(
                    _to_list(min_size)) + len(_to_list(max_size)))

    def __call__(self, inputs, image):
        boxes = []
        for input, min_size, max_size, aspect_ratio, step in zip(
                inputs, self.min_sizes, self.max_sizes, self.aspect_ratios,
                self.steps):
            box, _ = ops.prior_box(
                input=input,
                image=image,
                min_sizes=_to_list(min_size),
                max_sizes=_to_list(max_size),
                aspect_ratios=aspect_ratio,
                flip=self.flip,
                clip=self.clip,
                steps=[step, step],
                offset=self.offset,
                min_max_aspect_ratios_order=self.min_max_aspect_ratios_order)
            boxes.append(paddle.reshape(box, [-1, 4]))
        return boxes


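# Worked example (derived from the num_priors formula above): with the
# defaults, a level with aspect_ratios=[2.] and scalar min/max sizes gets
# (1 * 2 + 1) * 1 + 1 = 4 priors per location, and a level with [2., 3.] gets
# (2 * 2 + 1) * 1 + 1 = 6, i.e. the classic SSD300 layout of 4/6/6/6/4/4.

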
@register
@serializable
class RCNNBox(object):
    __shared__ = ['num_classes', 'export_onnx']

    def __init__(self,
                 prior_box_var=[10., 10., 5., 5.],
                 code_type="decode_center_size",
                 box_normalized=False,
                 num_classes=80,
                 export_onnx=False):
        super(RCNNBox, self).__init__()
        self.prior_box_var = prior_box_var
        self.code_type = code_type
        self.box_normalized = box_normalized
        self.num_classes = num_classes
        self.export_onnx = export_onnx

    def __call__(self, bbox_head_out, rois, im_shape, scale_factor):
        bbox_pred = bbox_head_out[0]
        cls_prob = bbox_head_out[1]
        roi = rois[0]
        rois_num = rois[1]

        if self.export_onnx:
            onnx_rois_num_per_im = rois_num[0]
            origin_shape = paddle.expand(im_shape[0, :],
                                         [onnx_rois_num_per_im, 2])
        else:
            origin_shape_list = []
            if isinstance(roi, list):
                batch_size = len(roi)
            else:
                batch_size = paddle.slice(paddle.shape(im_shape), [0], [0], [1])

            # bbox_pred.shape: [N, C*4]
            for idx in range(batch_size):
                rois_num_per_im = rois_num[idx]
                expand_im_shape = paddle.expand(im_shape[idx, :],
                                                [rois_num_per_im, 2])
                origin_shape_list.append(expand_im_shape)

            origin_shape = paddle.concat(origin_shape_list)

        # bbox_pred.shape: [N, C*4]
        # C=num_classes in faster/mask rcnn(bbox_head), C=1 in cascade rcnn(cascade_head)
        bbox = paddle.concat(roi)
        bbox = delta2bbox(bbox_pred, bbox, self.prior_box_var)
        scores = cls_prob[:, :-1]

        # bbox.shape: [N, C, 4]
        # bbox.shape[1] must be equal to scores.shape[1]
        total_num = bbox.shape[0]
        bbox_dim = bbox.shape[-1]
        bbox = paddle.expand(bbox, [total_num, self.num_classes, bbox_dim])

        origin_h = paddle.unsqueeze(origin_shape[:, 0], axis=1)
        origin_w = paddle.unsqueeze(origin_shape[:, 1], axis=1)
        zeros = paddle.zeros_like(origin_h)
        x1 = paddle.maximum(paddle.minimum(bbox[:, :, 0], origin_w), zeros)
        y1 = paddle.maximum(paddle.minimum(bbox[:, :, 1], origin_h), zeros)
        x2 = paddle.maximum(paddle.minimum(bbox[:, :, 2], origin_w), zeros)
        y2 = paddle.maximum(paddle.minimum(bbox[:, :, 3], origin_h), zeros)
        bbox = paddle.stack([x1, y1, x2, y2], axis=-1)
        bboxes = (bbox, rois_num)
        return bboxes, scores


@register
@serializable
class MultiClassNMS(object):
    def __init__(self,
                 score_threshold=.05,
                 nms_top_k=-1,
                 keep_top_k=100,
                 nms_threshold=.5,
                 normalized=True,
                 nms_eta=1.0,
                 return_index=False,
                 return_rois_num=True,
                 trt=False):
        super(MultiClassNMS, self).__init__()
        self.score_threshold = score_threshold
        self.nms_top_k = nms_top_k
        self.keep_top_k = keep_top_k
        self.nms_threshold = nms_threshold
        self.normalized = normalized
        self.nms_eta = nms_eta
        self.return_index = return_index
        self.return_rois_num = return_rois_num
        self.trt = trt

    def __call__(self, bboxes, score, background_label=-1):
        """
        Args:
            bboxes (Tensor|List[Tensor]): 1. (Tensor) Predicted bboxes with
                   shape [N, M, 4], where N is the batch size and M is the
                   number of bboxes.
                2. (List[Tensor]) bboxes and bbox_num, where bboxes has shape
                   [M, C, 4] (C is the number of classes) and bbox_num is the
                   number of bboxes of each image, with shape [N].
            score (Tensor): Predicted scores with shape [N, C, M] or [M, C].
            background_label (int): Label of the background class to ignore;
                e.g. num_classes for RCNN and -1 for YOLO.
        """
        kwargs = self.__dict__.copy()
        if isinstance(bboxes, tuple):
            bboxes, bbox_num = bboxes
            kwargs.update({'rois_num': bbox_num})
        if background_label > -1:
            kwargs.update({'background_label': background_label})
        kwargs.pop('trt')
        # TODO(wangxinxin08): paddle version should be develop or 2.3 and above to run nms on tensorrt
        if self.trt and (int(paddle.version.major) == 0 or
                         (int(paddle.version.major) >= 2 and
                          int(paddle.version.minor) >= 3)):
            # TODO(wangxinxin08): tricky switch to run nms on tensorrt
            kwargs.update({'nms_eta': 1.1})
            bbox, bbox_num, _ = ops.multiclass_nms(bboxes, score, **kwargs)
            mask = paddle.slice(bbox, [-1], [0], [1]) != -1
            bbox = paddle.masked_select(bbox, mask).reshape((-1, 6))
            return bbox, bbox_num, None
        else:
            return ops.multiclass_nms(bboxes, score, **kwargs)


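# Usage sketch (illustrative; shapes and the 6-column output layout follow
# Paddle's multiclass_nms convention, stated here as an assumption):
#
#     nms = MultiClassNMS(score_threshold=0.05, keep_top_k=100)
#     boxes = paddle.rand([1, 200, 4])    # xyxy
#     scores = paddle.rand([1, 80, 200])  # one row per class
#     bbox, bbox_num, _ = nms(boxes, scores)
#     # bbox: [K, 6] rows of [class, score, x1, y1, x2, y2]; bbox_num: [N]

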
@register
@serializable
class MatrixNMS(object):
    __append_doc__ = True

    def __init__(self,
                 score_threshold=.05,
                 post_threshold=.05,
                 nms_top_k=-1,
                 keep_top_k=100,
                 use_gaussian=False,
                 gaussian_sigma=2.,
                 normalized=False,
                 background_label=0):
        super(MatrixNMS, self).__init__()
        self.score_threshold = score_threshold
        self.post_threshold = post_threshold
        self.nms_top_k = nms_top_k
        self.keep_top_k = keep_top_k
        self.normalized = normalized
        self.use_gaussian = use_gaussian
        self.gaussian_sigma = gaussian_sigma
        self.background_label = background_label

    def __call__(self, bbox, score, *args):
        return ops.matrix_nms(
            bboxes=bbox,
            scores=score,
            score_threshold=self.score_threshold,
            post_threshold=self.post_threshold,
            nms_top_k=self.nms_top_k,
            keep_top_k=self.keep_top_k,
            use_gaussian=self.use_gaussian,
            gaussian_sigma=self.gaussian_sigma,
            background_label=self.background_label,
            normalized=self.normalized)


@register
@serializable
class YOLOBox(object):
    __shared__ = ['num_classes']

    def __init__(self,
                 num_classes=80,
                 conf_thresh=0.005,
                 downsample_ratio=32,
                 clip_bbox=True,
                 scale_x_y=1.):
        self.num_classes = num_classes
        self.conf_thresh = conf_thresh
        self.downsample_ratio = downsample_ratio
        self.clip_bbox = clip_bbox
        self.scale_x_y = scale_x_y

    def __call__(self,
                 yolo_head_out,
                 anchors,
                 im_shape,
                 scale_factor,
                 var_weight=None):
        boxes_list = []
        scores_list = []
        origin_shape = im_shape / scale_factor
        origin_shape = paddle.cast(origin_shape, 'int32')
        for i, head_out in enumerate(yolo_head_out):
            boxes, scores = ops.yolo_box(head_out, origin_shape, anchors[i],
                                         self.num_classes, self.conf_thresh,
                                         self.downsample_ratio // 2**i,
                                         self.clip_bbox, self.scale_x_y)
            boxes_list.append(boxes)
            scores_list.append(paddle.transpose(scores, perm=[0, 2, 1]))
        yolo_boxes = paddle.concat(boxes_list, axis=1)
        yolo_scores = paddle.concat(scores_list, axis=2)
        return yolo_boxes, yolo_scores


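# Usage sketch (illustrative): downsample_ratio // 2**i walks the strides
# 32, 16, 8 across three levels, so yolo_head_out must be ordered coarsest
# first. All names below are placeholders.
#
#     decoder = YOLOBox(num_classes=80)
#     # yolo_head_out: list of [N, A*(5+80), H_i, W_i] tensors, strides 32/16/8
#     # anchors: per-level anchor lists matching yolo_head_out
#     boxes, scores = decoder(yolo_head_out, anchors, im_shape, scale_factor)
#     # boxes/scores concatenate per-level results; feed them to MultiClassNMS

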
@register
@serializable
class SSDBox(object):
    def __init__(self,
                 is_normalized=True,
                 prior_box_var=[0.1, 0.1, 0.2, 0.2],
                 use_fuse_decode=False):
        self.is_normalized = is_normalized
        self.norm_delta = float(not self.is_normalized)
        self.prior_box_var = prior_box_var
        self.use_fuse_decode = use_fuse_decode

    def __call__(self,
                 preds,
                 prior_boxes,
                 im_shape,
                 scale_factor,
                 var_weight=None):
        boxes, scores = preds
        boxes = paddle.concat(boxes, axis=1)
        prior_boxes = paddle.concat(prior_boxes)
        if self.use_fuse_decode:
            output_boxes = ops.box_coder(
                prior_boxes,
                self.prior_box_var,
                boxes,
                code_type="decode_center_size",
                box_normalized=self.is_normalized)
        else:
            pb_w = prior_boxes[:, 2] - prior_boxes[:, 0] + self.norm_delta
            pb_h = prior_boxes[:, 3] - prior_boxes[:, 1] + self.norm_delta
            pb_x = prior_boxes[:, 0] + pb_w * 0.5
            pb_y = prior_boxes[:, 1] + pb_h * 0.5
            out_x = pb_x + boxes[:, :, 0] * pb_w * self.prior_box_var[0]
            out_y = pb_y + boxes[:, :, 1] * pb_h * self.prior_box_var[1]
            out_w = paddle.exp(boxes[:, :, 2] * self.prior_box_var[2]) * pb_w
            out_h = paddle.exp(boxes[:, :, 3] * self.prior_box_var[3]) * pb_h
            output_boxes = paddle.stack(
                [
                    out_x - out_w / 2., out_y - out_h / 2., out_x + out_w / 2.,
                    out_y + out_h / 2.
                ],
                axis=-1)

        if self.is_normalized:
            h = (im_shape[:, 0] / scale_factor[:, 0]).unsqueeze(-1)
            w = (im_shape[:, 1] / scale_factor[:, 1]).unsqueeze(-1)
            im_shape = paddle.stack([w, h, w, h], axis=-1)
            output_boxes *= im_shape
        else:
            output_boxes[..., -2:] -= 1.0
        output_scores = F.softmax(paddle.concat(
            scores, axis=1)).transpose([0, 2, 1])
        return output_boxes, output_scores


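# Decode equations implemented by the manual branch above (standard SSD
# center-size decoding; v0..v3 are the prior_box_var entries):
#
#     x = x_p + dx * w_p * v0        w = w_p * exp(dw * v2)
#     y = y_p + dy * h_p * v1        h = h_p * exp(dh * v3)
#
# where (x_p, y_p, w_p, h_p) is the prior in center-size form and
# (dx, dy, dw, dh) is the network's predicted delta; the result is then
# converted back to corner form.

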
@register
@serializable
class AnchorGrid(object):
    """Generate anchor grid

    Args:
        image_size (int or list): input image size, may be a single integer or
            list of [h, w]. Default: 512
        min_level (int): min level of the feature pyramid. Default: 3
        max_level (int): max level of the feature pyramid. Default: 7
        anchor_base_scale: base anchor scale. Default: 4
        num_scales: number of anchor scales. Default: 3
        aspect_ratios: aspect ratios. default: [[1, 1], [1.4, 0.7], [0.7, 1.4]]
    """

    def __init__(self,
                 image_size=512,
                 min_level=3,
                 max_level=7,
                 anchor_base_scale=4,
                 num_scales=3,
                 aspect_ratios=[[1, 1], [1.4, 0.7], [0.7, 1.4]]):
        super(AnchorGrid, self).__init__()
        if isinstance(image_size, Integral):
            self.image_size = [image_size, image_size]
        else:
            self.image_size = image_size
        for dim in self.image_size:
            assert dim % 2 ** max_level == 0, \
                "image size should be multiple of the max level stride"
        self.min_level = min_level
        self.max_level = max_level
        self.anchor_base_scale = anchor_base_scale
        self.num_scales = num_scales
        self.aspect_ratios = aspect_ratios

    @property
    def base_cell(self):
        if not hasattr(self, '_base_cell'):
            self._base_cell = self.make_cell()
        return self._base_cell

    def make_cell(self):
        scales = [2**(i / self.num_scales) for i in range(self.num_scales)]
        scales = np.array(scales)
        ratios = np.array(self.aspect_ratios)
        ws = np.outer(scales, ratios[:, 0]).reshape(-1, 1)
        hs = np.outer(scales, ratios[:, 1]).reshape(-1, 1)
        anchors = np.hstack((-0.5 * ws, -0.5 * hs, 0.5 * ws, 0.5 * hs))
        return anchors

    def make_grid(self, stride):
        cell = self.base_cell * stride * self.anchor_base_scale
        x_steps = np.arange(stride // 2, self.image_size[1], stride)
        y_steps = np.arange(stride // 2, self.image_size[0], stride)
        offset_x, offset_y = np.meshgrid(x_steps, y_steps)
        offset_x = offset_x.flatten()
        offset_y = offset_y.flatten()
        offsets = np.stack((offset_x, offset_y, offset_x, offset_y), axis=-1)
        offsets = offsets[:, np.newaxis, :]
        return (cell + offsets).reshape(-1, 4)

    def generate(self):
        return [
            self.make_grid(2**l)
            for l in range(self.min_level, self.max_level + 1)
        ]

    def __call__(self):
        if not hasattr(self, '_anchor_vars'):
            anchor_vars = []
            helper = LayerHelper('anchor_grid')
            for idx, l in enumerate(range(self.min_level, self.max_level + 1)):
                stride = 2**l
                anchors = self.make_grid(stride)
                var = helper.create_parameter(
                    attr=ParamAttr(name='anchors_{}'.format(idx)),
                    shape=anchors.shape,
                    dtype='float32',
                    stop_gradient=True,
                    default_initializer=NumpyArrayInitializer(anchors))
                anchor_vars.append(var)
                var.persistable = True
            self._anchor_vars = anchor_vars

        return self._anchor_vars


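# Usage sketch (illustrative, numpy only): generate() needs no Paddle state,
# so the anchor layout is easy to inspect.
#
#     grid = AnchorGrid(image_size=512, min_level=3, max_level=7)
#     anchors = grid.generate()            # 5 levels, strides 8..128
#     # level 0: (512 // 8)**2 cells * 3 scales * 3 ratios = 36864 anchors
#     print([a.shape for a in anchors])    # [(36864, 4), (9216, 4), ...]

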
@register
@serializable
class FCOSBox(object):
    __shared__ = ['num_classes']

    def __init__(self, num_classes=80):
        super(FCOSBox, self).__init__()
        self.num_classes = num_classes

    def _merge_hw(self, inputs, ch_type="channel_first"):
        """
        Merge h and w of the feature map into one dimension.

        Args:
            inputs (Tensor): Tensor of the input feature map
            ch_type (str): "channel_first" or "channel_last" style
        Return:
            new_shape (Tensor): The new shape after h and w merged
        """
        shape_ = paddle.shape(inputs)
        bs, ch, hi, wi = shape_[0], shape_[1], shape_[2], shape_[3]
        img_size = hi * wi
        img_size.stop_gradient = True
        if ch_type == "channel_first":
            new_shape = paddle.concat([bs, ch, img_size])
        elif ch_type == "channel_last":
            new_shape = paddle.concat([bs, img_size, ch])
        else:
            raise KeyError("Wrong ch_type %s" % ch_type)
        new_shape.stop_gradient = True
        return new_shape

    def _postprocessing_by_level(self, locations, box_cls, box_reg, box_ctn,
                                 scale_factor):
        """
        Postprocess each layer of the output with corresponding locations.

        Args:
            locations (Tensor): anchor points for current layer, [H*W, 2]
            box_cls (Tensor): categories prediction, [N, C, H, W],
                C is the number of classes
            box_reg (Tensor): bounding box prediction, [N, 4, H, W]
            box_ctn (Tensor): centerness prediction, [N, 1, H, W]
            scale_factor (Tensor): [h_scale, w_scale] for input images
        Return:
            box_cls_ch_last (Tensor): score for each category, in [N, C, M],
                C is the number of classes and M is the number of anchor points
            box_reg_decoding (Tensor): decoded bounding box, in [N, M, 4],
                last dimension is [x1, y1, x2, y2]
        """
        act_shape_cls = self._merge_hw(box_cls)
        box_cls_ch_last = paddle.reshape(x=box_cls, shape=act_shape_cls)
        box_cls_ch_last = F.sigmoid(box_cls_ch_last)

        act_shape_reg = self._merge_hw(box_reg)
        box_reg_ch_last = paddle.reshape(x=box_reg, shape=act_shape_reg)
        box_reg_ch_last = paddle.transpose(box_reg_ch_last, perm=[0, 2, 1])
        box_reg_decoding = paddle.stack(
            [
                locations[:, 0] - box_reg_ch_last[:, :, 0],
                locations[:, 1] - box_reg_ch_last[:, :, 1],
                locations[:, 0] + box_reg_ch_last[:, :, 2],
                locations[:, 1] + box_reg_ch_last[:, :, 3]
            ],
            axis=1)
        box_reg_decoding = paddle.transpose(box_reg_decoding, perm=[0, 2, 1])

        act_shape_ctn = self._merge_hw(box_ctn)
        box_ctn_ch_last = paddle.reshape(x=box_ctn, shape=act_shape_ctn)
        box_ctn_ch_last = F.sigmoid(box_ctn_ch_last)

        # recover the location to original image
        im_scale = paddle.concat([scale_factor, scale_factor], axis=1)
        im_scale = paddle.expand(im_scale, [box_reg_decoding.shape[0], 4])
        im_scale = paddle.reshape(im_scale, [box_reg_decoding.shape[0], -1, 4])
        box_reg_decoding = box_reg_decoding / im_scale
        box_cls_ch_last = box_cls_ch_last * box_ctn_ch_last
        return box_cls_ch_last, box_reg_decoding

    def __call__(self, locations, cls_logits, bboxes_reg, centerness,
                 scale_factor):
        pred_boxes_ = []
        pred_scores_ = []
        for pts, cls, box, ctn in zip(locations, cls_logits, bboxes_reg,
                                      centerness):
            pred_scores_lvl, pred_boxes_lvl = self._postprocessing_by_level(
                pts, cls, box, ctn, scale_factor)
            pred_boxes_.append(pred_boxes_lvl)
            pred_scores_.append(pred_scores_lvl)
        pred_boxes = paddle.concat(pred_boxes_, axis=1)
        pred_scores = paddle.concat(pred_scores_, axis=2)
        return pred_boxes, pred_scores


@register
class TTFBox(object):
    __shared__ = ['down_ratio']

    def __init__(self, max_per_img=100, score_thresh=0.01, down_ratio=4):
        super(TTFBox, self).__init__()
        self.max_per_img = max_per_img
        self.score_thresh = score_thresh
        self.down_ratio = down_ratio

    def _simple_nms(self, heat, kernel=3):
        """
        Use maxpool to filter the max score, get local peaks.
        """
        pad = (kernel - 1) // 2
        hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad)
        keep = paddle.cast(hmax == heat, 'float32')
        return heat * keep

    def _topk(self, scores):
        """
        Select top k scores and decode to get xy coordinates.
        """
        k = self.max_per_img
        shape_fm = paddle.shape(scores)
        shape_fm.stop_gradient = True
        cat, height, width = shape_fm[1], shape_fm[2], shape_fm[3]
        # batch size is 1
        scores_r = paddle.reshape(scores, [cat, -1])
        topk_scores, topk_inds = paddle.topk(scores_r, k)
        topk_ys = topk_inds // width
        topk_xs = topk_inds % width

        topk_score_r = paddle.reshape(topk_scores, [-1])
        topk_score, topk_ind = paddle.topk(topk_score_r, k)
        k_t = paddle.full(paddle.shape(topk_ind), k, dtype='int64')
        topk_clses = paddle.cast(paddle.floor_divide(topk_ind, k_t), 'float32')

        topk_inds = paddle.reshape(topk_inds, [-1])
        topk_ys = paddle.reshape(topk_ys, [-1, 1])
        topk_xs = paddle.reshape(topk_xs, [-1, 1])
        topk_inds = paddle.gather(topk_inds, topk_ind)
        topk_ys = paddle.gather(topk_ys, topk_ind)
        topk_xs = paddle.gather(topk_xs, topk_ind)
        return topk_score, topk_inds, topk_clses, topk_ys, topk_xs

    def _decode(self, hm, wh, im_shape, scale_factor):
        heatmap = F.sigmoid(hm)
        heat = self._simple_nms(heatmap)
        scores, inds, clses, ys, xs = self._topk(heat)
        ys = paddle.cast(ys, 'float32') * self.down_ratio
        xs = paddle.cast(xs, 'float32') * self.down_ratio
        scores = paddle.tensor.unsqueeze(scores, [1])
        clses = paddle.tensor.unsqueeze(clses, [1])

        wh_t = paddle.transpose(wh, [0, 2, 3, 1])
        wh = paddle.reshape(wh_t, [-1, paddle.shape(wh_t)[-1]])
        wh = paddle.gather(wh, inds)

        x1 = xs - wh[:, 0:1]
        y1 = ys - wh[:, 1:2]
        x2 = xs + wh[:, 2:3]
        y2 = ys + wh[:, 3:4]
        bboxes = paddle.concat([x1, y1, x2, y2], axis=1)

        scale_y = scale_factor[:, 0:1]
        scale_x = scale_factor[:, 1:2]
        scale_expand = paddle.concat(
            [scale_x, scale_y, scale_x, scale_y], axis=1)
        boxes_shape = paddle.shape(bboxes)
        boxes_shape.stop_gradient = True
        scale_expand = paddle.expand(scale_expand, shape=boxes_shape)
        bboxes = paddle.divide(bboxes, scale_expand)
        results = paddle.concat([clses, scores, bboxes], axis=1)
        # hack: append a result with cls=-1 and score=1. to avoid the case
        # where all scores are less than score_thresh, which may cause an
        # error in gather.
        fill_r = paddle.to_tensor(np.array([[-1, 1, 0, 0, 0, 0]]))
        fill_r = paddle.cast(fill_r, results.dtype)
        results = paddle.concat([results, fill_r])
        scores = results[:, 1]
        valid_ind = paddle.nonzero(scores > self.score_thresh)
        results = paddle.gather(results, valid_ind)
        return results, paddle.shape(results)[0:1]

    def __call__(self, hm, wh, im_shape, scale_factor):
        results = []
        results_num = []
        for i in range(scale_factor.shape[0]):
            result, num = self._decode(hm[i:i + 1, ], wh[i:i + 1, ],
                                       im_shape[i:i + 1, ],
                                       scale_factor[i:i + 1, ])
            results.append(result)
            results_num.append(num)
        results = paddle.concat(results, axis=0)
        results_num = paddle.concat(results_num, axis=0)
        return results, results_num


@register
@serializable
class JDEBox(object):
    __shared__ = ['num_classes']

    def __init__(self, num_classes=1, conf_thresh=0.3, downsample_ratio=32):
        self.num_classes = num_classes
        self.conf_thresh = conf_thresh
        self.downsample_ratio = downsample_ratio

    def generate_anchor(self, nGh, nGw, anchor_wh):
        nA = len(anchor_wh)
        yv, xv = paddle.meshgrid([paddle.arange(nGh), paddle.arange(nGw)])
        mesh = paddle.stack(
            (xv, yv), axis=0).cast(dtype='float32')  # 2 x nGh x nGw
        meshs = paddle.tile(mesh, [nA, 1, 1, 1])

        anchor_offset_mesh = anchor_wh[:, :, None][:, :, :, None].repeat(
            int(nGh), axis=-2).repeat(
                int(nGw), axis=-1)
        anchor_offset_mesh = paddle.to_tensor(
            anchor_offset_mesh.astype(np.float32))
        # nA x 2 x nGh x nGw

        anchor_mesh = paddle.concat([meshs, anchor_offset_mesh], axis=1)
        anchor_mesh = paddle.transpose(anchor_mesh,
                                       [0, 2, 3, 1])  # (nA x nGh x nGw) x 4
        return anchor_mesh

    def decode_delta(self, delta, fg_anchor_list):
        px, py, pw, ph = fg_anchor_list[:, 0], fg_anchor_list[:, 1], \
            fg_anchor_list[:, 2], fg_anchor_list[:, 3]
        dx, dy, dw, dh = delta[:, 0], delta[:, 1], delta[:, 2], delta[:, 3]
        gx = pw * dx + px
        gy = ph * dy + py
        gw = pw * paddle.exp(dw)
        gh = ph * paddle.exp(dh)
        gx1 = gx - gw * 0.5
        gy1 = gy - gh * 0.5
        gx2 = gx + gw * 0.5
        gy2 = gy + gh * 0.5
        return paddle.stack([gx1, gy1, gx2, gy2], axis=1)

    def decode_delta_map(self, nA, nGh, nGw, delta_map, anchor_vec):
        anchor_mesh = self.generate_anchor(nGh, nGw, anchor_vec)
        anchor_mesh = paddle.unsqueeze(anchor_mesh, 0)
        pred_list = self.decode_delta(
            paddle.reshape(
                delta_map, shape=[-1, 4]),
            paddle.reshape(
                anchor_mesh, shape=[-1, 4]))
        pred_map = paddle.reshape(pred_list, shape=[nA * nGh * nGw, 4])
        return pred_map

    def _postprocessing_by_level(self, nA, stride, head_out, anchor_vec):
        boxes_shape = head_out.shape  # [nB, nA*6, nGh, nGw]
        nGh, nGw = boxes_shape[-2], boxes_shape[-1]
        nB = 1  # TODO: only support bs=1 now
        boxes_list, scores_list = [], []
        for idx in range(nB):
            p = paddle.reshape(
                head_out[idx], shape=[nA, self.num_classes + 5, nGh, nGw])
            p = paddle.transpose(p, perm=[0, 2, 3, 1])  # [nA, nGh, nGw, 6]
            delta_map = p[:, :, :, :4]
            boxes = self.decode_delta_map(nA, nGh, nGw, delta_map, anchor_vec)
            # [nA * nGh * nGw, 4]
            boxes_list.append(boxes * stride)

            p_conf = paddle.transpose(
                p[:, :, :, 4:6], perm=[3, 0, 1, 2])  # [2, nA, nGh, nGw]
            p_conf = F.softmax(
                p_conf, axis=0)[1, :, :, :].unsqueeze(-1)  # [nA, nGh, nGw, 1]
            scores = paddle.reshape(p_conf, shape=[nA * nGh * nGw, 1])
            scores_list.append(scores)

        boxes_results = paddle.stack(boxes_list)
        scores_results = paddle.stack(scores_list)
        return boxes_results, scores_results

    def __call__(self, yolo_head_out, anchors):
        bbox_pred_list = []
        for i, head_out in enumerate(yolo_head_out):
            stride = self.downsample_ratio // 2**i
            anc_w, anc_h = anchors[i][0::2], anchors[i][1::2]
            anchor_vec = np.stack((anc_w, anc_h), axis=1) / stride
            nA = len(anc_w)
            boxes, scores = self._postprocessing_by_level(nA, stride, head_out,
                                                          anchor_vec)
            bbox_pred_list.append(paddle.concat([boxes, scores], axis=-1))

        yolo_boxes_scores = paddle.concat(bbox_pred_list, axis=1)
        boxes_idx_over_conf_thr = paddle.nonzero(
            yolo_boxes_scores[:, :, -1] > self.conf_thresh)
        boxes_idx_over_conf_thr.stop_gradient = True

        return boxes_idx_over_conf_thr, yolo_boxes_scores


@register
@serializable
class MaskMatrixNMS(object):
    """
    Matrix NMS for multi-class masks.

    Args:
        update_threshold (float): Threshold applied to the decayed category
            scores; instances below it are filtered out.
        pre_nms_top_n (int): Number of total instances to be kept per image before NMS.
        post_nms_top_n (int): Number of total instances to be kept per image after NMS.
        kernel (str): 'linear' or 'gaussian'.
        sigma (float): std in gaussian method.
    Input:
        seg_preds (Variable): shape (n, h, w), segmentation feature maps
        seg_masks (Variable): shape (n, h, w), segmentation feature maps
        cate_labels (Variable): shape (n), mask labels in descending order
        cate_scores (Variable): shape (n), mask scores in descending order
        sum_masks (Variable): a float tensor of the sum of seg_masks
    Returns:
        tuple: (seg_preds, cate_scores, cate_labels) after suppression
    """

    def __init__(self,
                 update_threshold=0.05,
                 pre_nms_top_n=500,
                 post_nms_top_n=100,
                 kernel='gaussian',
                 sigma=2.0):
        super(MaskMatrixNMS, self).__init__()
        self.update_threshold = update_threshold
        self.pre_nms_top_n = pre_nms_top_n
        self.post_nms_top_n = post_nms_top_n
        self.kernel = kernel
        self.sigma = sigma

    def _sort_score(self, scores, top_num):
        if paddle.shape(scores)[0] > top_num:
            return paddle.topk(scores, top_num)[1]
        else:
            return paddle.argsort(scores, descending=True)

    def __call__(self,
                 seg_preds,
                 seg_masks,
                 cate_labels,
                 cate_scores,
                 sum_masks=None):
        # sort and keep top nms_pre
        sort_inds = self._sort_score(cate_scores, self.pre_nms_top_n)
        seg_masks = paddle.gather(seg_masks, index=sort_inds)
        seg_preds = paddle.gather(seg_preds, index=sort_inds)
        sum_masks = paddle.gather(sum_masks, index=sort_inds)
        cate_scores = paddle.gather(cate_scores, index=sort_inds)
        cate_labels = paddle.gather(cate_labels, index=sort_inds)

        seg_masks = paddle.flatten(seg_masks, start_axis=1, stop_axis=-1)
        # inter.
        inter_matrix = paddle.mm(seg_masks, paddle.transpose(seg_masks, [1, 0]))
        n_samples = paddle.shape(cate_labels)
        # union.
        sum_masks_x = paddle.expand(sum_masks, shape=[n_samples, n_samples])
        # iou.
        iou_matrix = (inter_matrix / (
            sum_masks_x + paddle.transpose(sum_masks_x, [1, 0]) - inter_matrix))
        iou_matrix = paddle.triu(iou_matrix, diagonal=1)
        # label_specific matrix.
        cate_labels_x = paddle.expand(cate_labels, shape=[n_samples, n_samples])
        label_matrix = paddle.cast(
            (cate_labels_x == paddle.transpose(cate_labels_x, [1, 0])),
            'float32')
        label_matrix = paddle.triu(label_matrix, diagonal=1)

        # IoU compensation
        compensate_iou = paddle.max((iou_matrix * label_matrix), axis=0)
        compensate_iou = paddle.expand(
            compensate_iou, shape=[n_samples, n_samples])
        compensate_iou = paddle.transpose(compensate_iou, [1, 0])

        # IoU decay
        decay_iou = iou_matrix * label_matrix

        # matrix nms
        if self.kernel == 'gaussian':
            decay_matrix = paddle.exp(-1 * self.sigma * (decay_iou**2))
            compensate_matrix = paddle.exp(-1 * self.sigma *
                                           (compensate_iou**2))
            decay_coefficient = paddle.min(decay_matrix / compensate_matrix,
                                           axis=0)
        elif self.kernel == 'linear':
            decay_matrix = (1 - decay_iou) / (1 - compensate_iou)
            decay_coefficient = paddle.min(decay_matrix, axis=0)
        else:
            raise NotImplementedError

        # update the score.
        cate_scores = cate_scores * decay_coefficient
        y = paddle.zeros(shape=paddle.shape(cate_scores), dtype='float32')
        keep = paddle.where(cate_scores >= self.update_threshold, cate_scores,
                            y)
        keep = paddle.nonzero(keep)
        keep = paddle.squeeze(keep, axis=[1])
        # Prevent empty results by appending a fake index
        keep = paddle.concat(
            [keep, paddle.cast(paddle.shape(cate_scores)[0] - 1, 'int64')])

        seg_preds = paddle.gather(seg_preds, index=keep)
        cate_scores = paddle.gather(cate_scores, index=keep)
        cate_labels = paddle.gather(cate_labels, index=keep)

        # sort and keep top_k
        sort_inds = self._sort_score(cate_scores, self.post_nms_top_n)
        seg_preds = paddle.gather(seg_preds, index=sort_inds)
        cate_scores = paddle.gather(cate_scores, index=sort_inds)
        cate_labels = paddle.gather(cate_labels, index=sort_inds)
        return seg_preds, cate_scores, cate_labels


def Conv2d(in_channels,
           out_channels,
           kernel_size,
           stride=1,
           padding=0,
           dilation=1,
           groups=1,
           bias=True,
           weight_init=Normal(std=0.001),
           bias_init=Constant(0.)):
    weight_attr = paddle.framework.ParamAttr(initializer=weight_init)
    if bias:
        bias_attr = paddle.framework.ParamAttr(initializer=bias_init)
    else:
        bias_attr = False
    conv = nn.Conv2D(
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        dilation,
        groups,
        weight_attr=weight_attr,
        bias_attr=bias_attr)
    return conv


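# Usage sketch (illustrative): this wrapper mirrors the torch.nn-style
# constructor (bias flag, explicit initializers) on top of paddle.nn.Conv2D.
#
#     conv = Conv2d(3, 16, 3, padding=1, bias=False)
#     y = conv(paddle.rand([1, 3, 8, 8]))  # [1, 16, 8, 8]

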
def ConvTranspose2d(in_channels,
                    out_channels,
                    kernel_size,
                    stride=1,
                    padding=0,
                    output_padding=0,
                    groups=1,
                    bias=True,
                    dilation=1,
                    weight_init=Normal(std=0.001),
                    bias_init=Constant(0.)):
    weight_attr = paddle.framework.ParamAttr(initializer=weight_init)
    if bias:
        bias_attr = paddle.framework.ParamAttr(initializer=bias_init)
    else:
        bias_attr = False
    conv = nn.Conv2DTranspose(
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        output_padding,
        dilation,
        groups,
        weight_attr=weight_attr,
        bias_attr=bias_attr)
    return conv


def BatchNorm2d(num_features, eps=1e-05, momentum=0.9, affine=True):
    if not affine:
        weight_attr = False
        bias_attr = False
    else:
        weight_attr = None
        bias_attr = None
    batchnorm = nn.BatchNorm2D(
        num_features,
        momentum,
        eps,
        weight_attr=weight_attr,
        bias_attr=bias_attr)
    return batchnorm


def ReLU():
    return nn.ReLU()


def Upsample(scale_factor=None, mode='nearest', align_corners=False):
    return nn.Upsample(None, scale_factor, mode, align_corners)


def MaxPool(kernel_size, stride, padding, ceil_mode=False):
    return nn.MaxPool2D(kernel_size, stride, padding, ceil_mode=ceil_mode)


class Concat(nn.Layer):
    def __init__(self, dim=0):
        super(Concat, self).__init__()
        self.dim = dim

    def forward(self, inputs):
        return paddle.concat(inputs, axis=self.dim)

    def extra_repr(self):
        return 'dim={}'.format(self.dim)


def _convert_attention_mask(attn_mask, dtype):
    """
    Convert the attention mask to the target dtype we expect.

    Parameters:
        attn_mask (Tensor, optional): A tensor used in multi-head attention
            to prevent attention to some unwanted positions, usually the
            paddings or the subsequent positions. It is a tensor with shape
            broadcasted to `[batch_size, n_head, sequence_length, sequence_length]`.
            When the data type is bool, the unwanted positions have `False`
            values and the others have `True` values. When the data type is
            int, the unwanted positions have 0 values and the others have 1
            values. When the data type is float, the unwanted positions have
            `-INF` values and the others have 0 values. It can be None when
            nothing wanted or needed to be prevented attention to. Default None.
        dtype (VarType): The target type of `attn_mask` we expect.
    Returns:
        Tensor: A Tensor with shape same as input `attn_mask`, with data type `dtype`.
    """
    return nn.layer.transformer._convert_attention_mask(attn_mask, dtype)


class MultiHeadAttention(nn.Layer):
    """
    Attention maps queries and a set of key-value pairs to outputs, and
    Multi-Head Attention performs multiple parallel attentions to jointly
    attend to information from different representation subspaces.

    Please refer to `Attention Is All You Need <https://arxiv.org/pdf/1706.03762.pdf>`_
    for more details.

    Parameters:
        embed_dim (int): The expected feature size in the input and output.
        num_heads (int): The number of heads in multi-head attention.
        dropout (float, optional): The dropout probability used on attention
            weights to drop some attention targets. 0 for no dropout. Default 0.
        kdim (int, optional): The feature size in key. If None, assumed equal to
            `embed_dim`. Default None.
        vdim (int, optional): The feature size in value. If None, assumed equal to
            `embed_dim`. Default None.
        need_weights (bool, optional): Indicate whether to return the attention
            weights. Default False.

    Examples:
        .. code-block:: python

            import paddle

            # encoder input: [batch_size, sequence_length, d_model]
            query = paddle.rand((2, 4, 128))
            # self attention mask: [batch_size, num_heads, query_len, query_len]
            attn_mask = paddle.rand((2, 2, 4, 4))
            multi_head_attn = paddle.nn.MultiHeadAttention(128, 2)
            output = multi_head_attn(query, None, None, attn_mask=attn_mask)  # [2, 4, 128]
    """

    def __init__(self,
                 embed_dim,
                 num_heads,
                 dropout=0.,
                 kdim=None,
                 vdim=None,
                 need_weights=False):
        super(MultiHeadAttention, self).__init__()
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.need_weights = need_weights

        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"

        if self._qkv_same_embed_dim:
            self.in_proj_weight = self.create_parameter(
                shape=[embed_dim, 3 * embed_dim],
                attr=None,
                dtype=self._dtype,
                is_bias=False)
            self.in_proj_bias = self.create_parameter(
                shape=[3 * embed_dim],
                attr=None,
                dtype=self._dtype,
                is_bias=True)
        else:
            self.q_proj = nn.Linear(embed_dim, embed_dim)
            self.k_proj = nn.Linear(self.kdim, embed_dim)
            self.v_proj = nn.Linear(self.vdim, embed_dim)

        self.out_proj = nn.Linear(embed_dim, embed_dim)
        self._type_list = ('q_proj', 'k_proj', 'v_proj')

        self._reset_parameters()

    def _reset_parameters(self):
        for p in self.parameters():
            if p.dim() > 1:
                xavier_uniform_(p)
            else:
                constant_(p)

    def compute_qkv(self, tensor, index):
        if self._qkv_same_embed_dim:
            tensor = F.linear(
                x=tensor,
                weight=self.in_proj_weight[:, index * self.embed_dim:(index + 1)
                                           * self.embed_dim],
                bias=self.in_proj_bias[index * self.embed_dim:(index + 1) *
                                       self.embed_dim]
                if self.in_proj_bias is not None else None)
        else:
            tensor = getattr(self, self._type_list[index])(tensor)
        tensor = tensor.reshape(
            [0, 0, self.num_heads, self.head_dim]).transpose([0, 2, 1, 3])
        return tensor

    def forward(self, query, key=None, value=None, attn_mask=None):
        r"""
        Applies multi-head attention to map queries and a set of key-value pairs
        to outputs.

        Parameters:
            query (Tensor): The queries for multi-head attention. It is a
                tensor with shape `[batch_size, query_length, embed_dim]`. The
                data type should be float32 or float64.
            key (Tensor, optional): The keys for multi-head attention. It is
                a tensor with shape `[batch_size, key_length, kdim]`. The
                data type should be float32 or float64. If None, use `query` as
                `key`. Default None.
            value (Tensor, optional): The values for multi-head attention. It
                is a tensor with shape `[batch_size, value_length, vdim]`.
                The data type should be float32 or float64. If None, use `query` as
                `value`. Default None.
            attn_mask (Tensor, optional): A tensor used in multi-head attention
                to prevent attention to some unwanted positions, usually the
                paddings or the subsequent positions. It is a tensor with shape
                broadcasted to `[batch_size, n_head, sequence_length, sequence_length]`.
                When the data type is bool, the unwanted positions have `False`
                values and the others have `True` values. When the data type is
                int, the unwanted positions have 0 values and the others have 1
                values. When the data type is float, the unwanted positions have
                `-INF` values and the others have 0 values. It can be None when
                nothing wanted or needed to be prevented attention to. Default None.

        Returns:
            Tensor|tuple: It is a tensor that has the same shape and data type \
                as `query`, representing attention output. Or a tuple if \
                `need_weights` is True or `cache` is not None. If `need_weights` \
                is True, except for attention output, the tuple also includes \
                the attention weights tensor shaped `[batch_size, num_heads, query_length, key_length]`. \
                If `cache` is not None, the tuple then includes the new cache \
                having the same type as `cache`, and if it is `StaticCache`, it \
                is same as the input `cache`, if it is `Cache`, the new cache \
                reserves tensors concatenating raw tensors with intermediate \
                results of current query.
        """
        key = query if key is None else key
        value = query if value is None else value
        # compute q, k, v
        q, k, v = (self.compute_qkv(t, i)
                   for i, t in enumerate([query, key, value]))

        # scaled dot-product attention
        product = paddle.matmul(x=q, y=k, transpose_y=True)
        scaling = float(self.head_dim)**-0.5
        product = product * scaling

        if attn_mask is not None:
            # Support bool or int mask
            attn_mask = _convert_attention_mask(attn_mask, product.dtype)
            product = product + attn_mask
        weights = F.softmax(product)
        if self.dropout:
            weights = F.dropout(
                weights,
                self.dropout,
                training=self.training,
                mode="upscale_in_train")
        out = paddle.matmul(weights, v)

        # combine heads
        out = paddle.transpose(out, perm=[0, 2, 1, 3])
        out = paddle.reshape(x=out, shape=[0, 0, out.shape[2] * out.shape[3]])

        # project to output
        out = self.out_proj(out)

        outs = [out]
        if self.need_weights:
            outs.append(weights)
        return out if len(outs) == 1 else tuple(outs)


@register
class ConvMixer(nn.Layer):
    def __init__(
            self,
            dim,
            depth,
            kernel_size=3, ):
        super().__init__()
        self.dim = dim
        self.depth = depth
        self.kernel_size = kernel_size

        self.mixer = self.conv_mixer(dim, depth, kernel_size)

    def forward(self, x):
        return self.mixer(x)

    @staticmethod
    def conv_mixer(
            dim,
            depth,
            kernel_size, ):
        Seq, ActBn = nn.Sequential, lambda x: Seq(x, nn.GELU(), nn.BatchNorm2D(dim))
        Residual = type('Residual', (Seq, ),
                        {'forward': lambda self, x: self[0](x) + x})
        return Seq(*[
            Seq(Residual(
                ActBn(
                    nn.Conv2D(
                        dim, dim, kernel_size, groups=dim, padding="same"))),
                ActBn(nn.Conv2D(dim, dim, 1))) for i in range(depth)
        ])


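# Usage sketch (illustrative): each ConvMixer block is a residual depthwise
# conv followed by a pointwise conv, both wrapped in GELU + BatchNorm; with
# padding="same" the channel count and spatial size are preserved.
#
#     mixer = ConvMixer(dim=64, depth=4, kernel_size=3)
#     y = mixer(paddle.rand([2, 64, 32, 32]))  # [2, 64, 32, 32]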