# pico_head.py
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.nn.initializer import Normal, Constant

from ppdet.modeling.ops import get_static_shape
from ..initializer import normal_
from ..assigners.utils import generate_anchors_for_grid_cell
from ..bbox_utils import bbox_center, batch_distance2bbox, bbox2distance
from ppdet.core.workspace import register
from ppdet.modeling.layers import ConvNormLayer
from .simota_head import OTAVFLHead
from .gfl_head import Integral, GFLHead
from ppdet.modeling.necks.csp_pan import DPModule

eps = 1e-9

__all__ = ['PicoHead', 'PicoHeadV2', 'PicoFeat']


class PicoSE(nn.Layer):
    def __init__(self, feat_channels):
        super(PicoSE, self).__init__()
        self.fc = nn.Conv2D(feat_channels, feat_channels, 1)
        self.conv = ConvNormLayer(feat_channels, feat_channels, 1, 1)

        self._init_weights()

    def _init_weights(self):
        normal_(self.fc.weight, std=0.001)

    def forward(self, feat, avg_feat):
        weight = F.sigmoid(self.fc(avg_feat))
        out = self.conv(feat * weight)
        return out
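
# Usage sketch (illustrative comment, not part of the original module): PicoSE
# gates the channels of a feature map with weights computed from its global
# average, then refines the result with a 1x1 ConvNormLayer. Assuming
# feat_channels=96 and a hypothetical 40x40 feature map:
#
#     se = PicoSE(feat_channels=96)
#     feat = paddle.rand([1, 96, 40, 40])
#     avg_feat = F.adaptive_avg_pool2d(feat, (1, 1))  # [1, 96, 1, 1]
#     out = se(feat, avg_feat)                        # [1, 96, 40, 40]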


@register
class PicoFeat(nn.Layer):
    """
    PicoFeat of PicoDet

    Args:
        feat_in (int): The channel number of input Tensor.
        feat_out (int): The channel number of output Tensor.
        num_fpn_stride (int): The number of FPN strides (head branches).
        num_convs (int): The convolution number of the PicoFeat.
        norm_type (str): Normalization type, 'bn'/'sync_bn'/'gn'.
        share_cls_reg (bool): Whether to share the cls and reg output.
        act (str): The activation function of each conv layer.
        use_se (bool): Whether to use the SE module.
    """

    def __init__(self,
                 feat_in=256,
                 feat_out=96,
                 num_fpn_stride=3,
                 num_convs=2,
                 norm_type='bn',
                 share_cls_reg=False,
                 act='hard_swish',
                 use_se=False):
        super(PicoFeat, self).__init__()
        self.num_convs = num_convs
        self.norm_type = norm_type
        self.share_cls_reg = share_cls_reg
        self.act = act
        self.use_se = use_se
        self.cls_convs = []
        self.reg_convs = []
        if use_se:
            assert share_cls_reg == True, \
                'In the case of using se, share_cls_reg must be set to True'
            self.se = nn.LayerList()
        for stage_idx in range(num_fpn_stride):
            cls_subnet_convs = []
            reg_subnet_convs = []
            for i in range(self.num_convs):
                in_c = feat_in if i == 0 else feat_out
                cls_conv_dw = self.add_sublayer(
                    'cls_conv_dw{}.{}'.format(stage_idx, i),
                    ConvNormLayer(
                        ch_in=in_c,
                        ch_out=feat_out,
                        filter_size=5,
                        stride=1,
                        groups=feat_out,
                        norm_type=norm_type,
                        bias_on=False,
                        lr_scale=2.))
                cls_subnet_convs.append(cls_conv_dw)
                cls_conv_pw = self.add_sublayer(
                    'cls_conv_pw{}.{}'.format(stage_idx, i),
                    ConvNormLayer(
                        ch_in=in_c,
                        ch_out=feat_out,
                        filter_size=1,
                        stride=1,
                        norm_type=norm_type,
                        bias_on=False,
                        lr_scale=2.))
                cls_subnet_convs.append(cls_conv_pw)

                if not self.share_cls_reg:
                    reg_conv_dw = self.add_sublayer(
                        'reg_conv_dw{}.{}'.format(stage_idx, i),
                        ConvNormLayer(
                            ch_in=in_c,
                            ch_out=feat_out,
                            filter_size=5,
                            stride=1,
                            groups=feat_out,
                            norm_type=norm_type,
                            bias_on=False,
                            lr_scale=2.))
                    reg_subnet_convs.append(reg_conv_dw)
                    reg_conv_pw = self.add_sublayer(
                        'reg_conv_pw{}.{}'.format(stage_idx, i),
                        ConvNormLayer(
                            ch_in=in_c,
                            ch_out=feat_out,
                            filter_size=1,
                            stride=1,
                            norm_type=norm_type,
                            bias_on=False,
                            lr_scale=2.))
                    reg_subnet_convs.append(reg_conv_pw)
            self.cls_convs.append(cls_subnet_convs)
            self.reg_convs.append(reg_subnet_convs)
            if use_se:
                self.se.append(PicoSE(feat_out))

    def act_func(self, x):
        if self.act == "leaky_relu":
            x = F.leaky_relu(x)
        elif self.act == "hard_swish":
            x = F.hardswish(x)
        elif self.act == "relu6":
            x = F.relu6(x)
        return x

    def forward(self, fpn_feat, stage_idx):
        assert stage_idx < len(self.cls_convs)
        cls_feat = fpn_feat
        reg_feat = fpn_feat
        for i in range(len(self.cls_convs[stage_idx])):
            cls_feat = self.act_func(self.cls_convs[stage_idx][i](cls_feat))
            reg_feat = cls_feat
            if not self.share_cls_reg:
                reg_feat = self.act_func(self.reg_convs[stage_idx][i](reg_feat))
        if self.use_se:
            avg_feat = F.adaptive_avg_pool2d(cls_feat, (1, 1))
            se_feat = self.act_func(self.se[stage_idx](cls_feat, avg_feat))
            return cls_feat, se_feat
        return cls_feat, reg_feat
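
# Shape sketch (illustrative comment, not part of the original module): each
# PicoFeat stage stacks num_convs depthwise(5x5) / pointwise(1x1) ConvNormLayer
# pairs. The depthwise conv uses groups=feat_out, so feat_in is expected to
# match feat_out (as in typical PicoDet configs). Assuming feat_in=feat_out=96:
#
#     cls_feat, reg_feat = pico_feat(fpn_feat, stage_idx=i)  # fpn_feat: [N, 96, H, W]
#
# both outputs are [N, 96, H, W]; with use_se=True the second output is the
# SE-refined feature instead of a separate regression branch.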


@register
class PicoHead(OTAVFLHead):
    """
    PicoHead

    Args:
        conv_feat (object): Instance of 'PicoFeat'.
        num_classes (int): Number of classes.
        fpn_stride (list): The stride of each FPN Layer.
        prior_prob (float): Used to set the bias init for the class prediction layer.
        loss_class (object): Instance of VariFocalLoss.
        loss_dfl (object): Instance of DistributionFocalLoss.
        loss_bbox (object): Instance of bbox loss.
        assigner (object): Instance of label assigner.
        reg_max (int): Max value of the integral set :math:`{0, ..., reg_max}`
            in QFL setting. Default: 16.
    """
    __inject__ = [
        'conv_feat', 'dgqp_module', 'loss_class', 'loss_dfl', 'loss_bbox',
        'assigner', 'nms'
    ]
    __shared__ = ['num_classes', 'eval_size']

    def __init__(self,
                 conv_feat='PicoFeat',
                 dgqp_module=None,
                 num_classes=80,
                 fpn_stride=[8, 16, 32],
                 prior_prob=0.01,
                 loss_class='VariFocalLoss',
                 loss_dfl='DistributionFocalLoss',
                 loss_bbox='GIoULoss',
                 assigner='SimOTAAssigner',
                 reg_max=16,
                 feat_in_chan=96,
                 nms=None,
                 nms_pre=1000,
                 cell_offset=0,
                 eval_size=None):
        super(PicoHead, self).__init__(
            conv_feat=conv_feat,
            dgqp_module=dgqp_module,
            num_classes=num_classes,
            fpn_stride=fpn_stride,
            prior_prob=prior_prob,
            loss_class=loss_class,
            loss_dfl=loss_dfl,
            loss_bbox=loss_bbox,
            assigner=assigner,
            reg_max=reg_max,
            feat_in_chan=feat_in_chan,
            nms=nms,
            nms_pre=nms_pre,
            cell_offset=cell_offset)
        self.conv_feat = conv_feat
        self.num_classes = num_classes
        self.fpn_stride = fpn_stride
        self.prior_prob = prior_prob
        self.loss_vfl = loss_class
        self.loss_dfl = loss_dfl
        self.loss_bbox = loss_bbox
        self.assigner = assigner
        self.reg_max = reg_max
        self.feat_in_chan = feat_in_chan
        self.nms = nms
        self.nms_pre = nms_pre
        self.cell_offset = cell_offset
        self.eval_size = eval_size

        self.use_sigmoid = self.loss_vfl.use_sigmoid
        if self.use_sigmoid:
            self.cls_out_channels = self.num_classes
        else:
            self.cls_out_channels = self.num_classes + 1
        bias_init_value = -math.log((1 - self.prior_prob) / self.prior_prob)
        # Clear the super class initialization
        self.gfl_head_cls = None
        self.gfl_head_reg = None
        self.scales_regs = None

        self.head_cls_list = []
        self.head_reg_list = []
        for i in range(len(fpn_stride)):
            head_cls = self.add_sublayer(
                "head_cls" + str(i),
                nn.Conv2D(
                    in_channels=self.feat_in_chan,
                    out_channels=self.cls_out_channels + 4 * (self.reg_max + 1)
                    if self.conv_feat.share_cls_reg else self.cls_out_channels,
                    kernel_size=1,
                    stride=1,
                    padding=0,
                    weight_attr=ParamAttr(initializer=Normal(
                        mean=0., std=0.01)),
                    bias_attr=ParamAttr(
                        initializer=Constant(value=bias_init_value))))
            self.head_cls_list.append(head_cls)
            if not self.conv_feat.share_cls_reg:
                head_reg = self.add_sublayer(
                    "head_reg" + str(i),
                    nn.Conv2D(
                        in_channels=self.feat_in_chan,
                        out_channels=4 * (self.reg_max + 1),
                        kernel_size=1,
                        stride=1,
                        padding=0,
                        weight_attr=ParamAttr(initializer=Normal(
                            mean=0., std=0.01)),
                        bias_attr=ParamAttr(initializer=Constant(value=0))))
                self.head_reg_list.append(head_reg)

        # initialize the anchor points
        if self.eval_size:
            self.anchor_points, self.stride_tensor = self._generate_anchors()
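
    # Note added for clarity (not in the original file): with prior_prob=0.01
    # the classification bias starts at -log((1 - 0.01) / 0.01) ~= -4.595, so
    # every class begins with sigmoid(bias) ~= 0.01. With the defaults
    # (num_classes=80, reg_max=16) a shared head conv emits
    # 80 + 4 * (16 + 1) = 148 channels per location; otherwise the cls and reg
    # convs emit 80 and 68 channels respectively.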

    def forward(self, fpn_feats, export_post_process=True):
        assert len(fpn_feats) == len(
            self.fpn_stride
        ), "The size of fpn_feats is not equal to size of fpn_stride"

        if self.training:
            return self.forward_train(fpn_feats)
        else:
            return self.forward_eval(
                fpn_feats, export_post_process=export_post_process)

    def forward_train(self, fpn_feats):
        cls_logits_list, bboxes_reg_list = [], []
        for i, fpn_feat in enumerate(fpn_feats):
            conv_cls_feat, conv_reg_feat = self.conv_feat(fpn_feat, i)
            if self.conv_feat.share_cls_reg:
                cls_logits = self.head_cls_list[i](conv_cls_feat)
                cls_score, bbox_pred = paddle.split(
                    cls_logits,
                    [self.cls_out_channels, 4 * (self.reg_max + 1)],
                    axis=1)
            else:
                cls_score = self.head_cls_list[i](conv_cls_feat)
                bbox_pred = self.head_reg_list[i](conv_reg_feat)

            if self.dgqp_module:
                quality_score = self.dgqp_module(bbox_pred)
                cls_score = F.sigmoid(cls_score) * quality_score

            cls_logits_list.append(cls_score)
            bboxes_reg_list.append(bbox_pred)

        return (cls_logits_list, bboxes_reg_list)

    def forward_eval(self, fpn_feats, export_post_process=True):
        if self.eval_size:
            anchor_points, stride_tensor = self.anchor_points, self.stride_tensor
        else:
            anchor_points, stride_tensor = self._generate_anchors(fpn_feats)
        cls_logits_list, bboxes_reg_list = [], []
        for i, fpn_feat in enumerate(fpn_feats):
            conv_cls_feat, conv_reg_feat = self.conv_feat(fpn_feat, i)
            if self.conv_feat.share_cls_reg:
                cls_logits = self.head_cls_list[i](conv_cls_feat)
                cls_score, bbox_pred = paddle.split(
                    cls_logits,
                    [self.cls_out_channels, 4 * (self.reg_max + 1)],
                    axis=1)
            else:
                cls_score = self.head_cls_list[i](conv_cls_feat)
                bbox_pred = self.head_reg_list[i](conv_reg_feat)

            if self.dgqp_module:
                quality_score = self.dgqp_module(bbox_pred)
                cls_score = F.sigmoid(cls_score) * quality_score

            if not export_post_process:
                # Now only supports batch size = 1 in deploy
                # TODO(ygh): support batch size > 1
                cls_score_out = F.sigmoid(cls_score).reshape(
                    [1, self.cls_out_channels, -1]).transpose([0, 2, 1])
                bbox_pred = bbox_pred.reshape([1, (self.reg_max + 1) * 4,
                                               -1]).transpose([0, 2, 1])
            else:
                _, _, h, w = fpn_feat.shape
                l = h * w
                cls_score_out = F.sigmoid(
                    cls_score.reshape([-1, self.cls_out_channels, l]))
                bbox_pred = bbox_pred.transpose([0, 2, 3, 1])
                bbox_pred = self.distribution_project(bbox_pred)
                bbox_pred = bbox_pred.reshape([-1, l, 4])

            cls_logits_list.append(cls_score_out)
            bboxes_reg_list.append(bbox_pred)

        if export_post_process:
            cls_logits_list = paddle.concat(cls_logits_list, axis=-1)
            bboxes_reg_list = paddle.concat(bboxes_reg_list, axis=1)
            bboxes_reg_list = batch_distance2bbox(anchor_points,
                                                  bboxes_reg_list)
            bboxes_reg_list *= stride_tensor

        return (cls_logits_list, bboxes_reg_list)
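
    # Decoding sketch (comment added for illustration): self.distribution_project
    # is an Integral layer (see gfl_head), which softmaxes each group of
    # reg_max + 1 logits and takes the expectation over the bins {0, ..., reg_max},
    # yielding four distances (left, top, right, bottom) in grid units.
    # batch_distance2bbox turns anchor points plus distances into xyxy boxes, and
    # multiplying by stride_tensor maps them to input-image pixels. With
    # export_post_process=True the eval outputs are therefore
    #     scores: [N, cls_out_channels, sum_i(H_i * W_i)]
    #     boxes:  [N, sum_i(H_i * W_i), 4]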

    def _generate_anchors(self, feats=None):
        # only used at eval time
        anchor_points = []
        stride_tensor = []
        for i, stride in enumerate(self.fpn_stride):
            if feats is not None:
                _, _, h, w = feats[i].shape
            else:
                h = math.ceil(self.eval_size[0] / stride)
                w = math.ceil(self.eval_size[1] / stride)
            shift_x = paddle.arange(end=w) + self.cell_offset
            shift_y = paddle.arange(end=h) + self.cell_offset
            shift_y, shift_x = paddle.meshgrid(shift_y, shift_x)
            anchor_point = paddle.cast(
                paddle.stack(
                    [shift_x, shift_y], axis=-1), dtype='float32')
            anchor_points.append(anchor_point.reshape([-1, 2]))
            stride_tensor.append(
                paddle.full(
                    [h * w, 1], stride, dtype='float32'))
        anchor_points = paddle.concat(anchor_points)
        stride_tensor = paddle.concat(stride_tensor)
        return anchor_points, stride_tensor
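
    # Example (hypothetical numbers, added for illustration): with
    # eval_size=[320, 320] and fpn_stride=[8, 16, 32] the grids are 40x40,
    # 20x20 and 10x10 cells, so anchor_points has shape [2100, 2] and
    # stride_tensor has shape [2100, 1] (1600 + 400 + 100 = 2100 points).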

    def post_process(self, head_outs, scale_factor, export_nms=True):
        pred_scores, pred_bboxes = head_outs
        if not export_nms:
            return pred_bboxes, pred_scores
        else:
            # rescale: [h_scale, w_scale] -> [w_scale, h_scale, w_scale, h_scale]
            scale_y, scale_x = paddle.split(scale_factor, 2, axis=-1)
            scale_factor = paddle.concat(
                [scale_x, scale_y, scale_x, scale_y],
                axis=-1).reshape([-1, 1, 4])
            # scale bbox to original image size.
            pred_bboxes /= scale_factor
            bbox_pred, bbox_num, _ = self.nms(pred_bboxes, pred_scores)
            return bbox_pred, bbox_num
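
# Post-processing sketch (illustrative comment with hypothetical numbers):
# scale_factor holds [h_scale, w_scale] per image, e.g. a 640x480 (HxW) image
# resized to 320x320 gives scale_factor = [0.5, 0.6667]. It is rearranged to
# [w, h, w, h] = [0.6667, 0.5, 0.6667, 0.5] and the predicted xyxy boxes are
# divided by it element-wise, mapping them back to the original image before NMS.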


@register
class PicoHeadV2(GFLHead):
    """
    PicoHeadV2

    Args:
        conv_feat (object): Instance of 'PicoFeat'.
        num_classes (int): Number of classes.
        fpn_stride (list): The stride of each FPN Layer.
        prior_prob (float): Used to set the bias init for the class prediction layer.
        loss_class (object): Instance of VariFocalLoss.
        loss_dfl (object): Instance of DistributionFocalLoss.
        loss_bbox (object): Instance of bbox loss.
        assigner (object): Instance of label assigner.
        reg_max (int): Max value of the integral set :math:`{0, ..., reg_max}`
            in QFL setting. Default: 16.
    """
    __inject__ = [
        'conv_feat', 'dgqp_module', 'loss_class', 'loss_dfl', 'loss_bbox',
        'static_assigner', 'assigner', 'nms'
    ]
    __shared__ = ['num_classes', 'eval_size']

    def __init__(self,
                 conv_feat='PicoFeatV2',
                 dgqp_module=None,
                 num_classes=80,
                 fpn_stride=[8, 16, 32],
                 prior_prob=0.01,
                 use_align_head=True,
                 loss_class='VariFocalLoss',
                 loss_dfl='DistributionFocalLoss',
                 loss_bbox='GIoULoss',
                 static_assigner_epoch=60,
                 static_assigner='ATSSAssigner',
                 assigner='TaskAlignedAssigner',
                 reg_max=16,
                 feat_in_chan=96,
                 nms=None,
                 nms_pre=1000,
                 cell_offset=0,
                 act='hard_swish',
                 grid_cell_scale=5.0,
                 eval_size=None):
        super(PicoHeadV2, self).__init__(
            conv_feat=conv_feat,
            dgqp_module=dgqp_module,
            num_classes=num_classes,
            fpn_stride=fpn_stride,
            prior_prob=prior_prob,
            loss_class=loss_class,
            loss_dfl=loss_dfl,
            loss_bbox=loss_bbox,
            reg_max=reg_max,
            feat_in_chan=feat_in_chan,
            nms=nms,
            nms_pre=nms_pre,
            cell_offset=cell_offset, )
        self.conv_feat = conv_feat
        self.num_classes = num_classes
        self.fpn_stride = fpn_stride
        self.prior_prob = prior_prob
        self.loss_vfl = loss_class
        self.loss_dfl = loss_dfl
        self.loss_bbox = loss_bbox
        self.static_assigner_epoch = static_assigner_epoch
        self.static_assigner = static_assigner
        self.assigner = assigner
        self.reg_max = reg_max
        self.feat_in_chan = feat_in_chan
        self.nms = nms
        self.nms_pre = nms_pre
        self.cell_offset = cell_offset
        self.act = act
        self.grid_cell_scale = grid_cell_scale
        self.use_align_head = use_align_head
        self.cls_out_channels = self.num_classes
        self.eval_size = eval_size

        bias_init_value = -math.log((1 - self.prior_prob) / self.prior_prob)
        # Clear the super class initialization
        self.gfl_head_cls = None
        self.gfl_head_reg = None
        self.scales_regs = None

        self.head_cls_list = nn.LayerList()
        self.head_reg_list = nn.LayerList()
        self.cls_align = nn.LayerList()

        for i in range(len(fpn_stride)):
            head_cls = self.add_sublayer(
                "head_cls" + str(i),
                nn.Conv2D(
                    in_channels=self.feat_in_chan,
                    out_channels=self.cls_out_channels,
                    kernel_size=1,
                    stride=1,
                    padding=0,
                    weight_attr=ParamAttr(initializer=Normal(
                        mean=0., std=0.01)),
                    bias_attr=ParamAttr(
                        initializer=Constant(value=bias_init_value))))
            self.head_cls_list.append(head_cls)
            head_reg = self.add_sublayer(
                "head_reg" + str(i),
                nn.Conv2D(
                    in_channels=self.feat_in_chan,
                    out_channels=4 * (self.reg_max + 1),
                    kernel_size=1,
                    stride=1,
                    padding=0,
                    weight_attr=ParamAttr(initializer=Normal(
                        mean=0., std=0.01)),
                    bias_attr=ParamAttr(initializer=Constant(value=0))))
            self.head_reg_list.append(head_reg)
            if self.use_align_head:
                self.cls_align.append(
                    DPModule(
                        self.feat_in_chan,
                        1,
                        5,
                        act=self.act,
                        use_act_in_out=False))

        # initialize the anchor points
        if self.eval_size:
            self.anchor_points, self.stride_tensor = self._generate_anchors()

    def forward(self, fpn_feats, export_post_process=True):
        assert len(fpn_feats) == len(
            self.fpn_stride
        ), "The size of fpn_feats is not equal to size of fpn_stride"

        if self.training:
            return self.forward_train(fpn_feats)
        else:
            return self.forward_eval(
                fpn_feats, export_post_process=export_post_process)

    def forward_train(self, fpn_feats):
        cls_score_list, reg_list, box_list = [], [], []
        for i, (fpn_feat, stride) in enumerate(zip(fpn_feats, self.fpn_stride)):
            b, _, h, w = get_static_shape(fpn_feat)
            # task decomposition
            conv_cls_feat, se_feat = self.conv_feat(fpn_feat, i)
            cls_logit = self.head_cls_list[i](se_feat)
            reg_pred = self.head_reg_list[i](se_feat)

            # cls prediction and alignment
            if self.use_align_head:
                cls_prob = F.sigmoid(self.cls_align[i](conv_cls_feat))
                cls_score = (F.sigmoid(cls_logit) * cls_prob + eps).sqrt()
            else:
                cls_score = F.sigmoid(cls_logit)

            cls_score_out = cls_score.transpose([0, 2, 3, 1])
            bbox_pred = reg_pred.transpose([0, 2, 3, 1])
            b, cell_h, cell_w, _ = paddle.shape(cls_score_out)
            y, x = self.get_single_level_center_point(
                [cell_h, cell_w], stride, cell_offset=self.cell_offset)
            center_points = paddle.stack([x, y], axis=-1)
            cls_score_out = cls_score_out.reshape(
                [b, -1, self.cls_out_channels])
            bbox_pred = self.distribution_project(bbox_pred) * stride
            bbox_pred = bbox_pred.reshape([b, cell_h * cell_w, 4])
            bbox_pred = batch_distance2bbox(
                center_points, bbox_pred, max_shapes=None)
            cls_score_list.append(cls_score.flatten(2).transpose([0, 2, 1]))
            reg_list.append(reg_pred.flatten(2).transpose([0, 2, 1]))
            box_list.append(bbox_pred / stride)

        cls_score_list = paddle.concat(cls_score_list, axis=1)
        box_list = paddle.concat(box_list, axis=1)
        reg_list = paddle.concat(reg_list, axis=1)
        return cls_score_list, reg_list, box_list, fpn_feats
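
    # Alignment sketch (comment added for illustration): with use_align_head=True
    # the training score is the geometric mean of the class probability and the
    # per-location alignment probability,
    #     cls_score = sqrt(sigmoid(cls_logit) * sigmoid(align_logit) + eps)
    # e.g. probabilities of 0.9 and 0.4 give sqrt(0.36) ~= 0.6. The returned
    # tuple holds [N, A, num_classes] scores, [N, A, 4*(reg_max+1)] raw
    # regression logits and [N, A, 4] decoded boxes in stride units
    # (A = sum of H_i * W_i over FPN levels), plus fpn_feats for label assignment.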

    def forward_eval(self, fpn_feats, export_post_process=True):
        if self.eval_size:
            anchor_points, stride_tensor = self.anchor_points, self.stride_tensor
        else:
            anchor_points, stride_tensor = self._generate_anchors(fpn_feats)
        cls_score_list, box_list = [], []
        for i, (fpn_feat, stride) in enumerate(zip(fpn_feats, self.fpn_stride)):
            _, _, h, w = fpn_feat.shape
            # task decomposition
            conv_cls_feat, se_feat = self.conv_feat(fpn_feat, i)
            cls_logit = self.head_cls_list[i](se_feat)
            reg_pred = self.head_reg_list[i](se_feat)

            # cls prediction and alignment
            if self.use_align_head:
                cls_prob = F.sigmoid(self.cls_align[i](conv_cls_feat))
                cls_score = (F.sigmoid(cls_logit) * cls_prob + eps).sqrt()
            else:
                cls_score = F.sigmoid(cls_logit)

            if not export_post_process:
                # Now only supports batch size = 1 in deploy
                cls_score_list.append(
                    cls_score.reshape([1, self.cls_out_channels, -1]).transpose(
                        [0, 2, 1]))
                box_list.append(
                    reg_pred.reshape([1, (self.reg_max + 1) * 4, -1]).transpose(
                        [0, 2, 1]))
            else:
                l = h * w
                cls_score_out = cls_score.reshape(
                    [-1, self.cls_out_channels, l])
                bbox_pred = reg_pred.transpose([0, 2, 3, 1])
                bbox_pred = self.distribution_project(bbox_pred)
                bbox_pred = bbox_pred.reshape([-1, l, 4])
                cls_score_list.append(cls_score_out)
                box_list.append(bbox_pred)

        if export_post_process:
            cls_score_list = paddle.concat(cls_score_list, axis=-1)
            box_list = paddle.concat(box_list, axis=1)
            box_list = batch_distance2bbox(anchor_points, box_list)
            box_list *= stride_tensor

        return cls_score_list, box_list

    def get_loss(self, head_outs, gt_meta):
        pred_scores, pred_regs, pred_bboxes, fpn_feats = head_outs
        gt_labels = gt_meta['gt_class']
        gt_bboxes = gt_meta['gt_bbox']
        gt_scores = gt_meta['gt_score'] if 'gt_score' in gt_meta else None
        num_imgs = gt_meta['im_id'].shape[0]
        pad_gt_mask = gt_meta['pad_gt_mask']

        anchors, _, num_anchors_list, stride_tensor_list = generate_anchors_for_grid_cell(
            fpn_feats, self.fpn_stride, self.grid_cell_scale, self.cell_offset)

        centers = bbox_center(anchors)

        # label assignment
        if gt_meta['epoch_id'] < self.static_assigner_epoch:
            assigned_labels, assigned_bboxes, assigned_scores = self.static_assigner(
                anchors,
                num_anchors_list,
                gt_labels,
                gt_bboxes,
                pad_gt_mask,
                bg_index=self.num_classes,
                gt_scores=gt_scores,
                pred_bboxes=pred_bboxes.detach() * stride_tensor_list)
        else:
            assigned_labels, assigned_bboxes, assigned_scores = self.assigner(
                pred_scores.detach(),
                pred_bboxes.detach() * stride_tensor_list,
                centers,
                num_anchors_list,
                gt_labels,
                gt_bboxes,
                pad_gt_mask,
                bg_index=self.num_classes,
                gt_scores=gt_scores)

        assigned_bboxes /= stride_tensor_list

        centers_shape = centers.shape
        flatten_centers = centers.expand(
            [num_imgs, centers_shape[0], centers_shape[1]]).reshape([-1, 2])
        flatten_strides = stride_tensor_list.expand(
            [num_imgs, centers_shape[0], 1]).reshape([-1, 1])
        flatten_cls_preds = pred_scores.reshape([-1, self.num_classes])
        flatten_regs = pred_regs.reshape([-1, 4 * (self.reg_max + 1)])
        flatten_bboxes = pred_bboxes.reshape([-1, 4])
        flatten_bbox_targets = assigned_bboxes.reshape([-1, 4])
        flatten_labels = assigned_labels.reshape([-1])
        flatten_assigned_scores = assigned_scores.reshape(
            [-1, self.num_classes])

        pos_inds = paddle.nonzero(
            paddle.logical_and((flatten_labels >= 0),
                               (flatten_labels < self.num_classes)),
            as_tuple=False).squeeze(1)

        num_total_pos = len(pos_inds)

        if num_total_pos > 0:
            pos_bbox_targets = paddle.gather(
                flatten_bbox_targets, pos_inds, axis=0)
            pos_decode_bbox_pred = paddle.gather(
                flatten_bboxes, pos_inds, axis=0)
            pos_reg = paddle.gather(flatten_regs, pos_inds, axis=0)
            pos_strides = paddle.gather(flatten_strides, pos_inds, axis=0)
            pos_centers = paddle.gather(
                flatten_centers, pos_inds, axis=0) / pos_strides

            weight_targets = flatten_assigned_scores.detach()
            weight_targets = paddle.gather(
                weight_targets.max(axis=1, keepdim=True), pos_inds, axis=0)

            pred_corners = pos_reg.reshape([-1, self.reg_max + 1])
            target_corners = bbox2distance(pos_centers, pos_bbox_targets,
                                           self.reg_max).reshape([-1])
            # regression loss
            loss_bbox = paddle.sum(
                self.loss_bbox(pos_decode_bbox_pred,
                               pos_bbox_targets) * weight_targets)

            # dfl loss
            loss_dfl = self.loss_dfl(
                pred_corners,
                target_corners,
                weight=weight_targets.expand([-1, 4]).reshape([-1]),
                avg_factor=4.0)
        else:
            loss_bbox = paddle.zeros([1])
            loss_dfl = paddle.zeros([1])

        avg_factor = flatten_assigned_scores.sum()
        if paddle.distributed.get_world_size() > 1:
            paddle.distributed.all_reduce(avg_factor)
            avg_factor = paddle.clip(
                avg_factor / paddle.distributed.get_world_size(), min=1)
        loss_vfl = self.loss_vfl(
            flatten_cls_preds, flatten_assigned_scores, avg_factor=avg_factor)

        loss_bbox = loss_bbox / avg_factor
        loss_dfl = loss_dfl / avg_factor

        loss_states = dict(
            loss_vfl=loss_vfl, loss_bbox=loss_bbox, loss_dfl=loss_dfl)

        return loss_states
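
    # Normalization sketch (hypothetical numbers, added for illustration):
    # avg_factor is the sum of the assigned soft scores. With 4 workers whose
    # local sums are 12, 8, 10 and 6, all_reduce gives 36 and
    # avg_factor = max(36 / 4, 1) = 9; loss_bbox and loss_dfl are divided by
    # this value, and loss_vfl is normalized by it internally via avg_factor,
    # so the loss scale stays comparable across batches and devices.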

    def _generate_anchors(self, feats=None):
        # only used at eval time
        anchor_points = []
        stride_tensor = []
        for i, stride in enumerate(self.fpn_stride):
            if feats is not None:
                _, _, h, w = feats[i].shape
            else:
                h = math.ceil(self.eval_size[0] / stride)
                w = math.ceil(self.eval_size[1] / stride)
            shift_x = paddle.arange(end=w) + self.cell_offset
            shift_y = paddle.arange(end=h) + self.cell_offset
            shift_y, shift_x = paddle.meshgrid(shift_y, shift_x)
            anchor_point = paddle.cast(
                paddle.stack(
                    [shift_x, shift_y], axis=-1), dtype='float32')
            anchor_points.append(anchor_point.reshape([-1, 2]))
            stride_tensor.append(
                paddle.full(
                    [h * w, 1], stride, dtype='float32'))
        anchor_points = paddle.concat(anchor_points)
        stride_tensor = paddle.concat(stride_tensor)
        return anchor_points, stride_tensor

    def post_process(self, head_outs, scale_factor, export_nms=True):
        pred_scores, pred_bboxes = head_outs
        if not export_nms:
            return pred_bboxes, pred_scores
        else:
            # rescale: [h_scale, w_scale] -> [w_scale, h_scale, w_scale, h_scale]
            scale_y, scale_x = paddle.split(scale_factor, 2, axis=-1)
            scale_factor = paddle.concat(
                [scale_x, scale_y, scale_x, scale_y],
                axis=-1).reshape([-1, 1, 4])
            # scale bbox to original image size.
            pred_bboxes /= scale_factor
            bbox_pred, bbox_num, _ = self.nms(pred_bboxes, pred_scores)
            return bbox_pred, bbox_num