# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The code is based on:
# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/ld_head.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math

import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.nn.initializer import Normal, Constant

from ppdet.core.workspace import register, serializable
from ppdet.modeling.layers import ConvNormLayer
from ppdet.modeling.bbox_utils import distance2bbox, bbox2distance, batch_distance2bbox
from ppdet.data.transform.atss_assigner import bbox_overlaps

from .gfl_head import GFLHead
  31. @register
  32. class LDGFLHead(GFLHead):
  33. """
  34. GFLHead for LD distill
  35. Args:
  36. conv_feat (object): Instance of 'FCOSFeat'
  37. num_classes (int): Number of classes
  38. fpn_stride (list): The stride of each FPN Layer
  39. prior_prob (float): Used to set the bias init for the class prediction layer
  40. loss_class (object): Instance of QualityFocalLoss.
  41. loss_dfl (object): Instance of DistributionFocalLoss.
  42. loss_bbox (object): Instance of bbox loss.
  43. reg_max: Max value of integral set :math: `{0, ..., reg_max}`
  44. n QFL setting. Default: 16.
  45. """
  46. __inject__ = [
  47. 'conv_feat', 'dgqp_module', 'loss_class', 'loss_dfl', 'loss_bbox',
  48. 'loss_ld', 'loss_ld_vlr', 'loss_kd', 'nms'
  49. ]
  50. __shared__ = ['num_classes']
  51. def __init__(self,
  52. conv_feat='FCOSFeat',
  53. dgqp_module=None,
  54. num_classes=80,
  55. fpn_stride=[8, 16, 32, 64, 128],
  56. prior_prob=0.01,
  57. loss_class='QualityFocalLoss',
  58. loss_dfl='DistributionFocalLoss',
  59. loss_bbox='GIoULoss',
  60. loss_ld='KnowledgeDistillationKLDivLoss',
  61. loss_ld_vlr='KnowledgeDistillationKLDivLoss',
  62. loss_kd='KnowledgeDistillationKLDivLoss',
  63. reg_max=16,
  64. feat_in_chan=256,
  65. nms=None,
  66. nms_pre=1000,
  67. cell_offset=0):
  68. super(LDGFLHead, self).__init__(
  69. conv_feat=conv_feat,
  70. dgqp_module=dgqp_module,
  71. num_classes=num_classes,
  72. fpn_stride=fpn_stride,
  73. prior_prob=prior_prob,
  74. loss_class=loss_class,
  75. loss_dfl=loss_dfl,
  76. loss_bbox=loss_bbox,
  77. reg_max=reg_max,
  78. feat_in_chan=feat_in_chan,
  79. nms=nms,
  80. nms_pre=nms_pre,
  81. cell_offset=cell_offset)
  82. self.loss_ld = loss_ld
  83. self.loss_kd = loss_kd
  84. self.loss_ld_vlr = loss_ld_vlr
  85. def forward(self, fpn_feats):
  86. assert len(fpn_feats) == len(
  87. self.fpn_stride
  88. ), "The size of fpn_feats is not equal to size of fpn_stride"
  89. cls_logits_list = []
  90. bboxes_reg_list = []
  91. for stride, scale_reg, fpn_feat in zip(self.fpn_stride,
  92. self.scales_regs, fpn_feats):
  93. conv_cls_feat, conv_reg_feat = self.conv_feat(fpn_feat)
  94. cls_score = self.gfl_head_cls(conv_cls_feat)
  95. bbox_pred = scale_reg(self.gfl_head_reg(conv_reg_feat))
  96. if self.dgqp_module:
  97. quality_score = self.dgqp_module(bbox_pred)
  98. cls_score = F.sigmoid(cls_score) * quality_score
  99. if not self.training:
  100. cls_score = F.sigmoid(cls_score.transpose([0, 2, 3, 1]))
  101. bbox_pred = bbox_pred.transpose([0, 2, 3, 1])
  102. b, cell_h, cell_w, _ = paddle.shape(cls_score)
  103. y, x = self.get_single_level_center_point(
  104. [cell_h, cell_w], stride, cell_offset=self.cell_offset)
  105. center_points = paddle.stack([x, y], axis=-1)
  106. cls_score = cls_score.reshape([b, -1, self.cls_out_channels])
  107. bbox_pred = self.distribution_project(bbox_pred) * stride
  108. bbox_pred = bbox_pred.reshape([b, cell_h * cell_w, 4])
  109. # NOTE: If keep_ratio=False and image shape value that
  110. # multiples of 32, distance2bbox not set max_shapes parameter
  111. # to speed up model prediction. If need to set max_shapes,
  112. # please use inputs['im_shape'].
  113. bbox_pred = batch_distance2bbox(
  114. center_points, bbox_pred, max_shapes=None)
  115. cls_logits_list.append(cls_score)
  116. bboxes_reg_list.append(bbox_pred)
  117. return (cls_logits_list, bboxes_reg_list)
  118. def get_loss(self, gfl_head_outs, gt_meta, soft_label_list,
  119. soft_targets_list):
  120. cls_logits, bboxes_reg = gfl_head_outs
  121. num_level_anchors = [
  122. featmap.shape[-2] * featmap.shape[-1] for featmap in cls_logits
  123. ]
  124. grid_cells_list = self._images_to_levels(gt_meta['grid_cells'],
  125. num_level_anchors)
  126. labels_list = self._images_to_levels(gt_meta['labels'],
  127. num_level_anchors)
  128. label_weights_list = self._images_to_levels(gt_meta['label_weights'],
  129. num_level_anchors)
  130. bbox_targets_list = self._images_to_levels(gt_meta['bbox_targets'],
  131. num_level_anchors)
  132. # vlr regions
  133. vlr_regions_list = self._images_to_levels(gt_meta['vlr_regions'],
  134. num_level_anchors)
  135. num_total_pos = sum(gt_meta['pos_num'])
  136. try:
  137. paddle.distributed.all_reduce(num_total_pos)
  138. num_total_pos = paddle.clip(
  139. num_total_pos / paddle.distributed.get_world_size(), min=1.)
  140. except:
  141. num_total_pos = max(num_total_pos, 1)
  142. loss_bbox_list, loss_dfl_list, loss_qfl_list, loss_ld_list, avg_factor = [], [], [], [], []
  143. loss_ld_vlr_list, loss_kd_list = [], []
  144. for cls_score, bbox_pred, grid_cells, labels, label_weights, bbox_targets, stride, soft_targets,\
  145. soft_label, vlr_region in zip(
  146. cls_logits, bboxes_reg, grid_cells_list, labels_list,
  147. label_weights_list, bbox_targets_list, self.fpn_stride, soft_targets_list,
  148. soft_label_list, vlr_regions_list):
  149. grid_cells = grid_cells.reshape([-1, 4])
  150. cls_score = cls_score.transpose([0, 2, 3, 1]).reshape(
  151. [-1, self.cls_out_channels])
  152. bbox_pred = bbox_pred.transpose([0, 2, 3, 1]).reshape(
  153. [-1, 4 * (self.reg_max + 1)])
  154. soft_targets = soft_targets.transpose([0, 2, 3, 1]).reshape(
  155. [-1, 4 * (self.reg_max + 1)])
  156. soft_label = soft_label.transpose([0, 2, 3, 1]).reshape(
  157. [-1, self.cls_out_channels])
  158. # feture im
  159. # teacher_x = teacher_x.transpose([0, 2, 3, 1]).reshape([-1, 256])
  160. # x = x.transpose([0, 2, 3, 1]).reshape([-1, 256])
  161. bbox_targets = bbox_targets.reshape([-1, 4])
  162. labels = labels.reshape([-1])
  163. label_weights = label_weights.reshape([-1])
  164. vlr_region = vlr_region.reshape([-1])
  165. bg_class_ind = self.num_classes
  166. pos_inds = paddle.nonzero(
  167. paddle.logical_and((labels >= 0), (labels < bg_class_ind)),
  168. as_tuple=False).squeeze(1)
  169. score = np.zeros(labels.shape)
  170. remain_inds = (vlr_region > 0).nonzero()
  171. if len(pos_inds) > 0:
  172. pos_bbox_targets = paddle.gather(bbox_targets, pos_inds, axis=0)
  173. pos_bbox_pred = paddle.gather(bbox_pred, pos_inds, axis=0)
  174. pos_grid_cells = paddle.gather(grid_cells, pos_inds, axis=0)
  175. pos_grid_cell_centers = self._grid_cells_to_center(
  176. pos_grid_cells) / stride
  177. weight_targets = F.sigmoid(cls_score.detach())
  178. weight_targets = paddle.gather(
  179. weight_targets.max(axis=1, keepdim=True), pos_inds, axis=0)
  180. pos_bbox_pred_corners = self.distribution_project(pos_bbox_pred)
  181. pos_decode_bbox_pred = distance2bbox(pos_grid_cell_centers,
  182. pos_bbox_pred_corners)
  183. pos_decode_bbox_targets = pos_bbox_targets / stride
  184. bbox_iou = bbox_overlaps(
  185. pos_decode_bbox_pred.detach().numpy(),
  186. pos_decode_bbox_targets.detach().numpy(),
  187. is_aligned=True)
  188. score[pos_inds.numpy()] = bbox_iou
  189. pred_corners = pos_bbox_pred.reshape([-1, self.reg_max + 1])
  190. pos_soft_targets = paddle.gather(soft_targets, pos_inds, axis=0)
  191. soft_corners = pos_soft_targets.reshape([-1, self.reg_max + 1])
  192. target_corners = bbox2distance(pos_grid_cell_centers,
  193. pos_decode_bbox_targets,
  194. self.reg_max).reshape([-1])
  195. # regression loss
  196. loss_bbox = paddle.sum(
  197. self.loss_bbox(pos_decode_bbox_pred,
  198. pos_decode_bbox_targets) * weight_targets)
  199. # dfl loss
  200. loss_dfl = self.loss_dfl(
  201. pred_corners,
  202. target_corners,
  203. weight=weight_targets.expand([-1, 4]).reshape([-1]),
  204. avg_factor=4.0)
  205. # ld loss
  206. loss_ld = self.loss_ld(
  207. pred_corners,
  208. soft_corners,
  209. weight=weight_targets.expand([-1, 4]).reshape([-1]),
  210. avg_factor=4.0)
  211. loss_kd = self.loss_kd(
  212. paddle.gather(
  213. cls_score, pos_inds, axis=0),
  214. paddle.gather(
  215. soft_label, pos_inds, axis=0),
  216. weight=paddle.gather(
  217. label_weights, pos_inds, axis=0),
  218. avg_factor=pos_inds.shape[0])
  219. else:
  220. loss_bbox = bbox_pred.sum() * 0
  221. loss_dfl = bbox_pred.sum() * 0
  222. loss_ld = bbox_pred.sum() * 0
  223. loss_kd = bbox_pred.sum() * 0
  224. weight_targets = paddle.to_tensor([0], dtype='float32')
  225. if len(remain_inds) > 0:
  226. neg_pred_corners = bbox_pred[remain_inds].reshape(
  227. [-1, self.reg_max + 1])
  228. neg_soft_corners = soft_targets[remain_inds].reshape(
  229. [-1, self.reg_max + 1])
  230. remain_targets = vlr_region[remain_inds]
  231. loss_ld_vlr = self.loss_ld_vlr(
  232. neg_pred_corners,
  233. neg_soft_corners,
  234. weight=remain_targets.expand([-1, 4]).reshape([-1]),
  235. avg_factor=16.0)
  236. else:
  237. loss_ld_vlr = bbox_pred.sum() * 0
  238. # qfl loss
  239. score = paddle.to_tensor(score)
  240. loss_qfl = self.loss_qfl(
  241. cls_score, (labels, score),
  242. weight=label_weights,
  243. avg_factor=num_total_pos)
  244. loss_bbox_list.append(loss_bbox)
  245. loss_dfl_list.append(loss_dfl)
  246. loss_qfl_list.append(loss_qfl)
  247. loss_ld_list.append(loss_ld)
  248. loss_ld_vlr_list.append(loss_ld_vlr)
  249. loss_kd_list.append(loss_kd)
  250. avg_factor.append(weight_targets.sum())
  251. avg_factor = sum(avg_factor) # + 1e-6
  252. try:
  253. paddle.distributed.all_reduce(avg_factor)
  254. avg_factor = paddle.clip(
  255. avg_factor / paddle.distributed.get_world_size(), min=1)
  256. except:
  257. avg_factor = max(avg_factor.item(), 1)
  258. if avg_factor <= 0:
  259. loss_qfl = paddle.to_tensor(0, dtype='float32', stop_gradient=False)
  260. loss_bbox = paddle.to_tensor(
  261. 0, dtype='float32', stop_gradient=False)
  262. loss_dfl = paddle.to_tensor(0, dtype='float32', stop_gradient=False)
  263. loss_ld = paddle.to_tensor(0, dtype='float32', stop_gradient=False)
  264. loss_ld_vlr = paddle.to_tensor(
  265. 0, dtype='float32', stop_gradient=False)
  266. loss_kd = paddle.to_tensor(0, dtype='float32', stop_gradient=False)
  267. else:
  268. losses_bbox = list(map(lambda x: x / avg_factor, loss_bbox_list))
  269. losses_dfl = list(map(lambda x: x / avg_factor, loss_dfl_list))
  270. loss_qfl = sum(loss_qfl_list)
  271. loss_bbox = sum(losses_bbox)
  272. loss_dfl = sum(losses_dfl)
  273. loss_ld = sum(loss_ld_list)
  274. loss_ld_vlr = sum(loss_ld_vlr_list)
  275. loss_kd = sum(loss_kd_list)
  276. loss_states = dict(
  277. loss_qfl=loss_qfl,
  278. loss_bbox=loss_bbox,
  279. loss_dfl=loss_dfl,
  280. loss_ld=loss_ld,
  281. loss_ld_vlr=loss_ld_vlr,
  282. loss_kd=loss_kd)
  283. return loss_states