
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import paddle.nn.functional as F
import paddle.nn as nn
from paddle import ParamAttr
from paddle.regularizer import L2Decay

try:
    import paddle._legacy_C_ops as C_ops
except ImportError:
    import paddle._C_ops as C_ops

from paddle import in_dynamic_mode
from paddle.common_ops_import import Variable, LayerHelper, check_variable_and_dtype, check_type, check_dtype

__all__ = [
    'prior_box', 'generate_proposals', 'box_coder', 'multiclass_nms',
    'distribute_fpn_proposals', 'matrix_nms', 'batch_norm', 'mish', 'silu',
    'swish', 'identity', 'anchor_generator'
]


def identity(x):
    return x


def mish(x):
    # Use the native op when available; otherwise fall back to the explicit
    # formulation mish(x) = x * tanh(softplus(x)).
    return F.mish(x) if hasattr(F, 'mish') else x * F.tanh(F.softplus(x))


def silu(x):
    return F.silu(x)


def swish(x):
    return x * F.sigmoid(x)


TRT_ACT_SPEC = {'swish': swish, 'silu': swish}

ACT_SPEC = {'mish': mish, 'silu': silu}


def get_act_fn(act=None, trt=False):
    assert act is None or isinstance(act, (
        str, dict)), 'name of activation should be str, dict or None'
    if not act:
        return identity

    if isinstance(act, dict):
        name = act['name']
        act.pop('name')
        kwargs = act
    else:
        name = act
        kwargs = dict()

    if trt and name in TRT_ACT_SPEC:
        fn = TRT_ACT_SPEC[name]
    elif name in ACT_SPEC:
        fn = ACT_SPEC[name]
    else:
        # Fall back to paddle.nn.functional for any other activation name.
        fn = getattr(F, name)

    return lambda x: fn(x, **kwargs)
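
# Illustrative usage sketch (not part of the original module): get_act_fn
# builds an activation callable from a config-style spec; extra dict keys
# are forwarded as keyword arguments to the underlying functional op.
#
#   act = get_act_fn('mish')                                   # by name
#   act = get_act_fn({'name': 'leaky_relu', 'negative_slope': 0.1})
#   y = act(paddle.rand([2, 8, 32, 32]))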


def batch_norm(ch,
               norm_type='bn',
               norm_decay=0.,
               freeze_norm=False,
               initializer=None,
               data_format='NCHW'):
    norm_lr = 0. if freeze_norm else 1.
    weight_attr = ParamAttr(
        initializer=initializer,
        learning_rate=norm_lr,
        regularizer=L2Decay(norm_decay),
        trainable=False if freeze_norm else True)
    bias_attr = ParamAttr(
        learning_rate=norm_lr,
        regularizer=L2Decay(norm_decay),
        trainable=False if freeze_norm else True)

    if norm_type in ['sync_bn', 'bn']:
        norm_layer = nn.BatchNorm2D(
            ch,
            weight_attr=weight_attr,
            bias_attr=bias_attr,
            data_format=data_format)

    norm_params = norm_layer.parameters()
    if freeze_norm:
        for param in norm_params:
            param.stop_gradient = True

    return norm_layer
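
# Illustrative usage sketch (not part of the original module): a frozen,
# decay-free BatchNorm as typically configured in detection backbones.
#
#   bn = batch_norm(64, norm_type='bn', freeze_norm=True)
#   y = bn(paddle.rand([2, 64, 32, 32]))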


@paddle.jit.not_to_static
def anchor_generator(input,
                     anchor_sizes=None,
                     aspect_ratios=None,
                     variance=[0.1, 0.1, 0.2, 0.2],
                     stride=None,
                     offset=0.5):
    """
    **Anchor generator operator**

    Generate anchors for the Faster R-CNN algorithm. Each position of the
    input produces N anchors, where N = size(anchor_sizes) *
    size(aspect_ratios). The generated anchors iterate over aspect_ratios
    first, then over anchor_sizes.

    Args:
        input(Variable): 4-D Tensor with shape [N,C,H,W]. The input feature map.
        anchor_sizes(float32|list|tuple, optional): The anchor sizes of generated
            anchors, given in absolute pixels e.g. [64., 128., 256., 512.].
            For instance, an anchor size of 64 means the area of this anchor
            equals 64**2. None by default.
        aspect_ratios(float32|list|tuple, optional): The height / width ratios
            of generated anchors, e.g. [0.5, 1.0, 2.0]. None by default.
        variance(list|tuple, optional): The variances to be used in box
            regression deltas. The data type is float32, [0.1, 0.1, 0.2, 0.2]
            by default.
        stride(list|tuple, optional): The anchor stride across width and height.
            The data type is float32, e.g. [16.0, 16.0]. None by default.
        offset(float32, optional): Prior boxes center offset. 0.5 by default.

    Returns:
        Tuple:
        Anchors(Variable): The output anchors with a layout of [H, W, num_anchors, 4].
            H is the height of input, W is the width of input,
            num_anchors is the box count of each position.
            Each anchor is in (xmin, ymin, xmax, ymax) format and unnormalized.
        Variances(Variable): The expanded variances of anchors
            with a layout of [H, W, num_priors, 4].
            H is the height of input, W is the width of input,
            num_anchors is the box count of each position.
            Each variance is in (xcenter, ycenter, w, h) format.

    Examples:
        .. code-block:: python

            import paddle
            from ppdet.modeling import ops
            paddle.enable_static()

            conv1 = paddle.static.data(name='conv1', shape=[None, 48, 16, 16], dtype='float32')
            anchor, var = ops.anchor_generator(
                input=conv1,
                anchor_sizes=[64, 128, 256, 512],
                aspect_ratios=[0.5, 1.0, 2.0],
                variance=[0.1, 0.1, 0.2, 0.2],
                stride=[16.0, 16.0],
                offset=0.5)
    """

    def _is_list_or_tuple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

    if not _is_list_or_tuple_(anchor_sizes):
        anchor_sizes = [anchor_sizes]
    if not _is_list_or_tuple_(aspect_ratios):
        aspect_ratios = [aspect_ratios]
    if not (_is_list_or_tuple_(stride) and len(stride) == 2):
        raise ValueError('stride should be a list or tuple '
                         'with length 2, (stride_width, stride_height).')

    anchor_sizes = list(map(float, anchor_sizes))
    aspect_ratios = list(map(float, aspect_ratios))
    stride = list(map(float, stride))

    if in_dynamic_mode():
        attrs = ('anchor_sizes', anchor_sizes, 'aspect_ratios', aspect_ratios,
                 'variances', variance, 'stride', stride, 'offset', offset)
        anchor, var = C_ops.anchor_generator(input, *attrs)
        return anchor, var

    helper = LayerHelper("anchor_generator", **locals())
    dtype = helper.input_dtype()
    attrs = {
        'anchor_sizes': anchor_sizes,
        'aspect_ratios': aspect_ratios,
        'variances': variance,
        'stride': stride,
        'offset': offset
    }

    anchor = helper.create_variable_for_type_inference(dtype)
    var = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="anchor_generator",
        inputs={"Input": input},
        outputs={"Anchors": anchor,
                 "Variances": var},
        attrs=attrs)
    anchor.stop_gradient = True
    var.stop_gradient = True
    return anchor, var
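
# Illustrative dygraph usage sketch (assumption: eager mode; the docstring
# example above covers static graph). With 2 sizes and 3 ratios, each
# position yields 6 anchors:
#
#   feat = paddle.rand([1, 48, 16, 16])
#   anchors, variances = anchor_generator(
#       feat, anchor_sizes=[64., 128.], aspect_ratios=[0.5, 1., 2.],
#       stride=[16., 16.])
#   # anchors: [16, 16, 6, 4], variances: [16, 16, 6, 4]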


@paddle.jit.not_to_static
def distribute_fpn_proposals(fpn_rois,
                             min_level,
                             max_level,
                             refer_level,
                             refer_scale,
                             pixel_offset=False,
                             rois_num=None,
                             name=None):
    r"""
    **This op only takes LoDTensor as input.** In Feature Pyramid Networks
    (FPN) models, all proposals need to be distributed to different FPN
    levels according to the scale of the proposals, the referring scale and
    the referring level. Besides, to restore the order of proposals, we
    return an array which indicates the original index of the rois in the
    current proposals. The FPN level of each roi is computed as follows:

    .. math::

        roi\_scale &= \sqrt{BBoxArea(fpn\_roi)} \\
        level &= floor(\log(\frac{roi\_scale}{refer\_scale}) + refer\_level)

    where BBoxArea computes the area of each roi.

    Args:
        fpn_rois(Variable): 2-D Tensor with shape [N, 4] and data type
            float32 or float64. The input fpn_rois.
        min_level(int32): The lowest level of the FPN layer where the
            proposals come from.
        max_level(int32): The highest level of the FPN layer where the
            proposals come from.
        refer_level(int32): The referring level of the FPN layer with
            specified scale.
        refer_scale(int32): The referring scale of the FPN layer with
            specified level.
        pixel_offset(bool, optional): Whether to add a pixel offset (1) when
            computing box width, height and area. False by default.
        rois_num(Tensor): 1-D Tensor containing the number of RoIs in each
            image. The shape is [B] and the data type is int32. B is the
            number of images. If it is not None then a list of 1-D Tensors is
            returned; each element is the output RoIs' number of each image
            on the corresponding level and the shape is [B]. None by default.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name does not need to be set.
            None by default.

    Returns:
        Tuple:
        multi_rois(List): A list of 2-D LoDTensors with shape [M, 4] and
            data type float32 or float64. The length is
            max_level - min_level + 1. The proposals in each FPN level.
        restore_ind(Variable): A 2-D Tensor with shape [N, 1], where N is
            the total number of rois. The data type is int32. It is used to
            restore the order of fpn_rois.
        rois_num_per_level(List): A list of 1-D Tensors, each holding the
            RoIs' number of each image on the corresponding level. The shape
            is [B] and the data type is int32. B is the number of images.

    Examples:
        .. code-block:: python

            import paddle
            from ppdet.modeling import ops
            paddle.enable_static()

            fpn_rois = paddle.static.data(
                name='data', shape=[None, 4], dtype='float32', lod_level=1)
            multi_rois, restore_ind = ops.distribute_fpn_proposals(
                fpn_rois=fpn_rois,
                min_level=2,
                max_level=5,
                refer_level=4,
                refer_scale=224)
    """
    num_lvl = max_level - min_level + 1

    if in_dynamic_mode():
        assert rois_num is not None, "rois_num should not be None in dygraph mode."
        attrs = ('min_level', min_level, 'max_level', max_level, 'refer_level',
                 refer_level, 'refer_scale', refer_scale, 'pixel_offset',
                 pixel_offset)
        multi_rois, restore_ind, rois_num_per_level = C_ops.distribute_fpn_proposals(
            fpn_rois, rois_num, num_lvl, num_lvl, *attrs)
        return multi_rois, restore_ind, rois_num_per_level

    else:
        check_variable_and_dtype(fpn_rois, 'fpn_rois', ['float32', 'float64'],
                                 'distribute_fpn_proposals')
        helper = LayerHelper('distribute_fpn_proposals', **locals())
        dtype = helper.input_dtype('fpn_rois')
        multi_rois = [
            helper.create_variable_for_type_inference(dtype)
            for i in range(num_lvl)
        ]

        restore_ind = helper.create_variable_for_type_inference(dtype='int32')

        inputs = {'FpnRois': fpn_rois}
        outputs = {
            'MultiFpnRois': multi_rois,
            'RestoreIndex': restore_ind,
        }

        if rois_num is not None:
            inputs['RoisNum'] = rois_num
            rois_num_per_level = [
                helper.create_variable_for_type_inference(dtype='int32')
                for i in range(num_lvl)
            ]
            outputs['MultiLevelRoIsNum'] = rois_num_per_level
        else:
            rois_num_per_level = None

        helper.append_op(
            type='distribute_fpn_proposals',
            inputs=inputs,
            outputs=outputs,
            attrs={
                'min_level': min_level,
                'max_level': max_level,
                'refer_level': refer_level,
                'refer_scale': refer_scale,
                'pixel_offset': pixel_offset
            })
        return multi_rois, restore_ind, rois_num_per_level
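
# Illustrative dygraph usage sketch (assumption: eager mode, where rois_num
# is mandatory and its values must sum to the number of input rois):
#
#   rois = paddle.rand([10, 4])
#   rois_num = paddle.to_tensor([3, 7], dtype='int32')
#   multi_rois, restore_ind, num_per_level = distribute_fpn_proposals(
#       rois, min_level=2, max_level=5, refer_level=4, refer_scale=224,
#       rois_num=rois_num)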


@paddle.jit.not_to_static
def prior_box(input,
              image,
              min_sizes,
              max_sizes=None,
              aspect_ratios=[1.],
              variance=[0.1, 0.1, 0.2, 0.2],
              flip=False,
              clip=False,
              steps=[0.0, 0.0],
              offset=0.5,
              min_max_aspect_ratios_order=False,
              name=None):
    """
    This op generates prior boxes for the SSD (Single Shot MultiBox Detector)
    algorithm. Each position of the input produces N prior boxes, where N is
    determined by the count of min_sizes, max_sizes and aspect_ratios. The
    size of each box lies in the interval (min_size, max_size), and the boxes
    are generated in sequence according to the aspect_ratios.

    Parameters:
        input(Tensor): 4-D tensor(NCHW), the data type should be float32 or float64.
        image(Tensor): 4-D tensor(NCHW), the input image data of PriorBoxOp,
            the data type should be float32 or float64.
        min_sizes(list|tuple|float): the min sizes of generated prior boxes.
        max_sizes(list|tuple|None): the max sizes of generated prior boxes.
            Default: None.
        aspect_ratios(list|tuple|float): the aspect ratios of generated
            prior boxes. Default: [1.].
        variance(list|tuple): the variances to be encoded in prior boxes.
            Default: [0.1, 0.1, 0.2, 0.2].
        flip(bool): Whether to flip aspect ratios. Default: False.
        clip(bool): Whether to clip out-of-boundary boxes. Default: False.
        steps(list|tuple): Prior box steps across width and height. If
            steps[0] equals 0.0 or steps[1] equals 0.0, the prior box step
            across the height or width of the input will be automatically
            calculated. Default: [0., 0.]
        offset(float): Prior boxes center offset. Default: 0.5
        min_max_aspect_ratios_order(bool): If set to True, the output prior
            boxes are in the order of [min, max, aspect_ratios], which is
            consistent with Caffe. Please note, this order affects the weight
            order of the following convolution layer, but does not affect the
            final detection results. Default: False.
        name(str, optional): The default value is None. Normally there is no
            need for the user to set this property. For more information,
            please refer to :ref:`api_guide_Name`.

    Returns:
        Tuple: A tuple with two Tensors (boxes, variances)
        boxes(Tensor): the output prior boxes of PriorBox.
            4-D tensor, the layout is [H, W, num_priors, 4].
            H is the height of input, W is the width of input,
            num_priors is the total box count of each position of input.
        variances(Tensor): the expanded variances of PriorBox.
            4-D tensor, the layout is [H, W, num_priors, 4].
            H is the height of input, W is the width of input,
            num_priors is the total box count of each position of input.

    Examples:
        .. code-block:: python

            import paddle
            from ppdet.modeling import ops
            paddle.enable_static()

            input = paddle.static.data(name="input", shape=[None, 3, 6, 9])
            image = paddle.static.data(name="image", shape=[None, 3, 9, 12])
            box, var = ops.prior_box(
                input=input,
                image=image,
                min_sizes=[100.],
                clip=True,
                flip=True)
    """
    helper = LayerHelper("prior_box", **locals())
    dtype = helper.input_dtype()
    check_variable_and_dtype(
        input, 'input', ['uint8', 'int8', 'float32', 'float64'], 'prior_box')

    def _is_list_or_tuple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

    if not _is_list_or_tuple_(min_sizes):
        min_sizes = [min_sizes]
    if not _is_list_or_tuple_(aspect_ratios):
        aspect_ratios = [aspect_ratios]
    if not (_is_list_or_tuple_(steps) and len(steps) == 2):
        raise ValueError('steps should be a list or tuple '
                         'with length 2, (step_width, step_height).')

    min_sizes = list(map(float, min_sizes))
    aspect_ratios = list(map(float, aspect_ratios))
    steps = list(map(float, steps))

    cur_max_sizes = None
    if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0:
        if not _is_list_or_tuple_(max_sizes):
            max_sizes = [max_sizes]
        cur_max_sizes = max_sizes

    if in_dynamic_mode():
        attrs = ('min_sizes', min_sizes, 'aspect_ratios', aspect_ratios,
                 'variances', variance, 'flip', flip, 'clip', clip, 'step_w',
                 steps[0], 'step_h', steps[1], 'offset', offset,
                 'min_max_aspect_ratios_order', min_max_aspect_ratios_order)
        if cur_max_sizes is not None:
            attrs += ('max_sizes', cur_max_sizes)
        box, var = C_ops.prior_box(input, image, *attrs)
        return box, var
    else:
        attrs = {
            'min_sizes': min_sizes,
            'aspect_ratios': aspect_ratios,
            'variances': variance,
            'flip': flip,
            'clip': clip,
            'step_w': steps[0],
            'step_h': steps[1],
            'offset': offset,
            'min_max_aspect_ratios_order': min_max_aspect_ratios_order
        }
        if cur_max_sizes is not None:
            attrs['max_sizes'] = cur_max_sizes

        box = helper.create_variable_for_type_inference(dtype)
        var = helper.create_variable_for_type_inference(dtype)
        helper.append_op(
            type="prior_box",
            inputs={"Input": input,
                    "Image": image},
            outputs={"Boxes": box,
                     "Variances": var},
            attrs=attrs)
        box.stop_gradient = True
        var.stop_gradient = True
        return box, var


@paddle.jit.not_to_static
def multiclass_nms(bboxes,
                   scores,
                   score_threshold,
                   nms_top_k,
                   keep_top_k,
                   nms_threshold=0.3,
                   normalized=True,
                   nms_eta=1.,
                   background_label=-1,
                   return_index=False,
                   return_rois_num=True,
                   rois_num=None,
                   name=None):
    """
    This operator performs multi-class non maximum suppression (NMS) on
    boxes and scores.

    In the NMS step, this operator greedily selects a subset of detection
    bounding boxes whose scores are larger than score_threshold, if that
    threshold is provided, then keeps the top nms_top_k confidence scores if
    nms_top_k is larger than -1. It then prunes away boxes that have a high
    IOU (intersection over union) overlap with already selected boxes by
    adaptive-threshold NMS based on nms_threshold and nms_eta.
    After the NMS step, at most keep_top_k bboxes in total are kept per image
    if keep_top_k is larger than -1.

    Args:
        bboxes (Tensor): Two types of bboxes are supported:
            1. (Tensor) A 3-D Tensor with shape
               [N, M, 4 or 8 16 24 32] represents the
               predicted locations of M bounding bboxes,
               N is the batch size. Each bounding box has four
               coordinate values and the layout is
               [xmin, ymin, xmax, ymax], when the box size equals 4.
            2. (LoDTensor) A 3-D Tensor with shape [M, C, 4],
               where M is the number of bounding boxes and C is the
               class number.
        scores (Tensor): Two types of scores are supported:
            1. (Tensor) A 3-D Tensor with shape [N, C, M]
               represents the predicted confidence predictions.
               N is the batch size, C is the class number, M is
               the number of bounding boxes. For each category there
               are in total M scores corresponding to the M bounding
               boxes. Please note, M is equal to the 2nd dimension
               of BBoxes.
            2. (LoDTensor) A 2-D LoDTensor with shape [M, C],
               where M is the number of bboxes and C is the class
               number. In this case, the input BBoxes should be the
               second case with shape [M, C, 4].
        background_label (int): The index of the background label; the
            background label will be ignored. If set to -1, all categories
            will be considered. Default: 0
        score_threshold (float): Threshold to filter out bounding boxes with
            low confidence scores. If not provided, consider all boxes.
        nms_top_k (int): Maximum number of detections to be kept according to
            the confidences after filtering detections based on
            score_threshold.
        nms_threshold (float): The threshold to be used in NMS. Default: 0.3
        nms_eta (float): The adaptive-threshold decay factor used in NMS.
            Default: 1.0
        keep_top_k (int): Number of total bboxes to be kept per image after
            the NMS step. -1 means keeping all bboxes after the NMS step.
        normalized (bool): Whether detections are normalized. Default: True
        return_index(bool): Whether to return the selected index. Default: False
        rois_num(Tensor): 1-D Tensor containing the number of RoIs in each
            image. The shape is [B] and the data type is int32. B is the
            number of images. If it is not None then a list of 1-D Tensors is
            returned; each element is the output RoIs' number of each image
            on the corresponding level and the shape is [B]. None by default.
        name(str): Name of the multiclass nms op. Default: None.

    Returns:
        A tuple with two Variables: (Out, Index) if return_index is True,
        otherwise a tuple with one Variable (Out) is returned.
        Out: A 2-D LoDTensor with shape [No, 6] represents the detections.
            Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax],
            or a 2-D LoDTensor with shape [No, 10] represents the detections.
            Each row has 10 values: [label, confidence, x1, y1, x2, y2, x3, y3,
            x4, y4]. No is the total number of detections.
            If no results are detected in any image, all elements in LoD are
            0, and the output tensor is empty (None).
        Index: Only returned when return_index is True. A 2-D LoDTensor with
            shape [No, 1] represents the selected indices, of integer type.
            The indices are absolute values across batches. No is the same
            number as in Out. If the index is used to gather other attributes
            such as age, one needs to reshape the input (N, M, 1) to
            (N * M, 1) first, where N is the batch size and M is the number
            of boxes.

    Examples:
        .. code-block:: python

            import paddle
            from ppdet.modeling import ops

            boxes = paddle.static.data(name='bboxes', shape=[81, 4],
                                       dtype='float32', lod_level=1)
            scores = paddle.static.data(name='scores', shape=[81],
                                        dtype='float32', lod_level=1)
            out, index = ops.multiclass_nms(bboxes=boxes,
                                            scores=scores,
                                            background_label=0,
                                            score_threshold=0.5,
                                            nms_top_k=400,
                                            nms_threshold=0.3,
                                            keep_top_k=200,
                                            normalized=False,
                                            return_index=True)
    """
    helper = LayerHelper('multiclass_nms3', **locals())

    if in_dynamic_mode():
        attrs = ('background_label', background_label, 'score_threshold',
                 score_threshold, 'nms_top_k', nms_top_k, 'nms_threshold',
                 nms_threshold, 'keep_top_k', keep_top_k, 'nms_eta', nms_eta,
                 'normalized', normalized)
        output, index, nms_rois_num = C_ops.multiclass_nms3(bboxes, scores,
                                                            rois_num, *attrs)
        if not return_index:
            index = None
        return output, nms_rois_num, index

    else:
        output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
        index = helper.create_variable_for_type_inference(dtype='int32')

        inputs = {'BBoxes': bboxes, 'Scores': scores}
        outputs = {'Out': output, 'Index': index}

        if rois_num is not None:
            inputs['RoisNum'] = rois_num

        if return_rois_num:
            nms_rois_num = helper.create_variable_for_type_inference(
                dtype='int32')
            outputs['NmsRoisNum'] = nms_rois_num

        helper.append_op(
            type="multiclass_nms3",
            inputs=inputs,
            attrs={
                'background_label': background_label,
                'score_threshold': score_threshold,
                'nms_top_k': nms_top_k,
                'nms_threshold': nms_threshold,
                'keep_top_k': keep_top_k,
                'nms_eta': nms_eta,
                'normalized': normalized
            },
            outputs=outputs)
        output.stop_gradient = True
        index.stop_gradient = True
        if not return_index:
            index = None
        if not return_rois_num:
            nms_rois_num = None

        return output, nms_rois_num, index
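
# Illustrative dygraph usage sketch (assumption: eager mode; note the return
# order is (output, nms_rois_num, index)):
#
#   boxes = paddle.rand([1, 100, 4])
#   scores = paddle.rand([1, 80, 100])
#   out, rois_num, index = multiclass_nms(
#       boxes, scores, score_threshold=0.05, nms_top_k=400, keep_top_k=100,
#       return_index=True)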


@paddle.jit.not_to_static
def matrix_nms(bboxes,
               scores,
               score_threshold,
               post_threshold,
               nms_top_k,
               keep_top_k,
               use_gaussian=False,
               gaussian_sigma=2.,
               background_label=0,
               normalized=True,
               return_index=False,
               return_rois_num=True,
               name=None):
    """
    **Matrix NMS**

    This operator performs matrix non maximum suppression (NMS).
    First it selects a subset of candidate bounding boxes that have higher
    scores than score_threshold (if provided), then the top k candidates are
    selected if nms_top_k is larger than -1. The scores of the remaining
    candidates are then decayed according to the Matrix NMS scheme.
    After the NMS step, at most keep_top_k bboxes in total are kept per image
    if keep_top_k is larger than -1.

    Args:
        bboxes (Tensor): A 3-D Tensor with shape [N, M, 4] represents the
            predicted locations of M bounding bboxes,
            N is the batch size. Each bounding box has four
            coordinate values and the layout is
            [xmin, ymin, xmax, ymax], when the box size equals 4.
            The data type is float32 or float64.
        scores (Tensor): A 3-D Tensor with shape [N, C, M]
            represents the predicted confidence predictions.
            N is the batch size, C is the class number, M is
            the number of bounding boxes. For each category there
            are in total M scores corresponding to the M bounding
            boxes. Please note, M is equal to the 2nd dimension
            of BBoxes. The data type is float32 or float64.
        score_threshold (float): Threshold to filter out bounding boxes with
            low confidence scores.
        post_threshold (float): Threshold to filter out bounding boxes with
            low confidence scores AFTER decaying.
        nms_top_k (int): Maximum number of detections to be kept according to
            the confidences after filtering detections based on
            score_threshold.
        keep_top_k (int): Number of total bboxes to be kept per image after
            the NMS step. -1 means keeping all bboxes after the NMS step.
        use_gaussian (bool): Use Gaussian as the decay function. Default: False
        gaussian_sigma (float): Sigma for the Gaussian decay function.
            Default: 2.0
        background_label (int): The index of the background label; the
            background label will be ignored. If set to -1, all categories
            will be considered. Default: 0
        normalized (bool): Whether detections are normalized. Default: True
        return_index(bool): Whether to return the selected index.
            Default: False
        return_rois_num(bool): Whether to return rois_num. Default: True
        name(str): Name of the matrix nms op. Default: None.

    Returns:
        A tuple with three Tensors: (Out, Index, RoisNum) if return_index is
        True, otherwise a tuple with two Tensors (Out, RoisNum) is returned.
        Out (Tensor): A 2-D Tensor with shape [No, 6] containing the
            detection results.
            Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
            (After version 1.3, when no boxes are detected, the lod is changed
            from {0} to {1})
        Index (Tensor): A 2-D Tensor with shape [No, 1] containing the
            selected indices, which are absolute values across batches.
        rois_num (Tensor): A 1-D Tensor with shape [N] containing
            the number of detected boxes in each image.

    Examples:
        .. code-block:: python

            import paddle
            from ppdet.modeling import ops

            boxes = paddle.static.data(name='bboxes', shape=[None, 81, 4],
                                       dtype='float32', lod_level=1)
            scores = paddle.static.data(name='scores', shape=[None, 81],
                                        dtype='float32', lod_level=1)
            out = ops.matrix_nms(bboxes=boxes, scores=scores, background_label=0,
                                 score_threshold=0.5, post_threshold=0.1,
                                 nms_top_k=400, keep_top_k=200, normalized=False)
    """
    check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
                             'matrix_nms')
    check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
                             'matrix_nms')
    check_type(score_threshold, 'score_threshold', float, 'matrix_nms')
    check_type(post_threshold, 'post_threshold', float, 'matrix_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'matrix_nms')
    check_type(keep_top_k, 'keep_top_k', int, 'matrix_nms')
    check_type(normalized, 'normalized', bool, 'matrix_nms')
    check_type(use_gaussian, 'use_gaussian', bool, 'matrix_nms')
    check_type(gaussian_sigma, 'gaussian_sigma', float, 'matrix_nms')
    check_type(background_label, 'background_label', int, 'matrix_nms')

    if in_dynamic_mode():
        attrs = ('background_label', background_label, 'score_threshold',
                 score_threshold, 'post_threshold', post_threshold,
                 'nms_top_k', nms_top_k, 'gaussian_sigma', gaussian_sigma,
                 'use_gaussian', use_gaussian, 'keep_top_k', keep_top_k,
                 'normalized', normalized)
        out, index, rois_num = C_ops.matrix_nms(bboxes, scores, *attrs)
        if not return_index:
            index = None
        if not return_rois_num:
            rois_num = None
        return out, rois_num, index
    else:
        helper = LayerHelper('matrix_nms', **locals())
        output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
        index = helper.create_variable_for_type_inference(dtype='int32')
        outputs = {'Out': output, 'Index': index}
        if return_rois_num:
            rois_num = helper.create_variable_for_type_inference(dtype='int32')
            outputs['RoisNum'] = rois_num

        helper.append_op(
            type="matrix_nms",
            inputs={'BBoxes': bboxes,
                    'Scores': scores},
            attrs={
                'background_label': background_label,
                'score_threshold': score_threshold,
                'post_threshold': post_threshold,
                'nms_top_k': nms_top_k,
                'gaussian_sigma': gaussian_sigma,
                'use_gaussian': use_gaussian,
                'keep_top_k': keep_top_k,
                'normalized': normalized
            },
            outputs=outputs)
        output.stop_gradient = True
        if not return_index:
            index = None
        if not return_rois_num:
            rois_num = None
        return output, rois_num, index


@paddle.jit.not_to_static
def box_coder(prior_box,
              prior_box_var,
              target_box,
              code_type="encode_center_size",
              box_normalized=True,
              axis=0,
              name=None):
    r"""
    **Box Coder Layer**

    Encode/Decode the target bounding box with the prior box information.

    The Encoding schema is described below:

    .. math::

        ox &= (tx - px) / pw / pxv \\
        oy &= (ty - py) / ph / pyv \\
        ow &= \log(|tw / pw|) / pwv \\
        oh &= \log(|th / ph|) / phv

    The Decoding schema is described below:

    .. math::

        ox &= (pw * pxv * tx + px) - tw / 2 \\
        oy &= (ph * pyv * ty + py) - th / 2 \\
        ow &= \exp(pwv * tw) * pw + tw / 2 \\
        oh &= \exp(phv * th) * ph + th / 2

    where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates,
    width and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote
    the prior box's (anchor) center coordinates, width and height. `pxv`,
    `pyv`, `pwv`, `phv` denote the variance of the prior box, and `ox`, `oy`,
    `ow`, `oh` denote the encoded/decoded coordinates, width and height.

    During Box Decoding, two broadcast modes are supported. Say the target
    box has shape [N, M, 4], and the shape of the prior box can be [N, 4] or
    [M, 4]. Then the prior box will broadcast to the target box along the
    assigned axis.

    Args:
        prior_box(Tensor): Box list prior_box is a 2-D Tensor with shape
            [M, 4] which holds M boxes; the data type is float32 or float64.
            Each box is represented as [xmin, ymin, xmax, ymax]; [xmin, ymin]
            is the left top coordinate of the anchor box. If the input is an
            image feature map, they are close to the origin of the coordinate
            system. [xmax, ymax] is the right bottom coordinate of the anchor
            box.
        prior_box_var(List|Tensor|None): prior_box_var supports three types
            of input. One is a Tensor with shape [M, 4] which holds M groups;
            the data type is float32 or float64. The second is a list of
            4 elements shared by all boxes, with data type float32 or float64.
            The third is None, which means it is not involved in the
            calculation.
        target_box(Tensor): This input can be a 2-D LoDTensor with shape
            [N, 4] when code_type is 'encode_center_size'. This input can also
            be a 3-D Tensor with shape [N, M, 4] when code_type is
            'decode_center_size'. Each box is represented as
            [xmin, ymin, xmax, ymax]. The data type is float32 or float64.
        code_type(str): The code type used with the target box. It can be
            `encode_center_size` or `decode_center_size`. `encode_center_size`
            by default.
        box_normalized(bool): Whether to treat the prior box as a normalized
            box. Set to True by default.
        axis(int): Which axis in PriorBox to broadcast for box decode.
            For example, if axis is 0, TargetBox has shape [N, M, 4] and
            PriorBox has shape [M, 4], then PriorBox will broadcast to
            [N, M, 4] for decoding. It is only valid when the code type is
            `decode_center_size`. Set to 0 by default.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name does not need to be set.
            None by default.

    Returns:
        Tensor:
        output_box(Tensor): When code_type is 'encode_center_size', the
            output tensor of box_coder_op has shape [N, M, 4] representing the
            result of N target boxes encoded with M prior boxes and variances.
            When code_type is 'decode_center_size', N represents the batch
            size and M represents the number of decoded boxes.

    Examples:
        .. code-block:: python

            import paddle
            from ppdet.modeling import ops
            paddle.enable_static()

            # For encode
            prior_box_encode = paddle.static.data(name='prior_box_encode',
                                                  shape=[512, 4],
                                                  dtype='float32')
            target_box_encode = paddle.static.data(name='target_box_encode',
                                                   shape=[81, 4],
                                                   dtype='float32')
            output_encode = ops.box_coder(prior_box=prior_box_encode,
                                          prior_box_var=[0.1, 0.1, 0.2, 0.2],
                                          target_box=target_box_encode,
                                          code_type="encode_center_size")

            # For decode
            prior_box_decode = paddle.static.data(name='prior_box_decode',
                                                  shape=[512, 4],
                                                  dtype='float32')
            target_box_decode = paddle.static.data(name='target_box_decode',
                                                   shape=[512, 81, 4],
                                                   dtype='float32')
            output_decode = ops.box_coder(prior_box=prior_box_decode,
                                          prior_box_var=[0.1, 0.1, 0.2, 0.2],
                                          target_box=target_box_decode,
                                          code_type="decode_center_size",
                                          box_normalized=False,
                                          axis=1)
    """
    check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
                             'box_coder')
    check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
                             'box_coder')

    if in_dynamic_mode():
        if isinstance(prior_box_var, Variable):
            output_box = C_ops.box_coder(
                prior_box, prior_box_var, target_box, "code_type", code_type,
                "box_normalized", box_normalized, "axis", axis)
        elif isinstance(prior_box_var, list):
            output_box = C_ops.box_coder(
                prior_box, None, target_box, "code_type", code_type,
                "box_normalized", box_normalized, "axis", axis, "variance",
                prior_box_var)
        else:
            raise TypeError(
                "Input prior_box_var of box_coder must be Variable or list")
        return output_box
    else:
        helper = LayerHelper("box_coder", **locals())
        output_box = helper.create_variable_for_type_inference(
            dtype=prior_box.dtype)

        inputs = {"PriorBox": prior_box, "TargetBox": target_box}
        attrs = {
            "code_type": code_type,
            "box_normalized": box_normalized,
            "axis": axis
        }
        if isinstance(prior_box_var, Variable):
            inputs['PriorBoxVar'] = prior_box_var
        elif isinstance(prior_box_var, list):
            attrs['variance'] = prior_box_var
        else:
            raise TypeError(
                "Input prior_box_var of box_coder must be Variable or list")
        helper.append_op(
            type="box_coder",
            inputs=inputs,
            attrs=attrs,
            outputs={"OutputBox": output_box})
        return output_box


@paddle.jit.not_to_static
def generate_proposals(scores,
                       bbox_deltas,
                       im_shape,
                       anchors,
                       variances,
                       pre_nms_top_n=6000,
                       post_nms_top_n=1000,
                       nms_thresh=0.5,
                       min_size=0.1,
                       eta=1.0,
                       pixel_offset=False,
                       return_rois_num=False,
                       name=None):
    """
    **Generate proposals for Faster R-CNN**

    This operation proposes RoIs according to each box's probability of being
    a foreground object; the boxes are computed from the anchors. The
    bbox_deltas and the scores are the outputs of RPN. The final proposals
    can be used to train a detection net.

    For generating proposals, this operation performs the following steps:

    1. Transpose and resize scores and bbox_deltas to (H*W*A, 1) and
       (H*W*A, 4), respectively.
    2. Calculate box locations as proposal candidates.
    3. Clip boxes to the image.
    4. Remove predicted boxes with small area.
    5. Apply NMS to get the final proposals as output.

    Args:
        scores(Tensor): A 4-D Tensor with shape [N, A, H, W] represents
            the probability for each box to be an object.
            N is the batch size, A is the number of anchors, H and W are the
            height and width of the feature map. The data type must be
            float32.
        bbox_deltas(Tensor): A 4-D Tensor with shape [N, 4*A, H, W]
            represents the difference between the predicted box location and
            the anchor location. The data type must be float32.
        im_shape(Tensor): A 2-D Tensor with shape [N, 2] represents H, W, the
            original image size or input size. The data type can be float32
            or float64.
        anchors(Tensor): A 4-D Tensor represents the anchors with a layout
            of [H, W, A, 4]. H and W are the height and width of the feature
            map, A is the box count of each position. Each anchor is in
            (xmin, ymin, xmax, ymax) format and unnormalized. The data type
            must be float32.
        variances(Tensor): A 4-D Tensor. The expanded variances of anchors
            with a layout of [H, W, num_priors, 4]. Each variance is in
            (xcenter, ycenter, w, h) format. The data type must be float32.
        pre_nms_top_n(int): Number of total bboxes to be kept per image
            before NMS. 6000 by default.
        post_nms_top_n(int): Number of total bboxes to be kept per image
            after NMS. 1000 by default.
        nms_thresh(float): Threshold in NMS. The data type must be float32.
            0.5 by default.
        min_size(float): Remove predicted boxes with either height or
            width < min_size. The data type must be float32. 0.1 by default.
        eta(float): Applied in adaptive NMS: if `adaptive_threshold > 0.5`,
            `adaptive_threshold = adaptive_threshold * eta` in each iteration.
        pixel_offset(bool, optional): Whether to add a pixel offset (1) when
            computing box width and height. False by default.
        return_rois_num(bool): When set to True, a 1-D Tensor with shape
            [N, ] is returned that holds the RoI number of each image in the
            batch, where N is the number of images. For example, the value
            [4, 5] means the first image has 4 RoIs and the second image has
            5 RoIs. It is only used in the rcnn model. False by default.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name does not need to be set.
            None by default.

    Returns:
        tuple:
        A tuple with format ``(rpn_rois, rpn_roi_probs)``.

        - **rpn_rois**: The generated RoIs. 2-D Tensor with shape ``[N, 4]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
        - **rpn_roi_probs**: The scores of generated RoIs. 2-D Tensor with shape ``[N, 1]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.

    Examples:
        .. code-block:: python

            import paddle
            from ppdet.modeling import ops
            paddle.enable_static()

            scores = paddle.static.data(name='scores', shape=[None, 4, 5, 5], dtype='float32')
            bbox_deltas = paddle.static.data(name='bbox_deltas', shape=[None, 16, 5, 5], dtype='float32')
            im_shape = paddle.static.data(name='im_shape', shape=[None, 2], dtype='float32')
            anchors = paddle.static.data(name='anchors', shape=[None, 5, 4, 4], dtype='float32')
            variances = paddle.static.data(name='variances', shape=[None, 5, 10, 4], dtype='float32')
            rois, roi_probs = ops.generate_proposals(scores, bbox_deltas,
                                                     im_shape, anchors, variances)
    """
    if in_dynamic_mode():
        assert return_rois_num, "return_rois_num should be True in dygraph mode."
        attrs = ('pre_nms_topN', pre_nms_top_n, 'post_nms_topN', post_nms_top_n,
                 'nms_thresh', nms_thresh, 'min_size', min_size, 'eta', eta,
                 'pixel_offset', pixel_offset)
        rpn_rois, rpn_roi_probs, rpn_rois_num = C_ops.generate_proposals_v2(
            scores, bbox_deltas, im_shape, anchors, variances, *attrs)
        if not return_rois_num:
            rpn_rois_num = None
        return rpn_rois, rpn_roi_probs, rpn_rois_num

    else:
        helper = LayerHelper('generate_proposals_v2', **locals())

        check_variable_and_dtype(scores, 'scores', ['float32'],
                                 'generate_proposals_v2')
        check_variable_and_dtype(bbox_deltas, 'bbox_deltas', ['float32'],
                                 'generate_proposals_v2')
        check_variable_and_dtype(im_shape, 'im_shape', ['float32', 'float64'],
                                 'generate_proposals_v2')
        check_variable_and_dtype(anchors, 'anchors', ['float32'],
                                 'generate_proposals_v2')
        check_variable_and_dtype(variances, 'variances', ['float32'],
                                 'generate_proposals_v2')

        rpn_rois = helper.create_variable_for_type_inference(
            dtype=bbox_deltas.dtype)
        rpn_roi_probs = helper.create_variable_for_type_inference(
            dtype=scores.dtype)
        outputs = {
            'RpnRois': rpn_rois,
            'RpnRoiProbs': rpn_roi_probs,
        }
        if return_rois_num:
            rpn_rois_num = helper.create_variable_for_type_inference(
                dtype='int32')
            rpn_rois_num.stop_gradient = True
            outputs['RpnRoisNum'] = rpn_rois_num

        helper.append_op(
            type="generate_proposals_v2",
            inputs={
                'Scores': scores,
                'BboxDeltas': bbox_deltas,
                'ImShape': im_shape,
                'Anchors': anchors,
                'Variances': variances
            },
            attrs={
                'pre_nms_topN': pre_nms_top_n,
                'post_nms_topN': post_nms_top_n,
                'nms_thresh': nms_thresh,
                'min_size': min_size,
                'eta': eta,
                'pixel_offset': pixel_offset
            },
            outputs=outputs)
        rpn_rois.stop_gradient = True
        rpn_roi_probs.stop_gradient = True
        if not return_rois_num:
            rpn_rois_num = None

        return rpn_rois, rpn_roi_probs, rpn_rois_num
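
# Illustrative dygraph usage sketch (assumption: eager mode, where
# return_rois_num must be True; shapes chosen so A=3 anchors per position):
#
#   scores = paddle.rand([1, 3, 8, 8])
#   deltas = paddle.rand([1, 12, 8, 8])
#   im_shape = paddle.to_tensor([[128., 128.]])
#   anchors = paddle.rand([8, 8, 3, 4])
#   variances = paddle.ones([8, 8, 3, 4])
#   rois, probs, rois_num = generate_proposals(
#       scores, deltas, im_shape, anchors, variances, return_rois_num=True)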


def sigmoid_cross_entropy_with_logits(input,
                                      label,
                                      ignore_index=-100,
                                      normalize=False):
    output = F.binary_cross_entropy_with_logits(input, label, reduction='none')
    # Zero out positions whose label equals ignore_index.
    mask_tensor = paddle.cast(label != ignore_index, 'float32')
    output = paddle.multiply(output, mask_tensor)
    if normalize:
        # Normalize by the number of non-ignored elements.
        sum_valid_mask = paddle.sum(mask_tensor)
        output = output / sum_valid_mask
    return output
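
# Illustrative usage sketch (not part of the original module): an
# element-wise BCE-with-logits loss that masks out ignored labels.
#
#   logits = paddle.rand([4, 80])
#   labels = paddle.ones([4, 80])
#   loss = sigmoid_cross_entropy_with_logits(logits, labels)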


def smooth_l1(input, label, inside_weight=None, outside_weight=None,
              sigma=None):
    input_new = paddle.multiply(input, inside_weight)
    label_new = paddle.multiply(label, inside_weight)
    delta = 1 / (sigma * sigma)
    out = F.smooth_l1_loss(input_new, label_new, reduction='none', delta=delta)
    out = paddle.multiply(out, outside_weight)
    out = out / delta
    out = paddle.reshape(out, shape=[out.shape[0], -1])
    out = paddle.sum(out, axis=1)
    return out
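
# Illustrative usage sketch (not part of the original module): weighted
# smooth-L1 as used for bbox regression; weights of 1 keep all terms, and
# sigma=1.0 reduces delta to 1.
#
#   pred = paddle.rand([8, 4])
#   gt = paddle.rand([8, 4])
#   w = paddle.ones([8, 4])
#   loss = smooth_l1(pred, gt, inside_weight=w, outside_weight=w, sigma=1.0)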


def channel_shuffle(x, groups):
    batch_size, num_channels, height, width = x.shape[0:4]
    assert num_channels % groups == 0, 'num_channels should be divisible by groups'
    channels_per_group = num_channels // groups
    # Reshape to [N, groups, C/groups, H, W], swap the two group axes, then
    # flatten back so channels from different groups are interleaved.
    x = paddle.reshape(
        x=x, shape=[batch_size, groups, channels_per_group, height, width])
    x = paddle.transpose(x=x, perm=[0, 2, 1, 3, 4])
    x = paddle.reshape(x=x, shape=[batch_size, num_channels, height, width])
    return x
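
# Illustrative usage sketch (not part of the original module): shuffling
# channels across 2 groups, as in ShuffleNet.
#
#   x = paddle.rand([1, 8, 4, 4])
#   y = channel_shuffle(x, groups=2)   # same shape, channels interleaved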


def get_static_shape(tensor):
    shape = paddle.shape(tensor)
    shape.stop_gradient = True
    return shape
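
# Illustrative usage sketch (not part of the original module): paddle.shape
# returns the runtime shape as a tensor, which stays valid under dynamic
# shapes in static graphs.
#
#   t = paddle.rand([2, 3, 5])
#   s = get_static_shape(t)   # Tensor holding [2, 3, 5]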