# pose3d_metro.py
  1. # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. from __future__ import absolute_import
  15. from __future__ import division
  16. from __future__ import print_function
  17. import paddle
  18. import paddle.nn as nn
  19. import paddle.nn.functional as F
  20. from ppdet.core.workspace import register, create
  21. from .meta_arch import BaseArch
  22. from .. import layers as L
  23. __all__ = ['METRO_Body']
  24. def orthographic_projection(X, camera):
  25. """Perform orthographic projection of 3D points X using the camera parameters
  26. Args:
  27. X: size = [B, N, 3]
  28. camera: size = [B, 3]
  29. Returns:
  30. Projected 2D points -- size = [B, N, 2]
  31. """
  32. camera = camera.reshape((-1, 1, 3))
  33. X_trans = X[:, :, :2] + camera[:, :, 1:]
  34. shape = paddle.shape(X_trans)
  35. X_2d = (camera[:, :, 0] * X_trans.reshape((shape[0], -1))).reshape(shape)
  36. return X_2d
@register
class METRO_Body(BaseArch):
    # Registered as an 'architecture' so it can be instantiated from a
    # YAML config by the ppdet workspace.
    __category__ = 'architecture'
    # 'loss' is resolved and injected by the workspace from the global config.
    __inject__ = ['loss']

    def __init__(
            self,
            num_joints,
            backbone='HRNet',
            trans_encoder='',
            loss='Pose3DLoss', ):
        """
        METRO network, see https://arxiv.org/abs/2012.09760

        Args:
            num_joints (int): number of 3D joints to regress
            backbone (nn.Layer): backbone instance
            trans_encoder (nn.Layer): transformer encoder that maps image
                tokens to per-joint 3D coordinates plus camera tokens
            loss (nn.Layer): 3D pose loss instance (injected)
        """
        super(METRO_Body, self).__init__()
        self.num_joints = num_joints
        self.backbone = backbone
        self.loss = loss
        # Export/inference flag; not read inside this class.
        self.deploy = False

        self.trans_encoder = trans_encoder
        # Maps the 49 flattened backbone tokens to one token per regression
        # target: num_joints joint tokens followed by 10 camera tokens.
        self.conv_learn_tokens = paddle.nn.Conv1D(49, 10 + num_joints, 1)
        # Camera head: per-token 3 -> 1, then across the 10 camera tokens
        # 10 -> 250 -> 3 to produce (scale, tx, ty).
        self.cam_param_fc = paddle.nn.Linear(3, 1)
        self.cam_param_fc2 = paddle.nn.Linear(10, 250)
        self.cam_param_fc3 = paddle.nn.Linear(250, 3)

    @classmethod
    def from_config(cls, cfg, *args, **kwargs):
        # backbone
        backbone = create(cfg['backbone'])
        trans_encoder = create(cfg['trans_encoder'])

        return {'backbone': backbone, 'trans_encoder': trans_encoder}

    def _forward(self):
        batch_size = self.inputs['image'].shape[0]

        image_feat = self.backbone(self.inputs)
        # Flatten the spatial map into 49 tokens of 2048 channels each.
        # NOTE(review): assumes the backbone output is (B, 2048, 7, 7) --
        # TODO confirm against the configured backbone.
        image_feat_flatten = image_feat.reshape((batch_size, 2048, 49))
        image_feat_flatten = image_feat_flatten.transpose(perm=(0, 2, 1))
        # and apply a conv layer to learn image token for each 3d joint/vertex position
        features = self.conv_learn_tokens(image_feat_flatten)

        if self.training:
            # apply mask vertex/joint modeling
            # meta_masks is a tensor of all the masks, randomly generated in dataloader
            # we pre-define a [MASK] token, which is a floating-value vector with 0.01s
            meta_masks = self.inputs['mjm_mask'].expand((-1, -1, 2048))
            constant_tensor = paddle.ones_like(features) * 0.01
            # Keep unmasked features; replace masked positions with the
            # constant [MASK] value.
            features = features * meta_masks + constant_tensor * (1 - meta_masks)
        pred_out = self.trans_encoder(features)

        # First num_joints tokens are joint coordinates; the remaining 10
        # tokens feed the camera-parameter head.
        pred_3d_joints = pred_out[:, :self.num_joints, :]
        cam_features = pred_out[:, self.num_joints:, :]

        # learn camera parameters
        x = self.cam_param_fc(cam_features)
        x = x.transpose(perm=(0, 2, 1))
        x = self.cam_param_fc2(x)
        x = self.cam_param_fc3(x)
        cam_param = x.transpose(perm=(0, 2, 1))
        # Drops the trailing singleton dim to give per-sample (scale, tx, ty).
        pred_camera = cam_param.squeeze()
        pred_2d_joints = orthographic_projection(pred_3d_joints, pred_camera)
        return pred_3d_joints, pred_2d_joints

    def get_loss(self):
        # Training entry point: run the network and apply the injected loss.
        preds_3d, preds_2d = self._forward()
        loss = self.loss(preds_3d, preds_2d, self.inputs)
        output = {'loss': loss}
        return output

    def get_pred(self):
        # Inference entry point: return 3D and projected 2D joint predictions.
        preds_3d, preds_2d = self._forward()
        outputs = {'pose3d': preds_3d, 'pose2d': preds_2d}
        return outputs