vqa_token_pad.py

# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import numpy as np


class VQATokenPad(object):
    def __init__(self,
                 max_seq_len=512,
                 pad_to_max_seq_len=True,
                 return_attention_mask=True,
                 return_token_type_ids=True,
                 truncation_strategy="longest_first",
                 return_overflowing_tokens=False,
                 return_special_tokens_mask=False,
                 infer_mode=False,
                 **kwargs):
        self.max_seq_len = max_seq_len
        self.pad_to_max_seq_len = pad_to_max_seq_len
        self.return_attention_mask = return_attention_mask
        self.return_token_type_ids = return_token_type_ids
        self.truncation_strategy = truncation_strategy
        self.return_overflowing_tokens = return_overflowing_tokens
        self.return_special_tokens_mask = return_special_tokens_mask
        # Padded label positions are filled with the loss ignore index
        # (-100) so they do not contribute to the classification loss.
        self.pad_token_label_id = paddle.nn.CrossEntropyLoss().ignore_index
        self.infer_mode = infer_mode
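
    # Pads (or, in infer_mode, truncates) the sequence fields of a single
    # sample to max_seq_len and converts them to int64 numpy arrays.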
    def __call__(self, data):
        needs_to_be_padded = (self.pad_to_max_seq_len and
                              len(data["input_ids"]) < self.max_seq_len)

        if needs_to_be_padded:
            # Use padding values attached to the sample if present,
            # otherwise fall back to right padding with default ids.
            if 'tokenizer_params' in data:
                tokenizer_params = data.pop('tokenizer_params')
            else:
                tokenizer_params = dict(
                    padding_side='right', pad_token_type_id=0, pad_token_id=1)

            difference = self.max_seq_len - len(data["input_ids"])
            if tokenizer_params['padding_side'] == 'right':
                if self.return_attention_mask:
                    data["attention_mask"] = ([1] * len(data["input_ids"]) +
                                              [0] * difference)
                if self.return_token_type_ids:
                    data["token_type_ids"] = (
                        data["token_type_ids"] +
                        [tokenizer_params['pad_token_type_id']] * difference)
                if self.return_special_tokens_mask:
                    data["special_tokens_mask"] = (
                        data["special_tokens_mask"] + [1] * difference)
                data["input_ids"] = (
                    data["input_ids"] +
                    [tokenizer_params['pad_token_id']] * difference)
                if not self.infer_mode:
                    # Padded positions must not contribute to the loss.
                    data["labels"] = (
                        data["labels"] +
                        [self.pad_token_label_id] * difference)
                data["bbox"] = data["bbox"] + [[0, 0, 0, 0]] * difference
            elif tokenizer_params['padding_side'] == 'left':
                if self.return_attention_mask:
                    data["attention_mask"] = ([0] * difference +
                                              [1] * len(data["input_ids"]))
                if self.return_token_type_ids:
                    data["token_type_ids"] = (
                        [tokenizer_params['pad_token_type_id']] * difference +
                        data["token_type_ids"])
                if self.return_special_tokens_mask:
                    data["special_tokens_mask"] = (
                        [1] * difference + data["special_tokens_mask"])
                data["input_ids"] = (
                    [tokenizer_params['pad_token_id']] * difference +
                    data["input_ids"])
                if not self.infer_mode:
                    data["labels"] = (
                        [self.pad_token_label_id] * difference +
                        data["labels"])
                data["bbox"] = [[0, 0, 0, 0]] * difference + data["bbox"]
        else:
            if self.return_attention_mask:
                data["attention_mask"] = [1] * len(data["input_ids"])

        for key in data:
            if key in [
                    'input_ids', 'labels', 'token_type_ids', 'bbox',
                    'attention_mask'
            ]:
                if self.infer_mode:
                    if key != 'labels':
                        # At inference time over-long fields are truncated
                        # to max_seq_len instead of padded.
                        length = min(len(data[key]), self.max_seq_len)
                        data[key] = data[key][:length]
                    else:
                        # Inference labels (if any) are passed through
                        # without truncation or array conversion.
                        continue
                data[key] = np.array(data[key], dtype='int64')
        return data
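

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file). The sample values
# below are hypothetical and only illustrate the right-padding defaults
# (pad_token_id=1, pad_token_type_id=0) with a small max_seq_len.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pad = VQATokenPad(max_seq_len=8)
    sample = {
        "input_ids": [0, 100, 200, 2],   # 4 tokens, padded out to 8
        "token_type_ids": [0, 0, 0, 0],
        "bbox": [[0, 0, 0, 0]] * 4,
        "labels": [-100, 5, 5, -100],
    }
    out = pad(sample)
    print(out["input_ids"])       # [  0 100 200   2   1   1   1   1]
    print(out["attention_mask"])  # [1 1 1 1 0 0 0 0]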