bert_padding.py

# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import operator
from functools import reduce

import paddle
import paddle.nn.functional as F


class IndexFirstAxis(paddle.autograd.PyLayer):
    """Gather rows of a (first_axis_dim, ...) tensor at the given indices."""

    @staticmethod
    def forward(ctx, input, indices):
        from einops import rearrange, repeat

        ctx.save_for_backward(indices)
        assert input.ndim >= 2
        ctx.first_axis_dim, other_shape = input.shape[0], input.shape[1:]
        second_dim = reduce(operator.mul, other_shape, 1)
        return paddle.take_along_axis(
            arr=rearrange(input, "b ... -> b (...)"),
            axis=0,
            indices=repeat(indices, "z -> z d", d=second_dim),
        ).reshape([-1, *other_shape])

    @staticmethod
    def backward(ctx, grad_output):
        from einops import rearrange, repeat

        (indices,) = ctx.saved_tensor()
        assert grad_output.ndim >= 2
        other_shape = grad_output.shape[1:]
        grad_output = rearrange(grad_output, "b ... -> b (...)")
        # Scatter the incoming gradient back to the gathered rows' original positions.
        grad_input = paddle.zeros(
            shape=[ctx.first_axis_dim, grad_output.shape[1]],
            dtype=grad_output.dtype,
        )
        grad_input.put_along_axis_(
            axis=0,
            indices=repeat(indices, "z -> z d", d=grad_output.shape[1]),
            values=grad_output,
        )
        return grad_input.reshape([ctx.first_axis_dim, *other_shape]), None


index_first_axis = IndexFirstAxis.apply


class IndexPutFirstAxis(paddle.autograd.PyLayer):
    """Scatter rows into a zero tensor of length first_axis_dim along the first axis."""

    @staticmethod
    def forward(ctx, values, indices, first_axis_dim):
        ctx.save_for_backward(indices)
        assert indices.ndim == 1
        assert values.ndim >= 2
        output = paddle.zeros(
            shape=[first_axis_dim, *values.shape[1:]], dtype=values.dtype
        )
        output[indices] = values
        return output

    @staticmethod
    def backward(ctx, grad_output):
        # The gradient of a scatter is a gather at the same indices.
        (indices,) = ctx.saved_tensor()
        grad_values = grad_output[indices]
        return grad_values, None


index_put_first_axis = IndexPutFirstAxis.apply
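

# Illustrative sketch (an addition, not part of the original module):
# index_put_first_axis inverts index_first_axis on the selected rows, since
# gathering rows at `indices` and scattering them back into a zero tensor of
# the original length reproduces those rows and leaves zeros elsewhere. The
# helper name below is hypothetical and nothing in this module calls it.
def _index_roundtrip_example():
    x = paddle.arange(12, dtype="float32").reshape([4, 3])  # (first_axis_dim, d)
    x.stop_gradient = False  # enable gradient tracking, as in Paddle's PyLayer examples
    idx = paddle.to_tensor([0, 2], dtype="int64")  # rows to keep
    rows = index_first_axis(x, idx)  # (2, 3): rows 0 and 2 of x
    restored = index_put_first_axis(rows, idx, 4)  # (4, 3): zeros at rows 1 and 3
    return rows, restored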


def unpad_input(hidden_states, attention_mask):
    """
    Arguments:
        hidden_states: (batch, seqlen, ...)
        attention_mask: (batch, seqlen), bool / int, 1 means valid and 0 means not valid.
    Return:
        hidden_states: (total_nnz, ...), where total_nnz = number of tokens selected by attention_mask.
        indices: (total_nnz), the indices of non-masked tokens from the flattened input sequence.
        cu_seqlens: (batch + 1), the cumulative sequence lengths, used to index into hidden_states.
        max_seqlen_in_batch: int
    """
    from einops import rearrange

    seqlens_in_batch = paddle.sum(attention_mask, axis=-1, dtype="int32")
    indices = paddle.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = paddle.max(seqlens_in_batch).item()
    cu_seqlens = F.pad(paddle.cumsum(seqlens_in_batch, axis=0), [1, 0])
    return (
        index_first_axis(rearrange(hidden_states, "b s ... -> (b s) ..."), indices),
        indices,
        cu_seqlens,
        max_seqlen_in_batch,
    )


def pad_input(hidden_states, indices, batch, seqlen):
    """
    Arguments:
        hidden_states: (total_nnz, ...), where total_nnz = number of tokens selected by attention_mask.
        indices: (total_nnz), the indices that represent the non-masked tokens of the original padded input sequence.
        batch: int, batch size for the padded sequence.
        seqlen: int, maximum sequence length for the padded sequence.
    Return:
        hidden_states: (batch, seqlen, ...)
    """
    from einops import rearrange

    output = index_put_first_axis(hidden_states, indices, batch * seqlen)
    return rearrange(output, "(b s) ... -> b s ...", b=batch)
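

# Minimal usage sketch (an addition for illustration, not part of the original
# file), assuming a toy batch of shape (2, 4, 8) and an integer attention mask:
# unpad_input drops the padded positions, pad_input restores them as zeros.
if __name__ == "__main__":
    paddle.seed(0)
    batch, seqlen, hidden = 2, 4, 8
    hidden_states = paddle.randn([batch, seqlen, hidden])
    hidden_states.stop_gradient = False  # enable gradient tracking, as in Paddle's PyLayer examples
    # Sequence lengths 3 and 2: the trailing positions are padding.
    attention_mask = paddle.to_tensor([[1, 1, 1, 0], [1, 1, 0, 0]], dtype="int32")

    unpadded, indices, cu_seqlens, max_seqlen = unpad_input(hidden_states, attention_mask)
    print(unpadded.shape)      # [5, 8]: total_nnz tokens across the batch
    print(cu_seqlens.numpy())  # [0 3 5]: cumulative sequence lengths
    print(max_seqlen)          # 3

    repadded = pad_input(unpadded, indices, batch, seqlen)
    mask = attention_mask.unsqueeze(-1).astype(hidden_states.dtype)
    # Valid positions round-trip exactly; padded positions come back as zeros.
    print(paddle.allclose(repadded * mask, hidden_states * mask).item())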