# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unicodedata

from .tokenizer_utils import (
    PretrainedTokenizer,
    _is_control,
    _is_punctuation,
    _is_symbol,
    _is_whitespace,
    convert_to_unicode,
    whitespace_tokenize,
)

__all__ = [
    "BasicTokenizer",
    "BertTokenizer",
    "WordpieceTokenizer",
]


class BasicTokenizer(object):
    """
    Runs basic tokenization (punctuation splitting, lower casing, etc.).

    Args:
        do_lower_case (bool):
            Whether to lowercase the input when tokenizing.
            Defaults to `True`.
        never_split (Iterable):
            Collection of tokens which will never be split during tokenization. Only has an effect when
            `do_basic_tokenize=True`.
        tokenize_chinese_chars (bool):
            Whether to tokenize Chinese characters.
        strip_accents (bool):
            Whether to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original BERT).
    """

    def __init__(
        self,
        do_lower_case=True,
        never_split=None,
        tokenize_chinese_chars=True,
        strip_accents=None,
    ):
        """Constructs a BasicTokenizer."""
        if never_split is None:
            never_split = []
        self.do_lower_case = do_lower_case
        self.never_split = set(never_split)
        self.tokenize_chinese_chars = tokenize_chinese_chars
        self.strip_accents = strip_accents

    def tokenize(self, text, never_split=None):
        """
        Tokenizes a piece of text using the basic tokenizer.

        Args:
            text (str): A piece of text.
            never_split (List[str]): A list of tokens not to split.

        Returns:
            list(str): A list of tokens.

        Examples:
            .. code-block::

                from paddlenlp.transformers import BasicTokenizer

                basictokenizer = BasicTokenizer()
                tokens = basictokenizer.tokenize('He was a puppeteer')
                '''
                ['he', 'was', 'a', 'puppeteer']
                '''
        """
        text = convert_to_unicode(text)
        never_split = (
            self.never_split.union(set(never_split))
            if never_split
            else self.never_split
        )
        text = self._clean_text(text)

        if self.tokenize_chinese_chars:
            text = self._tokenize_chinese_chars(text)
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if token not in never_split:
                if self.do_lower_case:
                    token = token.lower()
                    if self.strip_accents is not False:
                        token = self._run_strip_accents(token)
                elif self.strip_accents:
                    token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token, never_split))

        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens
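
    # A minimal illustration of how `do_lower_case` and `strip_accents` interact in
    # `tokenize` (token values assume the default accent-stripping behaviour):
    #
    #   BasicTokenizer().tokenize("Héllo, World!")
    #   # -> ['hello', ',', 'world', '!']
    #
    #   BasicTokenizer(do_lower_case=False, strip_accents=False).tokenize("Héllo, World!")
    #   # -> ['Héllo', ',', 'World', '!']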

    def _run_strip_accents(self, text):
        """
        Strips accents from a piece of text.
        """
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)
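
    # A short sketch of the accent-stripping step: NFD normalization decomposes a character
    # such as "é" into "e" plus a combining accent (Unicode category "Mn"), and the loop
    # above drops the combining mark:
    #
    #   BasicTokenizer()._run_strip_accents("naïve café")
    #   # -> "naive cafe"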

    def _run_split_on_punc(self, text, never_split=None):
        """
        Splits punctuation on a piece of text.
        """
        if never_split is not None and text in never_split:
            return [text]
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            # Punctuation and symbols should be treated as single-char tokens.
            if _is_punctuation(char) or _is_symbol(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                    start_new_word = False
                output[-1].append(char)
            i += 1

        return ["".join(x) for x in output]
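
    # A short sketch of the punctuation split: every punctuation or symbol character
    # becomes its own token, while runs of other characters stay together:
    #
    #   BasicTokenizer()._run_split_on_punc("can't-stop.")
    #   # -> ['can', "'", 't', '-', 'stop', '.']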

    def _tokenize_chinese_chars(self, text):
        """
        Adds whitespace around any CJK character.
        """
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """
        Checks whether CP is the codepoint of a CJK character.
        """
        # This defines a "chinese character" as anything in the CJK Unicode block:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and are handled
        # like all the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True

        return False
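
    # A short sketch of the CJK handling: ideographs are padded with spaces so that each
    # one later becomes its own whitespace token, while Latin text is left untouched:
    #
    #   BasicTokenizer()._tokenize_chinese_chars("BERT很强")
    #   # -> "BERT 很  强 "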

    def _clean_text(self, text):
        """
        Performs invalid character removal and whitespace cleanup on text.
        """
        output = []
        for char in text:
            cp = ord(char)
            if cp == 0 or cp == 0xFFFD or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)


class WordpieceTokenizer(object):
    """
    Runs WordPiece tokenization.

    Args:
        vocab (Vocab|dict):
            Vocab of the word piece tokenizer.
        unk_token (str):
            A specific token to replace all unknown tokens.
        max_input_chars_per_word (int):
            If a word's length is more than max_input_chars_per_word, it will be
            treated as an unknown word. Defaults to 100.
    """

    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """
        Tokenizes a piece of text into its word pieces.
        This uses a greedy longest-match-first algorithm to perform tokenization
        using the given vocabulary.

        Args:
            text: A single token or whitespace separated tokens. This should have
                already been passed through `BasicTokenizer`.

        Returns:
            list (str): A list of wordpiece tokens.

        Examples:
            .. code-block::

                from paddlenlp.transformers import BertTokenizer, WordpieceTokenizer

                berttokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
                vocab = berttokenizer.vocab
                unk_token = berttokenizer.unk_token

                wordpiecetokenizer = WordpieceTokenizer(vocab, unk_token)
                inputs = wordpiecetokenizer.tokenize("unaffable")
                print(inputs)
                '''
                ["un", "##aff", "##able"]
                '''
        """
        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue

            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                end = len(chars)
                cur_substr = None
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        substr = "##" + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end

            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens
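
    # A walk-through of the greedy longest-match-first loop above on "unaffable",
    # assuming (as in the docstring example) that the longest matching pieces in the
    # vocab are "un", "##aff" and "##able":
    #
    #   start=0: "unaffable", "unaffabl", ... are tried until "un" is found in the vocab
    #   start=2: later pieces are prefixed with "##"; "##affable", ... until "##aff" matches
    #   start=5: "##able" matches and start reaches len(chars), ending the loop
    #
    # giving ["un", "##aff", "##able"]. If some position yields no match at all, the whole
    # word is replaced by `unk_token`.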


class BertTokenizer(PretrainedTokenizer):
    """
    Constructs a BERT tokenizer. It uses a basic tokenizer to do punctuation
    splitting, lower casing and so on, and then a WordPiece tokenizer to
    tokenize words into subwords.

    Args:
        vocab_file (str):
            The vocabulary file path (ends with '.txt') required to instantiate
            a `WordpieceTokenizer`.
        do_lower_case (bool, optional):
            Whether to lowercase the input when tokenizing.
            Defaults to `True`.
        do_basic_tokenize (bool, optional):
            Whether to use a basic tokenizer before a WordPiece tokenizer.
            Defaults to `True`.
        never_split (Iterable, optional):
            Collection of tokens which will never be split during tokenization. Only has an effect when
            `do_basic_tokenize=True`. Defaults to `None`.
        unk_token (str, optional):
            A special token representing the *unknown (out-of-vocabulary)* token.
            An unknown token is set to be `unk_token` in order to be converted to an ID.
            Defaults to "[UNK]".
        sep_token (str, optional):
            A special token separating two different sentences in the same input.
            Defaults to "[SEP]".
        pad_token (str, optional):
            A special token used to make arrays of tokens the same size for batching purposes.
            Defaults to "[PAD]".
        cls_token (str, optional):
            A special token used for sequence classification. It is the first token
            of the sequence when built with special tokens. Defaults to "[CLS]".
        mask_token (str, optional):
            A special token representing a masked token. This is the token used
            in the masked language modeling task, for which the model tries to predict the original unmasked token.
            Defaults to "[MASK]".
        tokenize_chinese_chars (bool, optional):
            Whether to tokenize Chinese characters.
            Defaults to `True`.
        strip_accents (bool, optional):
            Whether to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original BERT).
            Defaults to `None`.

    Examples:
        .. code-block::

            from paddlenlp.transformers import BertTokenizer

            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            inputs = tokenizer('He was a puppeteer')
            print(inputs)
            '''
            {'input_ids': [101, 2002, 2001, 1037, 13997, 11510, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0]}
            '''
    """

    resource_files_names = {"vocab_file": "vocab.txt"}  # for save_pretrained
    pretrained_resource_files_map = {
        "vocab_file": {
            "bert-base-uncased": "https://bj.bcebos.com/paddle-hapi/models/bert/bert-base-uncased-vocab.txt",
            "bert-large-uncased": "https://bj.bcebos.com/paddle-hapi/models/bert/bert-large-uncased-vocab.txt",
            "bert-base-cased": "https://bj.bcebos.com/paddle-hapi/models/bert/bert-base-cased-vocab.txt",
            "bert-large-cased": "https://bj.bcebos.com/paddle-hapi/models/bert/bert-large-cased-vocab.txt",
            "bert-base-multilingual-uncased": "https://bj.bcebos.com/paddle-hapi/models/bert/bert-base-multilingual-uncased-vocab.txt",
            "bert-base-multilingual-cased": "https://bj.bcebos.com/paddle-hapi/models/bert/bert-base-multilingual-cased-vocab.txt",
            "bert-base-chinese": "https://bj.bcebos.com/paddle-hapi/models/bert/bert-base-chinese-vocab.txt",
            "bert-wwm-chinese": "http://bj.bcebos.com/paddlenlp/models/transformers/bert/bert-wwm-chinese-vocab.txt",
            "bert-wwm-ext-chinese": "http://bj.bcebos.com/paddlenlp/models/transformers/bert/bert-wwm-ext-chinese-vocab.txt",
            "macbert-large-chinese": "https://bj.bcebos.com/paddle-hapi/models/bert/bert-base-chinese-vocab.txt",
            "macbert-base-chinese": "https://bj.bcebos.com/paddle-hapi/models/bert/bert-base-chinese-vocab.txt",
            "simbert-base-chinese": "https://bj.bcebos.com/paddlenlp/models/transformers/simbert/vocab.txt",
            "uer/chinese-roberta-base": "https://bj.bcebos.com/paddlenlp/models/transformers/uer/chinese_roberta_vocab.txt",
            "uer/chinese-roberta-medium": "https://bj.bcebos.com/paddlenlp/models/transformers/uer/chinese_roberta_vocab.txt",
            "uer/chinese-roberta-6l-768h": "https://bj.bcebos.com/paddlenlp/models/transformers/uer/chinese_roberta_vocab.txt",
            "uer/chinese-roberta-small": "https://bj.bcebos.com/paddlenlp/models/transformers/uer/chinese_roberta_vocab.txt",
            "uer/chinese-roberta-mini": "https://bj.bcebos.com/paddlenlp/models/transformers/uer/chinese_roberta_vocab.txt",
            "uer/chinese-roberta-tiny": "https://bj.bcebos.com/paddlenlp/models/transformers/uer/chinese_roberta_vocab.txt",
        }
    }
    pretrained_init_configuration = {
        "bert-base-uncased": {"do_lower_case": True},
        "bert-large-uncased": {"do_lower_case": True},
        "bert-base-cased": {"do_lower_case": False},
        "bert-large-cased": {"do_lower_case": False},
        "bert-base-multilingual-uncased": {"do_lower_case": True},
        "bert-base-multilingual-cased": {"do_lower_case": False},
        "bert-base-chinese": {"do_lower_case": False},
        "bert-wwm-chinese": {"do_lower_case": False},
        "bert-wwm-ext-chinese": {"do_lower_case": False},
        "macbert-large-chinese": {"do_lower_case": False},
        "macbert-base-chinese": {"do_lower_case": False},
        "simbert-base-chinese": {"do_lower_case": True},
        "uer/chinese-roberta-base": {"do_lower_case": True},
        "uer/chinese-roberta-medium": {"do_lower_case": True},
        "uer/chinese-roberta-6l-768h": {"do_lower_case": True},
        "uer/chinese-roberta-small": {"do_lower_case": True},
        "uer/chinese-roberta-mini": {"do_lower_case": True},
        "uer/chinese-roberta-tiny": {"do_lower_case": True},
    }
    max_model_input_sizes = {
        "bert-base-uncased": 512,
        "bert-large-uncased": 512,
        "bert-base-cased": 512,
        "bert-large-cased": 512,
        "bert-base-multilingual-uncased": 512,
        "bert-base-multilingual-cased": 512,
        "bert-base-chinese": 512,
        "bert-wwm-chinese": 512,
        "bert-wwm-ext-chinese": 512,
        "macbert-large-chinese": 512,
        "macbert-base-chinese": 512,
        "simbert-base-chinese": 512,
        "uer/chinese-roberta-base": 512,
        "uer/chinese-roberta-medium": 512,
        "uer/chinese-roberta-6l-768h": 512,
        "uer/chinese-roberta-small": 512,
        "uer/chinese-roberta-mini": 512,
        "uer/chinese-roberta-tiny": 512,
    }
    padding_side = "right"

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        do_basic_tokenize=True,
        never_split=None,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs
    ):
        if not os.path.isfile(vocab_file):
            raise ValueError(
                "Can't find a vocabulary file at path '{}'. To load the "
                "vocabulary from a pretrained model please use "
                "`tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    vocab_file
                )
            )
        self.do_lower_case = do_lower_case
        self.vocab = self.load_vocabulary(vocab_file, unk_token=unk_token)
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(
                do_lower_case=do_lower_case,
                never_split=never_split,
                tokenize_chinese_chars=tokenize_chinese_chars,
                strip_accents=strip_accents,
            )
        self.wordpiece_tokenizer = WordpieceTokenizer(
            vocab=self.vocab, unk_token=unk_token
        )

    @property
    def vocab_size(self):
        """
        Return the size of vocabulary.

        Returns:
            int: The size of vocabulary.
        """
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab.token_to_idx, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """
        End-to-end tokenization for BERT models.

        Args:
            text (str): The text to be tokenized.

        Returns:
            list: A list of string representing converted tokens.
        """
        split_tokens = []
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(
                text, never_split=self.all_special_tokens
            ):
                # If the token is part of the never_split set
                if token in self.basic_tokenizer.never_split:
                    split_tokens.append(token)
                else:
                    split_tokens += self.wordpiece_tokenizer.tokenize(token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens
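
    # A minimal usage sketch of the two-stage pipeline above (basic tokenization followed
    # by WordPiece), assuming `tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')`:
    #
    #   tokenizer._tokenize('He was a puppeteer')
    #   # -> ['he', 'was', 'a', 'puppet', '##eer']
    #
    # Special tokens such as "[SEP]" are passed to the basic tokenizer through
    # `never_split`, so basic tokenization keeps them intact.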

    def convert_tokens_to_string(self, tokens):
        """
        Converts a sequence of tokens (list of string) to a single string. Since
        WordPiece introduces `##` to mark subwords, this also removes `##` when
        converting.

        Args:
            tokens (list): A list of string representing tokens to be converted.

        Returns:
            str: Converted string from tokens.

        Examples:
            .. code-block::

                from paddlenlp.transformers import BertTokenizer

                berttokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
                tokens = berttokenizer.tokenize('He was a puppeteer')
                '''
                ['he', 'was', 'a', 'puppet', '##eer']
                '''
                strings = berttokenizer.convert_tokens_to_string(tokens)
                '''
                he was a puppeteer
                '''
        """
        out_string = " ".join(tokens).replace(" ##", "").strip()
        return out_string

    def num_special_tokens_to_add(self, pair=False):
        """
        Returns the number of added tokens when encoding a sequence with special tokens.

        Args:
            pair (bool):
                Whether the input is a sequence pair or a single sequence.
                Defaults to `False` and the input is a single sequence.

        Returns:
            int: Number of tokens added to sequences.
        """
        token_ids_0 = []
        token_ids_1 = []
        return len(
            self.build_inputs_with_special_tokens(
                token_ids_0, token_ids_1 if pair else None
            )
        )
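
    # With the BERT templates in `build_inputs_with_special_tokens` below, a single
    # sequence gains `[CLS]` and `[SEP]` and a pair gains `[CLS]` plus two `[SEP]` tokens,
    # so (assuming a loaded tokenizer):
    #
    #   tokenizer.num_special_tokens_to_add(pair=False)
    #   # -> 2
    #   tokenizer.num_special_tokens_to_add(pair=True)
    #   # -> 3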

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by
        concatenating and adding special tokens.

        A BERT sequence has the following format:

        - single sequence:      ``[CLS] X [SEP]``
        - pair of sequences:    ``[CLS] A [SEP] B [SEP]``

        Args:
            token_ids_0 (List[int]):
                List of IDs to which the special tokens will be added.
            token_ids_1 (List[int], optional):
                Optional second list of IDs for sequence pairs. Defaults to None.

        Returns:
            List[int]: List of input_id with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + token_ids_1 + _sep
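
    # A minimal sketch, assuming `tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')`,
    # where `[CLS]` is id 101 and `[SEP]` is id 102:
    #
    #   tokenizer.build_inputs_with_special_tokens([2002, 2001])
    #   # -> [101, 2002, 2001, 102]
    #   tokenizer.build_inputs_with_special_tokens([2002, 2001], [1037])
    #   # -> [101, 2002, 2001, 102, 1037, 102]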

    def build_offset_mapping_with_special_tokens(
        self, offset_mapping_0, offset_mapping_1=None
    ):
        """
        Build an offset map from a pair of offset maps by concatenating and adding offsets of special tokens.

        A BERT offset_mapping has the following format:

        - single sequence:      ``(0,0) X (0,0)``
        - pair of sequences:    ``(0,0) A (0,0) B (0,0)``

        Args:
            offset_mapping_0 (List[tuple]):
                List of wordpiece offsets to which the special tokens will be added.
            offset_mapping_1 (List[tuple], optional):
                Optional second list of wordpiece offsets for offset mapping pairs. Defaults to None.

        Returns:
            List[tuple]: A list of wordpiece offsets with the appropriate offsets of special tokens.
        """
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]

        return [(0, 0)] + offset_mapping_0 + [(0, 0)] + offset_mapping_1 + [(0, 0)]
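
    # Special tokens do not map back to any span of the original text, so they receive the
    # placeholder offset (0, 0). A minimal sketch (assuming a loaded tokenizer):
    #
    #   tokenizer.build_offset_mapping_with_special_tokens([(0, 2), (3, 6)])
    #   # -> [(0, 0), (0, 2), (3, 6), (0, 0)]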

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task.

        A BERT sequence pair mask has the following format:
        ::

            0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
            | first sequence    | second sequence |

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (List[int]):
                A list of `inputs_ids` for the first sequence.
            token_ids_1 (List[int], optional):
                Optional second list of IDs for sequence pairs. Defaults to None.

        Returns:
            List[int]: List of token_type_id according to the given sequence(s).
        """
        _sep = [self.sep_token_id]
        _cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(_cls + token_ids_0 + _sep) * [0]
        return len(_cls + token_ids_0 + _sep) * [0] + len(token_ids_1 + _sep) * [1]
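
    # The type ids line up with the output of `build_inputs_with_special_tokens`:
    # `[CLS] A [SEP]` is labelled 0 and `B [SEP]` is labelled 1. A minimal sketch
    # (assuming a loaded tokenizer):
    #
    #   tokenizer.create_token_type_ids_from_sequences([2002, 2001], [1037])
    #   # -> [0, 0, 0, 0, 1, 1]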

    def get_special_tokens_mask(
        self, token_ids_0, token_ids_1=None, already_has_special_tokens=False
    ):
        """
        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer ``encode`` method.

        Args:
            token_ids_0 (List[int]):
                A list of `inputs_ids` for the first sequence.
            token_ids_1 (List[int], optional):
                Optional second list of IDs for sequence pairs. Defaults to None.
            already_has_special_tokens (bool, optional): Whether or not the token list is already
                formatted with special tokens for the model. Defaults to `False`.

        Returns:
            List[int]: The list of integers, each either 0 or 1: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return list(
                map(lambda x: 1 if x in self.all_special_ids else 0, token_ids_0)
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
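
    # Positions that would hold `[CLS]` or `[SEP]` after encoding are marked 1, sequence
    # tokens are marked 0. A minimal sketch (assuming a loaded tokenizer):
    #
    #   tokenizer.get_special_tokens_mask([2002, 2001], [1037])
    #   # -> [1, 0, 0, 1, 0, 1]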

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.vocab._idx_to_token.get(index, self.unk_token)