bert_tokenizer.py

# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unicodedata

from .tokenizer_utils import (
    PretrainedTokenizer,
    _is_control,
    _is_punctuation,
    _is_symbol,
    _is_whitespace,
    convert_to_unicode,
    whitespace_tokenize,
)

__all__ = [
    "BasicTokenizer",
    "BertTokenizer",
    "WordpieceTokenizer",
]


class BasicTokenizer(object):
    """
    Runs basic tokenization (punctuation splitting, lower casing, etc.).

    Args:
        do_lower_case (bool):
            Whether to lowercase the input when tokenizing.
            Defaults to `True`.
        never_split (Iterable):
            Collection of tokens which will never be split during tokenization. Only has an effect when
            `do_basic_tokenize=True`.
        tokenize_chinese_chars (bool):
            Whether to tokenize Chinese characters.
        strip_accents (bool):
            Whether to strip all accents. If this option is not specified, then it will be determined by the
            value for `do_lower_case` (as in the original BERT).
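
    Examples:
        .. code-block::

            # Illustrative sketch of the default behavior: with the defaults,
            # accents are stripped, text is lowercased, and punctuation is split
            # into separate tokens. The output shown is what the rules above imply.
            from paddlenlp.transformers import BasicTokenizer

            basictokenizer = BasicTokenizer(do_lower_case=True)
            tokens = basictokenizer.tokenize('Héllo, World!')
            '''
            ['hello', ',', 'world', '!']
            '''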
    """

    def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None):
        """Constructs a BasicTokenizer."""
        if never_split is None:
            never_split = []
        self.do_lower_case = do_lower_case
        self.never_split = set(never_split)
        self.tokenize_chinese_chars = tokenize_chinese_chars
        self.strip_accents = strip_accents

    def tokenize(self, text, never_split=None):
        """
        Tokenizes a piece of text using basic tokenizer.

        Args:
            text (str): A piece of text.
            never_split (List[str]): List of tokens not to split.

        Returns:
            list(str): A list of tokens.

        Examples:
            .. code-block::

                from paddlenlp.transformers import BasicTokenizer

                basictokenizer = BasicTokenizer()
                tokens = basictokenizer.tokenize('He was a puppeteer')
                '''
                ['he', 'was', 'a', 'puppeteer']
                '''
        """
        text = convert_to_unicode(text)
        never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
        text = self._clean_text(text)
        if self.tokenize_chinese_chars:
            text = self._tokenize_chinese_chars(text)
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if token not in never_split:
                if self.do_lower_case:
                    token = token.lower()
                    if self.strip_accents is not False:
                        token = self._run_strip_accents(token)
                elif self.strip_accents:
                    token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token, never_split))
        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        """
        Strips accents from a piece of text.
        """
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _run_split_on_punc(self, text, never_split=None):
        """
        Splits punctuation on a piece of text.
        """
        if never_split is not None and text in never_split:
            return [text]
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            # Punctuation and symbols should be treated as single characters.
            if _is_punctuation(char) or _is_symbol(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1
        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """
        Adds whitespace around any CJK character.
        """
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """
        Checks whether CP is the codepoint of a CJK character.
        """
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as are Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and are handled
        # like all the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True
        return False

    def _clean_text(self, text):
        """
        Performs invalid character removal and whitespace cleanup on text.
        """
        output = []
        for char in text:
            cp = ord(char)
            if cp == 0 or cp == 0xFFFD or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)


class WordpieceTokenizer(object):
    """
    Runs WordPiece tokenization.

    Args:
        vocab (Vocab|dict):
            Vocab of the word piece tokenizer.
        unk_token (str):
            A specific token to replace all unknown tokens.
        max_input_chars_per_word (int):
            If a word's length is more than max_input_chars_per_word, it will be
            treated as an unknown word. Defaults to 100.
    """

    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """
        Tokenizes a piece of text into its word pieces.
        This uses a greedy longest-match-first algorithm to perform tokenization
        using the given vocabulary.

        Args:
            text: A single token or whitespace separated tokens. This should have
                already been passed through `BasicTokenizer`.

        Returns:
            list (str): A list of wordpiece tokens.

        Examples:
            .. code-block::

                from paddlenlp.transformers import BertTokenizer, WordpieceTokenizer

                berttokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
                vocab = berttokenizer.vocab
                unk_token = berttokenizer.unk_token

                wordpiecetokenizer = WordpieceTokenizer(vocab, unk_token)
                inputs = wordpiecetokenizer.tokenize("unaffable")
                print(inputs)
                '''
                ["un", "##aff", "##able"]
                '''
        """
        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue

            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                end = len(chars)
                cur_substr = None
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        substr = "##" + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end

            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens


class BertTokenizer(PretrainedTokenizer):
    """
    Constructs a BERT tokenizer. It uses a basic tokenizer to do punctuation
    splitting, lower casing and so on, and follows a WordPiece tokenizer to
    tokenize as subwords.

    Args:
        vocab_file (str):
            The vocabulary file path (ends with '.txt') required to instantiate
            a `WordpieceTokenizer`.
        do_lower_case (bool, optional):
            Whether to lowercase the input when tokenizing.
            Defaults to `True`.
        do_basic_tokenize (bool, optional):
            Whether to use a basic tokenizer before a WordPiece tokenizer.
            Defaults to `True`.
        never_split (Iterable, optional):
            Collection of tokens which will never be split during tokenization. Only has an effect when
            `do_basic_tokenize=True`. Defaults to `None`.
        unk_token (str, optional):
            A special token representing the *unknown (out-of-vocabulary)* token.
            An unknown token is set to be `unk_token` in order to be converted to an ID.
            Defaults to "[UNK]".
        sep_token (str, optional):
            A special token separating two different sentences in the same input.
            Defaults to "[SEP]".
        pad_token (str, optional):
            A special token used to make arrays of tokens the same size for batching purposes.
            Defaults to "[PAD]".
        cls_token (str, optional):
            A special token used for sequence classification. It is the first token
            of the sequence when built with special tokens. Defaults to "[CLS]".
        mask_token (str, optional):
            A special token representing a masked token. This is the token used
            in the masked language modeling task, for which the model tries to predict the original unmasked token.
            Defaults to "[MASK]".
        tokenize_chinese_chars (bool, optional):
            Whether to tokenize Chinese characters.
            Defaults to `True`.
        strip_accents (bool, optional):
            Whether to strip all accents. If this option is not specified, then it will be determined by the
            value for `do_lower_case` (as in the original BERT).
            Defaults to `None`.

    Examples:
        .. code-block::

            from paddlenlp.transformers import BertTokenizer

            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            inputs = tokenizer('He was a puppeteer')
            print(inputs)
            '''
            {'input_ids': [101, 2002, 2001, 1037, 13997, 11510, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0]}
            '''
    """

    resource_files_names = {"vocab_file": "vocab.txt"}  # for save_pretrained
    pretrained_resource_files_map = {
        "vocab_file": {
            "bert-base-uncased": "https://bj.bcebos.com/paddle-hapi/models/bert/bert-base-uncased-vocab.txt",
            "bert-large-uncased": "https://bj.bcebos.com/paddle-hapi/models/bert/bert-large-uncased-vocab.txt",
            "bert-base-cased": "https://bj.bcebos.com/paddle-hapi/models/bert/bert-base-cased-vocab.txt",
            "bert-large-cased": "https://bj.bcebos.com/paddle-hapi/models/bert/bert-large-cased-vocab.txt",
            "bert-base-multilingual-uncased": "https://bj.bcebos.com/paddle-hapi/models/bert/bert-base-multilingual-uncased-vocab.txt",
            "bert-base-multilingual-cased": "https://bj.bcebos.com/paddle-hapi/models/bert/bert-base-multilingual-cased-vocab.txt",
            "bert-base-chinese": "https://bj.bcebos.com/paddle-hapi/models/bert/bert-base-chinese-vocab.txt",
            "bert-wwm-chinese": "http://bj.bcebos.com/paddlenlp/models/transformers/bert/bert-wwm-chinese-vocab.txt",
            "bert-wwm-ext-chinese": "http://bj.bcebos.com/paddlenlp/models/transformers/bert/bert-wwm-ext-chinese-vocab.txt",
            "macbert-large-chinese": "https://bj.bcebos.com/paddle-hapi/models/bert/bert-base-chinese-vocab.txt",
            "macbert-base-chinese": "https://bj.bcebos.com/paddle-hapi/models/bert/bert-base-chinese-vocab.txt",
            "simbert-base-chinese": "https://bj.bcebos.com/paddlenlp/models/transformers/simbert/vocab.txt",
            "uer/chinese-roberta-base": "https://bj.bcebos.com/paddlenlp/models/transformers/uer/chinese_roberta_vocab.txt",
            "uer/chinese-roberta-medium": "https://bj.bcebos.com/paddlenlp/models/transformers/uer/chinese_roberta_vocab.txt",
            "uer/chinese-roberta-6l-768h": "https://bj.bcebos.com/paddlenlp/models/transformers/uer/chinese_roberta_vocab.txt",
            "uer/chinese-roberta-small": "https://bj.bcebos.com/paddlenlp/models/transformers/uer/chinese_roberta_vocab.txt",
            "uer/chinese-roberta-mini": "https://bj.bcebos.com/paddlenlp/models/transformers/uer/chinese_roberta_vocab.txt",
            "uer/chinese-roberta-tiny": "https://bj.bcebos.com/paddlenlp/models/transformers/uer/chinese_roberta_vocab.txt",
        }
    }
    pretrained_init_configuration = {
        "bert-base-uncased": {"do_lower_case": True},
        "bert-large-uncased": {"do_lower_case": True},
        "bert-base-cased": {"do_lower_case": False},
        "bert-large-cased": {"do_lower_case": False},
        "bert-base-multilingual-uncased": {"do_lower_case": True},
        "bert-base-multilingual-cased": {"do_lower_case": False},
        "bert-base-chinese": {"do_lower_case": False},
        "bert-wwm-chinese": {"do_lower_case": False},
        "bert-wwm-ext-chinese": {"do_lower_case": False},
        "macbert-large-chinese": {"do_lower_case": False},
        "macbert-base-chinese": {"do_lower_case": False},
        "simbert-base-chinese": {"do_lower_case": True},
        "uer/chinese-roberta-base": {"do_lower_case": True},
        "uer/chinese-roberta-medium": {"do_lower_case": True},
        "uer/chinese-roberta-6l-768h": {"do_lower_case": True},
        "uer/chinese-roberta-small": {"do_lower_case": True},
        "uer/chinese-roberta-mini": {"do_lower_case": True},
        "uer/chinese-roberta-tiny": {"do_lower_case": True},
    }
    max_model_input_sizes = {
        "bert-base-uncased": 512,
        "bert-large-uncased": 512,
        "bert-base-cased": 512,
        "bert-large-cased": 512,
        "bert-base-multilingual-uncased": 512,
        "bert-base-multilingual-cased": 512,
        "bert-base-chinese": 512,
        "bert-wwm-chinese": 512,
        "bert-wwm-ext-chinese": 512,
        "macbert-large-chinese": 512,
        "macbert-base-chinese": 512,
        "simbert-base-chinese": 512,
        "uer/chinese-roberta-base": 512,
        "uer/chinese-roberta-medium": 512,
        "uer/chinese-roberta-6l-768h": 512,
        "uer/chinese-roberta-small": 512,
        "uer/chinese-roberta-mini": 512,
        "uer/chinese-roberta-tiny": 512,
    }
    padding_side = "right"

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        do_basic_tokenize=True,
        never_split=None,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs
    ):
        if not os.path.isfile(vocab_file):
            raise ValueError(
                "Can't find a vocabulary file at path '{}'. To load the "
                "vocabulary from a pretrained model please use "
                "`tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)
            )
        self.do_lower_case = do_lower_case
        self.vocab = self.load_vocabulary(vocab_file, unk_token=unk_token)
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(
                do_lower_case=do_lower_case,
                never_split=never_split,
                tokenize_chinese_chars=tokenize_chinese_chars,
                strip_accents=strip_accents,
            )
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=unk_token)

    @property
    def vocab_size(self):
        """
        Return the size of the vocabulary.

        Returns:
            int: The size of the vocabulary.
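
        Examples:
            .. code-block::

                # Illustrative only: the printed size depends on the vocabulary
                # file; the standard 'bert-base-uncased' vocab has 30522 entries.
                from paddlenlp.transformers import BertTokenizer

                tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
                print(tokenizer.vocab_size)
                '''
                30522
                '''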
        """
        return len(self.vocab)

    def get_vocab(self):
        """Returns the vocabulary (tokens mapped to ids), including added tokens."""
        return dict(self.vocab.token_to_idx, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """
        End-to-end tokenization for BERT models.

        Args:
            text (str): The text to be tokenized.

        Returns:
            list: A list of strings representing the converted tokens.
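
        Examples:
            .. code-block::

                # Illustrative sketch via the public `tokenize` entry point, which
                # dispatches to this method; the output matches the wordpiece
                # split shown in `convert_tokens_to_string` below.
                from paddlenlp.transformers import BertTokenizer

                tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
                tokens = tokenizer.tokenize('He was a puppeteer')
                '''
                ['he', 'was', 'a', 'puppet', '##eer']
                '''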
        """
        split_tokens = []
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
                # If the token is part of the never_split set
                if token in self.basic_tokenizer.never_split:
                    split_tokens.append(token)
                else:
                    split_tokens += self.wordpiece_tokenizer.tokenize(token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens

    def convert_tokens_to_string(self, tokens):
        """
        Converts a sequence of tokens (list of string) to a single string. Since
        WordPiece uses `##` to mark subwords, the `##` prefixes are also removed
        when converting.

        Args:
            tokens (list): A list of string representing tokens to be converted.

        Returns:
            str: Converted string from tokens.

        Examples:
            .. code-block::

                from paddlenlp.transformers import BertTokenizer

                berttokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
                tokens = berttokenizer.tokenize('He was a puppeteer')
                '''
                ['he', 'was', 'a', 'puppet', '##eer']
                '''
                strings = berttokenizer.convert_tokens_to_string(tokens)
                '''
                he was a puppeteer
                '''
        """
        out_string = " ".join(tokens).replace(" ##", "").strip()
        return out_string

    def num_special_tokens_to_add(self, pair=False):
        """
        Returns the number of added tokens when encoding a sequence with special tokens.

        Args:
            pair (bool):
                Whether the input is a sequence pair or a single sequence.
                Defaults to `False`, in which case the input is a single sequence.

        Returns:
            int: Number of tokens added to sequences.
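
        Examples:
            .. code-block::

                # Illustrative: for BERT inputs, 2 special tokens ([CLS], [SEP]) are
                # added to a single sequence and 3 ([CLS], [SEP], [SEP]) to a pair.
                from paddlenlp.transformers import BertTokenizer

                tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
                print(tokenizer.num_special_tokens_to_add())           # 2
                print(tokenizer.num_special_tokens_to_add(pair=True))  # 3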
        """
        token_ids_0 = []
        token_ids_1 = []
        return len(self.build_inputs_with_special_tokens(token_ids_0, token_ids_1 if pair else None))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by
        concatenating and adding special tokens.

        A BERT sequence has the following format:

        - single sequence:      ``[CLS] X [SEP]``
        - pair of sequences:    ``[CLS] A [SEP] B [SEP]``

        Args:
            token_ids_0 (List[int]):
                List of IDs to which the special tokens will be added.
            token_ids_1 (List[int], optional):
                Optional second list of IDs for sequence pairs. Defaults to None.

        Returns:
            List[int]: List of input_id with the appropriate special tokens.
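
        Examples:
            .. code-block::

                # Illustrative: 101 and 102 are the [CLS] and [SEP] ids in the
                # 'bert-base-uncased' vocabulary (see the class-level example).
                from paddlenlp.transformers import BertTokenizer

                tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
                print(tokenizer.build_inputs_with_special_tokens([2002, 2001]))
                '''
                [101, 2002, 2001, 102]
                '''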
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        """
        Build an offset map from a pair of offset maps by concatenating and adding the offsets of special tokens.

        A BERT offset_mapping has the following format:

        - single sequence:      ``(0,0) X (0,0)``
        - pair of sequences:    ``(0,0) A (0,0) B (0,0)``

        Args:
            offset_mapping_0 (List[tuple]):
                List of wordpiece offsets to which the special tokens will be added.
            offset_mapping_1 (List[tuple], optional):
                Optional second list of wordpiece offsets for offset mapping pairs. Defaults to None.

        Returns:
            List[tuple]: A list of wordpiece offsets with the appropriate offsets of special tokens.
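
        Examples:
            .. code-block::

                # Illustrative: hypothetical character offsets for a two-wordpiece
                # sequence; (0, 0) placeholders are added for [CLS] and [SEP].
                from paddlenlp.transformers import BertTokenizer

                tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
                print(tokenizer.build_offset_mapping_with_special_tokens([(0, 2), (3, 8)]))
                '''
                [(0, 0), (0, 2), (3, 8), (0, 0)]
                '''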
        """
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0)] + offset_mapping_1 + [(0, 0)]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task.

        A BERT sequence pair mask has the following format:
        ::

            0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
            | first sequence    | second sequence |

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (List[int]):
                A list of `inputs_ids` for the first sequence.
            token_ids_1 (List[int], optional):
                Optional second list of IDs for sequence pairs. Defaults to None.

        Returns:
            List[int]: List of token_type_id according to the given sequence(s).
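
        Examples:
            .. code-block::

                # Illustrative: 4 positions ([CLS] + 2 tokens + [SEP]) get type 0,
                # and 2 positions (1 token + [SEP]) get type 1.
                from paddlenlp.transformers import BertTokenizer

                tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
                print(tokenizer.create_token_type_ids_from_sequences([2002, 2001], [13997]))
                '''
                [0, 0, 0, 0, 1, 1]
                '''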
        """
        _sep = [self.sep_token_id]
        _cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(_cls + token_ids_0 + _sep) * [0]
        return len(_cls + token_ids_0 + _sep) * [0] + len(token_ids_1 + _sep) * [1]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """
        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer ``encode`` methods.

        Args:
            token_ids_0 (List[int]):
                A list of `inputs_ids` for the first sequence.
            token_ids_1 (List[int], optional):
                Optional second list of IDs for sequence pairs. Defaults to None.
            already_has_special_tokens (bool, optional):
                Whether or not the token list is already formatted with special tokens for the model.
                Defaults to `False`.

        Returns:
            List[int]: A list of integers that are either 0 or 1: 1 for a special token, 0 for a sequence token.
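
        Examples:
            .. code-block::

                # Illustrative: positions where [CLS] and [SEP] would be added are
                # marked with 1, the original sequence tokens with 0.
                from paddlenlp.transformers import BertTokenizer

                tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
                print(tokenizer.get_special_tokens_mask([2002, 2001]))
                '''
                [1, 0, 0, 1]
                '''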
        """
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return list(map(lambda x: 1 if x in self.all_special_ids else 0, token_ids_0))

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.vocab._idx_to_token.get(index, self.unk_token)