tokenizer_utils.py

  1. # Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. import bisect
  15. import functools
  16. import inspect
  17. import io
  18. import itertools
  19. import json
  20. import os
  21. import re
  22. import unicodedata
  23. from collections import OrderedDict
  24. from dataclasses import asdict, dataclass
  25. from functools import lru_cache
  26. from typing import Any, Dict, List, Literal, Optional, Tuple, Union
  27. import numpy as np
  28. from .....utils import logging
  29. from .....utils.deps import class_requires_deps, is_dep_available
  30. from .tokenizer_utils_base import (
  31. CHAT_TEMPLATE_CONFIG_NAME,
  32. AddedToken,
  33. BatchEncoding,
  34. EncodedInput,
  35. EncodedInputPair,
  36. PaddingStrategy,
  37. PreTokenizedInput,
  38. PreTokenizedInputPair,
  39. PretrainedTokenizerBase,
  40. TensorType,
  41. TextInput,
  42. TextInputPair,
  43. TruncationStrategy,
  44. )
  45. from .utils import convert_to_dict_message, fn_args_to_dict
  46. from .vocab import Vocab
  47. if is_dep_available("Jinja2"):
  48. from jinja2 import Template
  49. from jinja2.exceptions import TemplateError, TemplateSyntaxError
  50. from jinja2.sandbox import ImmutableSandboxedEnvironment
  51. __all__ = [
  52. "ChatTemplate",
  53. "Trie",
  54. "ChatTemplateMixin",
  55. "PretrainedTokenizer",
  56. "InitTrackerMeta",
  57. ]
  58. @class_requires_deps("Jinja2")
  59. @dataclass
  60. class ChatTemplate:
  61. conversation: Union[List[str], None] = None
  62. system: Union[str, None] = None
  63. query: str = None
  64. @staticmethod
  65. @lru_cache()
  66. def _compile_jinja_template(chat_template) -> "Template":
  67. def raise_exception(message):
  68. raise TemplateError(message)
  69. jinja_env = ImmutableSandboxedEnvironment(
  70. trim_blocks=True, lstrip_blocks=True, keep_trailing_newline=True
  71. )
  72. jinja_env.globals["raise_exception"] = raise_exception
  73. return jinja_env.from_string(chat_template)
  74. def render_conversation(
  75. self,
  76. conversation_data: Union[List[str], Dict[str, str]],
  77. index: int = 0,
  78. context_data: Dict[str, Any] = {},
  79. ) -> List[str]:
  80. """
  81. Args:
  82. conversation_data (list[str] | dict[str, str]): the conversation data; a list must contain exactly two parts (user query and bot response)
  83. index (int): the index of the current conversation turn
  84. Returns:
  85. list[str]: the rendered conversation data
  86. """
  87. if self.conversation is None:
  88. raise ValueError(
  89. "The template for multi-turns is invalid, please check `conversation` filed in your chat-template."
  90. )
  91. if isinstance(conversation_data, (list, tuple)):
  92. assert (
  93. len(conversation_data) == 2
  94. ), "Each round/turn of conversation must be two participants, eg: [user-query, bot-query]"
  95. conversation_data = {
  96. "user": conversation_data[0],
  97. "bot": conversation_data[1],
  98. "index": index,
  99. }
  100. conversation_data.update(context_data)
  101. one_turn_conversation = []
  102. for conversation in self.conversation:
  103. template = self._compile_jinja_template(conversation)
  104. result = template.render(conversation_data)
  105. one_turn_conversation.append(result)
  106. return one_turn_conversation
  107. def render_query(
  108. self, query: str, index: int = 0, context_data: Dict[str, Union[int, str]] = {}
  109. ):
  110. if self.query is None:
  111. return query
  112. template = self._compile_jinja_template(self.query)
  113. return template.render(query=query, index=index, **context_data)
  114. def _init_context_data(
  115. self, context_data: Dict[str, Union[int, str]] = {}
  116. ) -> Dict[str, Union[int, str]]:
  117. """init the context data for chat-template"""
  118. context_data["is_training"] = context_data.get("is_training", False)
  119. return context_data
  120. def render_system(self, context_data: Dict[str, Union[int, str]] = {}) -> str:
  121. if self.system is None:
  122. return ""
  123. template = self._compile_jinja_template(self.system)
  124. return template.render(**context_data)
  125. def __call__(
  126. self,
  127. conversations: Union[List[List[str]], str],
  128. context_data: Dict[str, Union[int, str]] = {},
  129. ) -> str:
  130. """render the conversations by chat-template
  131. Args:
  132. conversations (list[list[str]]): the conversations between user and bot
  133. Returns:
  134. str: the rendered conversation
  135. """
  136. if isinstance(conversations, str):
  137. conversations = [[conversations]]
  138. # [1 ... n-1] conversation
  139. final_query = self.render_system(context_data=context_data)
  140. context_data["length"] = len(conversations)
  141. for index, conversation in enumerate(conversations[:-1]):
  142. context_data["is_first"] = index == 0
  143. context_data["is_last"] = False
  144. final_query += "".join(
  145. self.render_conversation(
  146. conversation, index=index, context_data=context_data
  147. )
  148. )
  149. if not isinstance(conversations[-1], list) or len(conversations[-1]) == 0:
  150. raise ValueError(
  151. "The last conversation must be a non-empty list ending with the final user query, e.g.: [[user-query, bot-answer], [user-query, bot-answer], ..., [user-query]]"
  152. )
  153. if len(conversations[-1]) > 1:
  154. logging.warning(
  155. f"The last conversation is not a single-round, chat-template will skip the conversation: {conversations[-1][1:]}"
  156. )
  157. final_query += self.render_query(
  158. conversations[-1][0],
  159. index=len(conversations) - 1,
  160. context_data=context_data,
  161. )
  162. return final_query
  163. @classmethod
  164. def from_dict(cls, config: Dict):
  165. return cls(**config)
  166. @classmethod
  167. def from_file(cls, file: str):
  168. with open(file, "r", encoding="utf-8") as f:
  169. config = json.load(f)
  170. return cls.from_dict(config)
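A minimal usage sketch of `ChatTemplate`; the template strings below are illustrative assumptions (not templates shipped with any model), and Jinja2 must be installed:
```python
template = ChatTemplate.from_dict(
    {
        "system": "You are a helpful assistant.\n",
        "conversation": ["User: {{ user }}\n", "Bot: {{ bot }}\n"],
        "query": "User: {{ query }}\nBot: ",
    }
)
prompt = template([["Hello", "Hi, how can I help you?"], ["Tell me a joke"]])
# prompt ==
# "You are a helpful assistant.\n"
# "User: Hello\nBot: Hi, how can I help you?\n"
# "User: Tell me a joke\nBot: "
```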
  171. def adapt_stale_fwd_patch(self, name, value):
  172. """
  173. Since there are some monkey patches for forward of PretrainedModel, such as
  174. model compression, we make these patches compatible with the latest forward
  175. method.
  176. """
  177. if name == "forward":
  178. # NOTE(guosheng): In dygraph to static, `layer.forward` would be patched
  179. # by an instance of `StaticFunction`, and we use a string comparison to avoid
  180. # importing fluid.
  181. if type(value).__name__.endswith(
  182. "StaticFunction"
  183. ) or self.forward.__class__.__name__.endswith("StaticFunction"):
  184. return value
  185. (
  186. patch_spec_args,
  187. patch_spec_varargs,
  188. patch_spec_varkw,
  189. patch_spec_defaults,
  190. _,
  191. _,
  192. _,
  193. ) = inspect.getfullargspec(value)
  194. (spec_args, spec_varargs, spec_varkw, spec_defaults, _, _, _) = (
  195. inspect.getfullargspec(self.forward)
  196. )
  197. new_args = [
  198. arg
  199. for arg in ("output_hidden_states", "output_attentions", "return_dict")
  200. if arg not in patch_spec_args and arg in spec_args
  201. ]
  202. if new_args:
  203. import paddle
  204. if self.__module__.startswith("paddlenlp"):
  205. logging.warning(
  206. f"The `forward` method of {self.__class__ if isinstance(self, paddle.nn.Layer) else self} is patched and the patch "
  207. "might be based on an old oversion which missing some "
  208. f"arguments compared with the latest, such as {new_args}. "
  209. "We automatically add compatibility on the patch for "
  210. "these arguments, and maybe the patch should be updated."
  211. )
  212. else:
  213. logging.warning(
  214. f"The `forward` method of {self.__class__ if isinstance(self, paddle.nn.Layer) else self} "
  215. "is patched and the patch might be conflict with patches made "
  216. f"by paddlenlp which seems have more arguments such as {new_args}. "
  217. "We automatically add compatibility on the patch for "
  218. "these arguments, and maybe the patch should be updated."
  219. )
  220. if isinstance(self, paddle.nn.Layer) and inspect.isfunction(value):
  221. @functools.wraps(value)
  222. def wrap_fwd(*args, **kwargs):
  223. for arg in new_args:
  224. kwargs.pop(arg, None)
  225. return value(self, *args, **kwargs)
  226. else:
  227. @functools.wraps(value)
  228. def wrap_fwd(*args, **kwargs):
  229. for arg in new_args:
  230. kwargs.pop(arg, None)
  231. return value(*args, **kwargs)
  232. return wrap_fwd
  233. return value
  234. # NOTE:
  235. # Modification:
  236. # class InitTrackerMeta(type(paddle.nn.Layer)) -> class InitTrackerMeta(type)
  237. # Context:
  238. # 1. In paddle 3.0rc, type(paddle.nn.Layer) == type
  239. # 2. Solve the conflict between ultra-infer and paddle
  240. class InitTrackerMeta(type):
  241. """
  242. This metaclass wraps the `__init__` method of a class to add `init_config`
  243. attribute for instances of that class, and `init_config` use a dict to track
  244. the initial configuration. If the class has `_pre_init` or `_post_init`
  245. method, it would be hooked before or after `__init__` and called as
  246. `_pre_init(self, init_fn, init_args)` or `_post_init(self, init_fn, init_args)`.
  247. InitTrackerMeta is used as the metaclass for pretrained model classes. It
  248. used to derive from `type(paddle.nn.Layer)` to avoid inheritance metaclass
  249. conflicts; since paddle 3.0rc `type(paddle.nn.Layer) == type`, plain `type`
  250. is now used as the base class (see the NOTE above).
  251. """
  252. def __init__(cls, name, bases, attrs):
  253. init_func = cls.__init__
  254. # If attrs has `__init__`, wrap it using accessible `_pre_init, _post_init`.
  255. # Otherwise, no need to wrap again since the super cls has been wrapped.
  256. # TODO: remove reduplicated tracker if using super cls `__init__`
  257. pre_init_func = getattr(cls, "_pre_init", None) if "__init__" in attrs else None
  258. post_init_func = (
  259. getattr(cls, "_post_init", None) if "__init__" in attrs else None
  260. )
  261. cls.__init__ = InitTrackerMeta.init_and_track_conf(
  262. init_func, pre_init_func, post_init_func
  263. )
  264. super(InitTrackerMeta, cls).__init__(name, bases, attrs)
  265. @staticmethod
  266. def init_and_track_conf(init_func, pre_init_func=None, post_init_func=None):
  267. """
  268. wraps `init_func` which is `__init__` method of a class to add `init_config`
  269. attribute for instances of that class.
  270. Args:
  271. init_func (callable): It should be the `__init__` method of a class.
  272. note: `self` is always an instance of the downstream model class, e.g. BertForTokenClassification
  273. pre_init_func (callable, optional): If provided, it would be hooked before
  274. `init_func` and called as `pre_init_func(self, init_func, *init_args, **init_kwargs)`.
  275. Default None.
  276. post_init_func (callable, optional): If provided, it would be hooked after
  277. `init_func` and called as `post_init_func(self, init_func, *init_args, **init_kwargs)`.
  278. Default None.
  279. Returns:
  280. function: the wrapped function
  281. """
  282. @functools.wraps(init_func)
  283. def __impl__(self, *args, **kwargs):
  284. # registered helper by `pre_init_func`
  285. if pre_init_func:
  286. pre_init_func(self, init_func, *args, **kwargs)
  287. # keep full configuration
  288. init_func(self, *args, **kwargs)
  289. # registered helper by `post_init_func`
  290. if post_init_func:
  291. post_init_func(self, init_func, *args, **kwargs)
  292. self.init_config = kwargs
  293. if args:
  294. kwargs["init_args"] = args
  295. kwargs["init_class"] = self.__class__.__name__
  296. return __impl__
  297. def __setattr__(self, name, value):
  298. value = adapt_stale_fwd_patch(self, name, value)
  299. return super(InitTrackerMeta, self).__setattr__(name, value)
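A small sketch of what the metaclass records on each instance (the `Dummy` class is hypothetical):
```python
class Dummy(metaclass=InitTrackerMeta):
    def __init__(self, hidden_size=8, name="demo"):
        self.hidden_size = hidden_size
        self.name = name

d = Dummy(16, name="demo")
# `init_and_track_conf` stores the wrapped call signature on the instance:
# d.init_config == {"name": "demo", "init_args": (16,), "init_class": "Dummy"}
```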
  300. class Trie:
  301. """
  302. Trie in Python. Creates a Trie out of a list of words. The trie is used to split on `added_tokens` in one pass
  303. Loose reference https://en.wikipedia.org/wiki/Trie
  304. """
  305. def __init__(self):
  306. self.data = {}
  307. def add(self, word: str):
  308. """
  309. Passes over every character of `word` and recursively adds it to the internal `data` trie representation.
  310. The special key `""` is used to represent termination.
  311. This function is idempotent: adding the same word twice leaves the trie unchanged.
  312. Example:
  313. ```python
  314. >>> trie = Trie()
  315. >>> trie.add("Hello 友達")
  316. >>> trie.data
  317. {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}}
  318. >>> trie.add("Hello")
  319. >>> trie.data
  320. {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}}
  321. ```
  322. """
  323. if not word:
  324. # Prevent empty string
  325. return
  326. ref = self.data
  327. for char in word:
  328. ref[char] = char in ref and ref[char] or {}
  329. ref = ref[char]
  330. ref[""] = 1
  331. def split(self, text: str) -> List[str]:
  332. """
  333. Will look for the words added to the trie within `text`. Output is the original string split along the
  334. boundaries of the words found.
  335. This trie will match the longest possible word first!
  336. Example:
  337. ```python
  338. >>> trie = Trie()
  339. >>> trie.split("[CLS] This is a extra_id_100")
  340. ["[CLS] This is a extra_id_100"]
  341. >>> trie.add("[CLS]")
  342. >>> trie.add("extra_id_1")
  343. >>> trie.add("extra_id_100")
  344. >>> trie.split("[CLS] This is a extra_id_100")
  345. ["[CLS]", " This is a ", "extra_id_100"]
  346. ```
  347. """
  348. # indexes are counted left of the chars index.
  349. # "hello", index 0, is left of h, index 1 is between h and e.
  350. # index 5 is right of the "o".
  351. # States are going to capture every possible start (indexes as above)
  352. # as keys, and have as values, a pointer to the position in the trie
  353. # where we're at. This is a partial match for now.
  354. # This enables keeping track of multiple matches while we're iterating
  355. # over the string.
  356. # If the trie contains, "blowing", and "lower" and we encounter the
  357. # string "blower", we need to split into ["b", "lower"].
  358. # This is where we need to keep track of multiple possible starts.
  359. states = OrderedDict()
  360. # This will contain every index where we need
  361. # to cut.
  362. # We force to cut at offset 0 and len(text) (added later)
  363. offsets = [0]
  364. # This is used by the lookahead which needs to skip over
  365. # some text where the full match exceeded the place in the initial
  366. # for loop
  367. skip = 0
  368. # Main loop, giving this algorithm O(n) complexity
  369. for current, current_char in enumerate(text):
  370. if skip and current < skip:
  371. # Prevents the lookahead for matching twice
  372. # like extra_id_100 and id_100
  373. continue
  374. # This will track every state
  375. # that stops matching; we need to stop tracking them.
  376. # If we look at "lowball", we're going to match "l" (add it to states), "o", "w", then
  377. # fail on "b"; at that point we need to remove 0 from the valid states.
  378. to_remove = set()
  379. # Whenever we find a match, we need to drop everything;
  380. # this is a greedy algorithm, it will match on the first found token
  381. reset = False
  382. # In this case, we already have partial matches (But unfinished)
  383. for start, trie_pointer in states.items():
  384. if "" in trie_pointer:
  385. # This is a final match, we need to reset and
  386. # store the results in `offsets`.
  387. # Lookahead to match longest first
  388. # Important in case of extra_id_1 vs extra_id_100
  389. # Here we are also actively looking for other earlier partial
  390. # matches
  391. # "[CLS]", "L", we need to match CLS even if L is special
  392. for lookstart, looktrie_pointer in states.items():
  393. if lookstart > start:
  394. # This partial match is later, we can stop looking
  395. break
  396. elif lookstart < start:
  397. # This partial match is earlier, the trie pointer
  398. # was already updated, so index is + 1
  399. lookahead_index = current + 1
  400. end = current + 1
  401. else:
  402. # Here lookstart == start and
  403. # looktrie_pointer == trie_pointer
  404. # It wasn't updated yet so indices are current ones
  405. lookahead_index = current
  406. end = current
  407. next_char = (
  408. text[lookahead_index]
  409. if lookahead_index < len(text)
  410. else None
  411. )
  412. if "" in looktrie_pointer:
  413. start = lookstart
  414. end = lookahead_index
  415. skip = lookahead_index
  416. while next_char in looktrie_pointer:
  417. looktrie_pointer = looktrie_pointer[next_char]
  418. lookahead_index += 1
  419. if "" in looktrie_pointer:
  420. start = lookstart
  421. end = lookahead_index
  422. skip = lookahead_index
  423. if lookahead_index == len(text):
  424. # End of string
  425. break
  426. next_char = text[lookahead_index]
  427. # End lookahead
  428. # Storing and resetting
  429. offsets.append(start)
  430. offsets.append(end)
  431. reset = True
  432. break
  433. elif current_char in trie_pointer:
  434. # The current character being looked at has a match within the trie
  435. # update the pointer (it will be stored back into states later).
  436. trie_pointer = trie_pointer[current_char]
  437. # Storing back the new pointer into the states.
  438. # Partial matches got longer by one.
  439. states[start] = trie_pointer
  440. else:
  441. # The new character has no match in the trie, so we need
  442. # to stop keeping track of this partial match.
  443. # We can't do it directly within the loop because of how
  444. # python iteration works
  445. to_remove.add(start)
  446. # Either clearing the full start (we found a real match)
  447. # Or clearing only the partial matches that didn't work.
  448. if reset:
  449. states = {}
  450. else:
  451. for start in to_remove:
  452. del states[start]
  453. # If this character is a starting character within the trie
  454. # start keeping track of this partial match.
  455. if current >= skip and current_char in self.data:
  456. states[current] = self.data[current_char]
  457. # We have a cut at the end with states.
  458. for start, trie_pointer in states.items():
  459. if "" in trie_pointer:
  460. # This is a final match, we need to reset and
  461. # store the results in `offsets`.
  462. end = len(text)
  463. offsets.append(start)
  464. offsets.append(end)
  465. # The longest cut is always the one with the lowest start, i.e. the first
  466. # item, so we can break.
  467. break
  468. return self.cut_text(text, offsets)
  469. def cut_text(self, text, offsets):
  470. # We have all the offsets now, we just need to do the actual splitting.
  471. # We need to eventually add the first part of the string and the eventual
  472. # last part.
  473. offsets.append(len(text))
  474. tokens = []
  475. start = 0
  476. for end in offsets:
  477. if start > end:
  478. logging.error(
  479. "There was a bug in Trie algorithm in tokenization. Attempting to recover. Please report it anyway."
  480. )
  481. continue
  482. elif start == end:
  483. # This might happen if there's a match at index 0
  484. # we're also preventing zero-width cuts in case of two
  485. # consecutive matches
  486. continue
  487. tokens.append(text[start:end])
  488. start = end
  489. return tokens
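A quick check of the longest-match behaviour documented in `split` (mirrors the docstring example above):
```python
trie = Trie()
trie.add("[CLS]")
trie.add("extra_id_1")
trie.add("extra_id_100")
trie.split("[CLS] a extra_id_100 b extra_id_1")
# -> ["[CLS]", " a ", "extra_id_100", " b ", "extra_id_1"]
```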
  490. def _insert_one_token_to_ordered_list(token_list: List[str], new_token: str):
  491. """
  492. Inserts one token to an ordered list if it does not already exist. Note: token_list must be sorted.
  493. """
  494. insertion_idx = bisect.bisect_left(token_list, new_token)
  495. # Checks if new_token is already in the ordered token_list
  496. if insertion_idx < len(token_list) and token_list[insertion_idx] == new_token:
  497. # new_token is in token_list, don't add
  498. return
  499. else:
  500. token_list.insert(insertion_idx, new_token)
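A short sketch of the bisect-based insertion (the token names are arbitrary):
```python
tokens = ["[CLS]", "[SEP]"]
_insert_one_token_to_ordered_list(tokens, "[MASK]")  # inserted at its sorted position
_insert_one_token_to_ordered_list(tokens, "[MASK]")  # already present, list unchanged
# tokens == ["[CLS]", "[MASK]", "[SEP]"]
```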
  501. def _is_control(char):
  502. """Checks whether `chars` is a control character."""
  503. # These are technically control characters but we count them as whitespace
  504. # characters.
  505. if char == "\t" or char == "\n" or char == "\r":
  506. return False
  507. cat = unicodedata.category(char)
  508. if cat.startswith("C"):
  509. return True
  510. return False
  511. def _is_nonnormalized_char(char):
  512. """Check whether `chars` is a non-normalized character."""
  513. cp = ord(char)
  514. if (
  515. (0xFF00 <= cp <= 0xFFEF) # Halfwidth and Fullwidth Forms
  516. or (0xFE50 <= cp <= 0xFE6B) # Small Form Variants
  517. or (0x3358 <= cp <= 0x33FF) # CJK Compatibility
  518. or (0x249C <= cp <= 0x24E9) # Enclosed Alphanumerics: Ⓛ ⒰
  519. or (0x3200 <= cp <= 0x32FF) # Enclosed CJK Letters and Months
  520. ):
  521. return True
  522. return False
  523. def _is_nonnormalized_numeric(char):
  524. """Check whether `chars` is a non-normalized numeric character."""
  525. cp = ord(char)
  526. if (
  527. (0x2460 <= cp <= 0x249B) # Enclosed Alphanumerics: circled/parenthesized digits
  528. or (0x24EA <= cp <= 0x24FF) # Enclosed Alphanumerics
  529. or (0x2776 <= cp <= 0x2793) # Dingbats: circled digits
  530. or (0x2160 <= cp <= 0x217F) # Number Forms: Roman numerals
  531. ):
  532. return True
  533. return False
  534. def normalize_chars(text):
  535. """
  536. Normalize the text for multilingual and Chinese models. Unicode ranges:
  537. https://www.ling.upenn.edu/courses/Spring_2003/ling538/UnicodeRanges.html
  538. """
  539. output = []
  540. for char in text:
  541. if _is_nonnormalized_char(char):
  542. for c in unicodedata.normalize("NFKC", char):
  543. output.append(c)
  544. elif _is_nonnormalized_numeric(char):
  545. output.append(" ")
  546. for c in str(int(unicodedata.numeric(char))):
  547. output.append(c)
  548. output.append(" ")
  549. elif ord(char) == 0xF979: # https://www.zhihu.com/question/20697984
  550. output.append("凉")
  551. else:
  552. output.append(char)
  553. return "".join(output)
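A sketch of the effect of `normalize_chars` on a few characters from the ranges above:
```python
normalize_chars("①Ｔｅｓｔ")  # -> " 1 Test"  (circled digit becomes " 1 ", fullwidth letters go through NFKC)
normalize_chars("Ⅻ")          # -> " 12 "     (Roman numeral from the Number Forms block)
```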
  554. class ChatTemplateMixin:
  555. chat_template: Optional[ChatTemplate] = None
  556. def apply_chat_template(
  557. self,
  558. conversation: Union[List[List[str]], Dict[str, str], str],
  559. tokenize: bool = True,
  560. context_data: Dict[str, Any] = {},
  561. **tokenizer_kwargs,
  562. ):
  563. """apply chat_template rules to conversation which should not be batched data
  564. Args:
  565. conversation (List[List[str]] , str): the conversation messages between user and bot
  566. context_data (Dict[str, Any]): the context data for chat_template.json
  567. tokenize (bool, optional): whether do tokenization. Defaults to True.
  568. Returns:
  569. str | dict[str, Union[numpy.ndarray, paddle.Tensor]]: return the result of applied data
  570. """
  571. if not self.chat_template:
  572. raise ValueError(
  573. "chat_template is not set, please set chat_template first."
  574. )
  575. elif isinstance(self.chat_template, Template):
  576. add_generation_prompt = tokenizer_kwargs.pop("add_generation_prompt", True)
  577. query = self._apply_chat_template(
  578. conversation, add_generation_prompt=add_generation_prompt
  579. )
  580. elif isinstance(self.chat_template, ChatTemplate):
  581. query = self._apply_chat_template_paddle(conversation, context_data)
  582. if not tokenize:
  583. return query
  584. # chat_template should not add special tokens
  585. tokenizer_kwargs["add_special_tokens"] = False
  586. return self(query, **tokenizer_kwargs)
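A hypothetical usage sketch; `tok` stands for any tokenizer that mixes in `ChatTemplateMixin` and has a chat template loaded:
```python
history = [["Hello", "Hi, how can I help you?"], ["Tell me a joke"]]
prompt = tok.apply_chat_template(history, tokenize=False)  # rendered prompt string
features = tok.apply_chat_template(history)                # tokenized, with add_special_tokens=False
```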
  587. def _apply_chat_template_paddle(
  588. self,
  589. conversation: Union[List[List[str]], str],
  590. context_data: Dict[str, Any] = {},
  591. ):
  592. context_data = self.chat_template._init_context_data(context_data)
  593. if isinstance(conversation, str):
  594. conversation = [[conversation]]
  595. elif isinstance(conversation, list) and isinstance(conversation[0], str):
  596. raise ValueError(
  597. "apply_chat_template do not support applying batch conversations, "
  598. "so you should apply the conversation one by one."
  599. )
  600. query = self.chat_template(conversation, context_data=context_data)
  601. return query
  602. def _apply_chat_template(
  603. self,
  604. conversation: Union[List[List[str]], Dict[str, str], str],
  605. add_generation_prompt=True,
  606. ):
  607. if isinstance(conversation, str):
  608. conversations = [{"role": "user", "content": conversation}]
  609. elif isinstance(conversation, list):
  610. assert len(conversation) > 0, "empty conversation is not allowed"
  611. if isinstance(conversation[0], list):
  612. conversations = convert_to_dict_message(conversation)
  613. elif isinstance(conversation[0], dict):
  614. conversations = conversation
  615. else:
  616. raise ValueError(
  617. "apply_chat_template do not support applying batch conversations, "
  618. "so you should apply the conversation one by one."
  619. )
  620. try:
  621. query = self.chat_template.render(
  622. messages=conversations,
  623. **self.special_tokens_map,
  624. add_generation_prompt=add_generation_prompt,
  625. )
  626. except TypeError:
  627. for i in range(len(conversations)):
  628. content = conversations[i]["content"]
  629. if isinstance(content, list):
  630. new_content = ""
  631. for part in content:
  632. if part.get("type") == "text":
  633. new_content = part["text"]
  634. break
  635. conversations[i]["content"] = new_content
  636. query = self.chat_template.render(
  637. messages=conversations,
  638. **self.special_tokens_map,
  639. add_generation_prompt=add_generation_prompt,
  640. )
  641. return query
  642. def encode_chat_inputs(
  643. self,
  644. conversations: List[List[str]],
  645. context_data: Dict[str, Any] = {},
  646. **kwargs,
  647. ):
  648. """Encodes conversation to pairs of token ids.
  649. Turn 0: bos + system + sep + user bot + eos
  650. Turn t: sep + bot + query bot + eos
  651. Args:
  652. conversations (List[List[str]]): the conversation data
  653. context_data (Dict[str, Any]): the context data of the conversation
  654. Returns:
  655. Dict[str, Any]: a dict whose "conversations" entry is a list of [input_ids, target_ids] pairs
  656. """
  657. if not self.chat_template:
  658. raise ValueError(
  659. "chat_template is not set, please set chat_template first."
  660. )
  661. elif isinstance(self.chat_template, Template):
  662. add_generation_prompt = kwargs.pop("add_generation_prompt", True)
  663. query = self._encode_chat_inputs(
  664. conversations, context_data, add_generation_prompt=add_generation_prompt
  665. )
  666. elif isinstance(self.chat_template, ChatTemplate):
  667. query = self._encode_chat_inputs_paddle(conversations, context_data)
  668. return query
  669. def _encode_chat_inputs_paddle(
  670. self, conversations: List[List[str]], context_data: Dict[str, Any] = {}
  671. ):
  672. context_data = self.chat_template._init_context_data(context_data)
  673. # encode system
  674. result = {}
  675. if self.chat_template.system:
  676. system = self.chat_template.render_system(context_data)
  677. result["system"] = self.encode(system, add_special_tokens=False)[
  678. "input_ids"
  679. ]
  680. # encode conversation
  681. conversation_ids = []
  682. for index, conversation in enumerate(conversations):
  683. # give more control to chat_template
  684. context_data["is_first"] = index == 0
  685. context_data["is_last"] = index == len(conversations) - 1
  686. user_input, bot_output = self.chat_template.render_conversation(
  687. conversation, index=index, context_data=context_data
  688. )
  689. user_ids = self.encode(user_input, add_special_tokens=False)["input_ids"]
  690. bot_ids = self.encode(bot_output, add_special_tokens=False)["input_ids"]
  691. conversation_ids.append([user_ids, bot_ids])
  692. result["conversations"] = conversation_ids
  693. return result
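A sketch of the structure returned by `encode_chat_inputs`; `tok` is a hypothetical tokenizer with a `ChatTemplate`-style template loaded:
```python
encoded = tok.encode_chat_inputs(
    [["Hello", "Hi, how can I help you?"], ["Tell me a joke", "Why did the ..."]]
)
# encoded["system"]        -> list[int]  (present only when the template defines a system prompt)
# encoded["conversations"] -> [[user_ids, bot_ids], [user_ids, bot_ids]]  with each *_ids a list[int]
```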
  694. def _encode_chat_inputs(
  695. self,
  696. conversations: List[List[str]],
  697. context_data: Dict[str, Any] = {},
  698. system: str = None,
  699. add_generation_prompt=True,
  700. ):
  701. result = {}
  702. # Some templates do not support a system message, so we need to check that first.
  703. if system:
  704. try:
  705. self.chat_template.render(
  706. messages=[{"role": "system", "content": system}]
  707. )
  708. except Exception as e:
  709. raise ValueError("System is not supported in this tokenizer.", e)
  710. # convert list msg to role dict msg
  711. conversation_dict = []
  712. origin_msg = []
  713. for round in conversations:
  714. round_role = [
  715. {"role": "user", "content": round[0]},
  716. {"role": "assistant", "content": round[1]},
  717. ]
  718. origin_msg.extend(round_role)
  719. conversation_dict.append(round_role)
  720. ans = []
  721. # get the answer of each single round, then render the chat entirely and split it by the single-round answers
  722. # attention: the answer should include the end token!
  723. for conv in conversation_dict:
  724. roundi = [system] + conv if system else conv
  725. roundi_str = self.chat_template.render(
  726. messages=roundi, add_generation_prompt=False, **self.special_tokens_map
  727. )
  728. roundi_no_ans = [system] + [conv[0]] if system else [conv[0]]
  729. roundi_no_ans_str = self.chat_template.render(
  730. messages=roundi_no_ans,
  731. add_generation_prompt=add_generation_prompt,
  732. **self.special_tokens_map,
  733. )
  734. ans_roundi = roundi_str[len(roundi_no_ans_str) :]
  735. ans.append(ans_roundi)
  736. non_learnable_parts = self._extract_non_learnable_parts(origin_msg, ans)
  737. assert len(non_learnable_parts) == len(
  738. ans
  739. ), f"Get non_learnable_parts len: {len(non_learnable_parts)}, but ans len: {len(ans)}."
  740. conversation_ids = []
  741. for i in range(len(non_learnable_parts)):
  742. conversation_ids.append(
  743. self.batch_encode(
  744. [non_learnable_parts[i], ans[i]],
  745. add_special_tokens=False,
  746. padding=False,
  747. )["input_ids"]
  748. )
  749. result["conversations"] = conversation_ids
  750. return result
  751. def _extract_non_learnable_parts(
  752. self, origin_msg: List[Dict[str, str]], split_s: List[str]
  753. ):
  754. """Split the entire chat by specified words. Extract the non-learnable parts."""
  755. # escape regex special characters in the split strings, e.g. | -> \|
  756. regex_pattern = "|".join(map(re.escape, split_s))
  757. # split the rendered chat by the escaped answer strings
  758. non_learnable_parts = re.split(
  759. r"(?:%s)" % regex_pattern,
  760. self.chat_template.render(
  761. messages=origin_msg,
  762. add_generation_prompt=False,
  763. **self.special_tokens_map,
  764. ),
  765. )
  766. if non_learnable_parts[-1] == "":
  767. non_learnable_parts.pop()
  768. return non_learnable_parts
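A standalone sketch of the splitting idea used by `_extract_non_learnable_parts` (the strings below are made up):
```python
rendered = "<s>User: Hi\nBot: Hello!</s>User: Bye\nBot: See you!</s>"
answers = ["Hello!</s>", "See you!</s>"]
pattern = "|".join(map(re.escape, answers))
re.split(r"(?:%s)" % pattern, rendered)
# -> ["<s>User: Hi\nBot: ", "User: Bye\nBot: ", ""]   (the trailing "" is dropped by the method above)
```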
  769. @classmethod
  770. def from_pretrained(cls, pretrained_model_name_or_path, *args, **kwargs):
  771. cache_dir = kwargs.pop("cache_dir", None)
  772. from_hf_hub = kwargs.pop("from_hf_hub", False)
  773. from_aistudio = kwargs.pop("from_aistudio", False)
  774. subfolder = kwargs.pop("subfolder", "")
  775. if subfolder is None:
  776. subfolder = ""
  777. kwargs["subfolder"] = subfolder
  778. kwargs["cache_dir"] = cache_dir
  779. kwargs["from_hf_hub"] = from_hf_hub
  780. kwargs["from_aistudio"] = from_aistudio
  781. kwargs["return_tokenizer_file_dir"] = True
  782. tokenizer, tokenizer_config_file_dir = super().from_pretrained(
  783. pretrained_model_name_or_path, *args, **kwargs
  784. )
  785. # load chat-template
  786. chat_template_file = os.path.join(
  787. tokenizer_config_file_dir, CHAT_TEMPLATE_CONFIG_NAME
  788. )
  789. if not os.path.exists(chat_template_file):
  790. return tokenizer
  791. if tokenizer.chat_template is not None:
  792. logging.warning(
  793. "Chat-template already exists in config file, it will be overwritten by chat_template.json file."
  794. )
  795. logging.warning(
  796. "`chat_template.json` will be deprecated in the future! Please set it in `tokenizer_config.json`."
  797. )
  798. tokenizer.init_chat_template(chat_template_file)
  799. return tokenizer
  800. def init_chat_template(self, chat_template: Union[str, dict]):
  801. """init chat_tempalte by file_path or template dict data
  802. Args:
  803. chat_template (str, dict): file_path or template dict data
  804. """
  805. if isinstance(chat_template, str):
  806. if not os.path.exists(chat_template):
  807. try:
  808. self.chat_template: Template = ChatTemplate._compile_jinja_template(
  809. chat_template
  810. )
  811. except TemplateSyntaxError:
  812. # It is neither a valid Jinja string nor a path string
  813. raise TemplateSyntaxError(
  814. "The chat-template in json is not a valid Jinja string: {}".format(
  815. chat_template
  816. ),
  817. lineno=0, # fake lineno; required argument of TemplateSyntaxError
  818. )
  819. else:
  820. self.chat_template = ChatTemplate.from_file(chat_template)
  821. elif isinstance(chat_template, dict):
  822. self.chat_template = ChatTemplate.from_dict(chat_template)
  823. elif isinstance(chat_template, ChatTemplate):
  824. self.chat_template = chat_template
  825. else:
  826. raise ValueError("Received invalid chat_template data: ", chat_template)
  827. def save_resources(self, save_directory):
  828. super().save_resources(save_directory)
  829. if isinstance(
  830. self.chat_template, ChatTemplate
  831. ): # Future remove if ChatTemplate is deprecated
  832. chat_template_file = os.path.join(save_directory, CHAT_TEMPLATE_CONFIG_NAME)
  833. with open(chat_template_file, "w", encoding="utf-8") as f:
  834. json.dump(asdict(self.chat_template), f, ensure_ascii=False, indent=4)
  835. logging.info("Chat-template config file saved in " + chat_template_file)
  836. class PretrainedTokenizer(
  837. ChatTemplateMixin, PretrainedTokenizerBase, metaclass=InitTrackerMeta
  838. ):
  839. """
  840. Base class for all tokenizers.
  841. Inherits from [`~tokenizer_utils_base.PretrainedTokenizerBase`].
  842. Handle all the shared methods for tokenization and special tokens as well as methods downloading/caching/loading
  843. pretrained tokenizers as well as adding tokens to the vocabulary.
  844. This class also contains the added tokens in a unified way on top of all tokenizers so we don't have to handle the
  845. specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...).
  846. - **resource_files_names** (`Dict[str, str]`) -- A dictionary with, as keys, the `__init__` keyword name of each
  847. vocabulary file required by the model, and as associated values, the filename for saving the associated file
  848. (string).
  849. - **pretrained_resource_files_map** (`Dict[str, Dict[str, str]]`) -- A dictionary of dictionaries, with the
  850. high-level keys being the `__init__` keyword name of each vocabulary file required by the model, the
  851. low-level being the `short-cut-names` of the pretrained models with, as associated values, the `url` to the
  852. associated pretrained vocabulary file.
  853. - **max_model_input_sizes** (`Dict[str, Optional[int]]`) -- A dictionary with, as keys, the `short-cut-names`
  854. of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model,
  855. or `None` if the model has no maximum input size.
  856. - **pretrained_init_configuration** (`Dict[str, Dict[str, Any]]`) -- A dictionary with, as keys, the
  857. `short-cut-names` of the pretrained models, and as associated values, a dictionary of specific arguments to
  858. pass to the `__init__` method of the tokenizer class for this pretrained model when loading the tokenizer
  859. with the [`~tokenizer_utils_base.PretrainedTokenizerBase.from_pretrained`] method.
  860. - **model_input_names** (`List[str]`) -- A list of inputs expected in the forward pass of the model.
  861. - **padding_side** (`str`) -- The default value for the side on which the model should have padding applied.
  862. Should be `'right'` or `'left'`.
  863. - **truncation_side** (`str`) -- The default value for the side on which the model should have truncation
  864. applied. Should be `'right'` or `'left'`.
  865. Moreover, methods common to tokenizers for tokenization, token/id conversion
  866. and encoding as model inputs are also provided here.
  867. Besides, metaclass `InitTrackerMeta` is used to create `PretrainedTokenizer`,
  868. by which subclasses can track arguments for initialization automatically
  869. and expose special tokens initialization used as attributes.
  870. """
  871. added_tokens_encoder: Dict[str, int] = {}
  872. added_tokens_decoder: Dict[int, str] = {}
  873. unique_no_split_tokens: List[str] = []
  874. tokens_trie = Trie()
  875. _decode_use_source_tokenizer = False
  876. def _pre_init(self, original_init, *args, **kwargs):
  877. """
  878. It would be hooked before `__init__` to add special tokens (arguments of
  879. `__init__` whose name ends with `_token`) as attributes of the tokenizer
  880. instance.
  881. """
  882. init_dict = fn_args_to_dict(original_init, *((self,) + args), **kwargs)
  883. init_dict.pop("self", None)
  884. super(PretrainedTokenizer, self).__init__(**init_dict)
  885. self.added_tokens_decoder: Dict[int, AddedToken] = {}
  886. self.added_tokens_decoder.update(kwargs.pop("added_tokens_decoder", {}))
  887. self.added_tokens_encoder: Dict[str, int] = {
  888. k.content: v for v, k in self.added_tokens_decoder.items()
  889. }
  890. self.unique_no_split_tokens: List[str] = []
  891. self.tokens_trie = Trie()
  892. self._decode_use_source_tokenizer = False
  893. def _build_special_tokens_map_extended(self, **kwargs):
  894. for key, value in kwargs.items():
  895. if value is None:
  896. continue
  897. if key in self.SPECIAL_TOKENS_ATTRIBUTES:
  898. if key == "additional_special_tokens":
  899. assert isinstance(
  900. value, (list, tuple)
  901. ), f"Value {value} is not a list or tuple"
  902. assert all(
  903. isinstance(t, (str, AddedToken)) for t in value
  904. ), "One of the tokens is not a string or an AddedToken"
  905. setattr(self, key, value)
  906. elif isinstance(value, (str, AddedToken)):
  907. setattr(self, key, value)
  908. else:
  909. raise TypeError(
  910. f"special token {key} has to be either str or AddedToken but got: {type(value)}"
  911. )
  912. @property
  913. def vocab_size(self) -> int:
  914. """
  915. `int`: Size of the base vocabulary (without the added tokens).
  916. """
  917. raise NotImplementedError
  918. @property
  919. def is_fast(self) -> bool:
  920. return False
  921. def get_added_vocab(self) -> Dict[str, int]:
  922. """
  923. Returns the added tokens in the vocabulary as a dictionary of token to index.
  924. Returns:
  925. `Dict[str, int]`: The added tokens.
  926. """
  927. return self.added_tokens_encoder
  928. def __len__(self):
  929. """
  930. Size of the full vocabulary with the added tokens.
  931. """
  932. return self.vocab_size + len(self.added_tokens_encoder)
  933. def _add_tokens(
  934. self,
  935. new_tokens: Union[List[str], List[AddedToken]],
  936. special_tokens: bool = False,
  937. ) -> int:
  938. """
  939. Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to
  940. it with indices starting from length of the current vocabulary.
  941. Args:
  942. new_tokens (`List[str]`or `List[AddedToken]`):
  943. Token(s) to add in vocabulary. A token is only added if it's not already in the vocabulary (tested by
  944. checking if the tokenizer assigns the index of the `unk_token` to them).
  945. special_tokens (`bool`, *optional*, defaults to `False`):
  946. Whether or not the tokens should be added as special tokens.
  947. Returns:
  948. `int`: The number of tokens actually added to the vocabulary.
  949. Examples:
  950. ```python
  951. # Let's see how to increase the vocabulary of Bert model and tokenizer
  952. tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
  953. model = BertModel.from_pretrained("bert-base-uncased")
  954. num_added_toks = tokenizer.add_tokens(["new_tok1", "my_new-tok2"])
  955. print("We have added", num_added_toks, "tokens")
  956. ```"""
  957. new_tokens = [str(tok) for tok in new_tokens]
  958. tokens_to_add = []
  959. for token in new_tokens:
  960. if not isinstance(token, str):
  961. raise TypeError(f"Token {token} is not a string but a {type(token)}.")
  962. if (
  963. not special_tokens
  964. and hasattr(self, "do_lower_case")
  965. and self.do_lower_case
  966. ):
  967. token = token.lower()
  968. if (
  969. token != self.unk_token
  970. and self.convert_tokens_to_ids(token)
  971. == self.convert_tokens_to_ids(self.unk_token)
  972. and token not in tokens_to_add
  973. and token not in self.added_tokens_encoder.keys()
  974. ):
  975. tokens_to_add.append(token)
  976. if self.verbose:
  977. logging.info(f"Adding {token} to the vocabulary")
  978. added_tok_encoder = dict(
  979. (tok, len(self) + i) for i, tok in enumerate(tokens_to_add)
  980. )
  981. added_tok_decoder = {v: k for k, v in added_tok_encoder.items()}
  982. self.added_tokens_encoder.update(added_tok_encoder)
  983. self.added_tokens_decoder.update(added_tok_decoder)
  984. # Make sure we don't split on any special tokens (even if they were already in the vocab before, e.g. for Albert)
  985. if special_tokens:
  986. if len(new_tokens) == 1:
  987. _insert_one_token_to_ordered_list(
  988. self.unique_no_split_tokens, new_tokens[0]
  989. )
  990. else:
  991. self.unique_no_split_tokens = sorted(
  992. set(self.unique_no_split_tokens).union(set(new_tokens))
  993. )
  994. else:
  995. # Or on the newly added tokens
  996. if len(tokens_to_add) == 1:
  997. _insert_one_token_to_ordered_list(
  998. self.unique_no_split_tokens, tokens_to_add[0]
  999. )
  1000. else:
  1001. self.unique_no_split_tokens = sorted(
  1002. set(self.unique_no_split_tokens).union(set(tokens_to_add))
  1003. )
  1004. self._create_trie(self.unique_no_split_tokens)
  1005. return len(tokens_to_add)
  1006. def _create_trie(self, unique_no_split_tokens):
  1007. trie = Trie()
  1008. for token in unique_no_split_tokens:
  1009. if (
  1010. hasattr(self, "do_lower_case")
  1011. and self.do_lower_case
  1012. and token not in self.all_special_tokens
  1013. ):
  1014. trie.add(token.lower())
  1015. else:
  1016. trie.add(token)
  1017. self.tokens_trie = trie
  1018. def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
  1019. """
  1020. Performs any necessary transformations before tokenization.
  1021. This method should pop the arguments from kwargs and return the remaining `kwargs` as well. We test the
  1022. `kwargs` at the end of the encoding process to be sure all the arguments have been used.
  1023. Args:
  1024. text (`str`):
  1025. The text to prepare.
  1026. is_split_into_words (`bool`, *optional*, defaults to `False`):
  1027. Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the
  1028. tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace)
  1029. which it will tokenize. This is useful for NER or token classification.
  1030. kwargs:
  1031. Keyword arguments to use for the tokenization.
  1032. Returns:
  1033. `Tuple[str, Dict[str, Any]]`: The prepared text and the unused kwargs.
  1034. """
  1035. return (text, kwargs)
  1036. def tokenize(self, text: TextInput, **kwargs) -> List[str]:
  1037. """
  1038. Converts a string into a sequence of tokens, using the tokenizer.
  1039. Splits into words for word-based vocabularies or sub-words for sub-word-based vocabularies
  1040. (BPE/SentencePieces/WordPieces). Takes care of added tokens.
  1041. Args:
  1042. text (`str`):
  1043. The sequence to be encoded.
  1044. **kwargs (additional keyword arguments):
  1045. Passed along to the model-specific `prepare_for_tokenization` preprocessing method.
  1046. Returns:
  1047. `List[str]`: The list of tokens.
  1048. """
  1049. split_special_tokens = kwargs.pop(
  1050. "split_special_tokens", self.split_special_tokens
  1051. )
  1052. # Simple mapping string => AddedToken for special tokens with specific tokenization behaviors
  1053. all_special_tokens_extended = dict(
  1054. (str(t), t)
  1055. for t in self.all_special_tokens_extended
  1056. if isinstance(t, AddedToken)
  1057. )
  1058. text, kwargs = self.prepare_for_tokenization(text, **kwargs)
  1059. # TODO: should this be in the base class?
  1060. if hasattr(self, "do_lower_case") and self.do_lower_case:
  1061. # convert non-special tokens to lowercase
  1062. escaped_special_toks = [
  1063. re.escape(s_tok)
  1064. for s_tok in (self.unique_no_split_tokens + self.all_special_tokens)
  1065. ]
  1066. pattern = r"(" + r"|".join(escaped_special_toks) + r")|" + r"(.+?)"
  1067. text = re.sub(
  1068. pattern, lambda m: m.groups()[0] or m.groups()[1].lower(), text
  1069. )
  1070. if split_special_tokens:
  1071. no_split_token = []
  1072. tokens = [text]
  1073. else:
  1074. no_split_token = set(
  1075. self.unique_no_split_tokens
  1076. ) # don't split on any of the added tokens
  1077. # "This is something<special_token_1> else"
  1078. tokens = self.tokens_trie.split(text)
  1079. # ["This is something", "<special_token_1>", " else"]
  1080. for i, token in enumerate(tokens):
  1081. if token in no_split_token:
  1082. tok_extended = all_special_tokens_extended.get(token, None)
  1083. left = tokens[i - 1] if i > 0 else None
  1084. right = tokens[i + 1] if i < len(tokens) - 1 else None
  1085. if isinstance(tok_extended, AddedToken):
  1086. if tok_extended.rstrip and right:
  1087. # A bit counter-intuitive but we strip the left of the string
  1088. # since tok_extended.rstrip means the special token is eating all white spaces on its right
  1089. tokens[i + 1] = right.lstrip()
  1090. # Strip white spaces on the left
  1091. if tok_extended.lstrip and left:
  1092. tokens[i - 1] = left.rstrip() # Opposite here
  1093. # ["This is something", "<special_token_1>", "else"]
  1094. tokenized_text = []
  1095. for token in tokens:
  1096. # Need to skip eventual empty (fully stripped) tokens
  1097. if not token:
  1098. continue
  1099. if token in no_split_token:
  1100. tokenized_text.append(token)
  1101. else:
  1102. tokenized_text.extend(self._tokenize(token))
  1103. # ["This", " is", " something", "<special_token_1>", "else"]
  1104. return tokenized_text
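A standalone sketch of the two-stage pipeline in `tokenize`: trie split first, then per-chunk sub-word tokenization (the whitespace splitter is only a stand-in for a real `_tokenize`):
```python
trie = Trie()
trie.add("<special_token_1>")
chunks = trie.split("This is something<special_token_1> else")
# chunks == ["This is something", "<special_token_1>", " else"]

tokens = []
for chunk in chunks:
    if chunk == "<special_token_1>":      # no-split token is kept whole
        tokens.append(chunk)
    else:
        tokens.extend(chunk.split())      # stand-in for self._tokenize(chunk)
# tokens == ["This", "is", "something", "<special_token_1>", "else"]
```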
  1105. def _tokenize(self, text, **kwargs):
  1106. """
  1107. Converts a string into a sequence of tokens (strings), using the tokenizer. Splits into words for word-based
  1108. vocabularies or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces).
  1109. Does NOT take care of added tokens.
  1110. """
  1111. raise NotImplementedError
  1112. def convert_tokens_to_ids(self, tokens):
  1113. if tokens is None:
  1114. return None
  1115. if isinstance(tokens, str):
  1116. return self._convert_token_to_id_with_added_voc(tokens)
  1117. ids = []
  1118. for token in tokens:
  1119. ids.append(self._convert_token_to_id_with_added_voc(token))
  1120. return ids
  1121. def _convert_token_to_id_with_added_voc(self, token):
  1122. if token is None:
  1123. return None
  1124. if token in self.added_tokens_encoder:
  1125. return self.added_tokens_encoder[token]
  1126. return self._convert_token_to_id(token)
  1127. def _convert_token_to_id(self, token):
  1128. return self.vocab.to_indices(token)
  1129. def convert_tokens_to_string(self, tokens):
  1130. """
  1131. Converts a sequence of tokens (list of string) to a single string by
  1132. using ``' '.join(tokens)`` .
  1133. Args:
  1134. tokens (list[str]): A sequence of tokens.
  1135. Returns:
  1136. str: Converted string.
  1137. """
  1138. return " ".join(tokens)
  1139. def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
  1140. if isinstance(ids, int):
  1141. if ids in self.added_tokens_decoder:
  1142. token = self.added_tokens_decoder[ids]
  1143. token = token.content if isinstance(token, AddedToken) else token
  1144. return token
  1145. else:
  1146. return self._convert_id_to_token(ids)
  1147. tokens = []
  1148. for index in ids:
  1149. index = int(index)
  1150. if skip_special_tokens and index in self.all_special_ids:
  1151. continue
  1152. if index in self.added_tokens_decoder:
  1153. token = self.added_tokens_decoder[index]
  1154. token = token.content if isinstance(token, AddedToken) else token
  1155. tokens.append(token)
  1156. else:
  1157. tokens.append(self._convert_id_to_token(index))
  1158. return tokens
  1159. def _convert_id_to_token(self, index):
  1160. return self.vocab.to_tokens(index)

    @staticmethod
    def load_vocabulary(
        filepath,
        unk_token=None,
        pad_token=None,
        bos_token=None,
        eos_token=None,
        **kwargs,
    ):
        """
        Instantiates an instance of `Vocab` from a file, reserving all tokens
        by using `Vocab.from_dict`. The file contains one token per line, and the
        line number is the index of the corresponding token.

        Args:
            filepath (str): Path of the file used to construct the vocabulary.
            unk_token (str): Special token for unknown token. If not needed, it can
                also be `None`. Defaults to `None`.
            pad_token (str): Special token for padding token. If not needed, it can
                also be `None`. Defaults to `None`.
            bos_token (str): Special token for bos token. If not needed, it can
                also be `None`. Defaults to `None`.
            eos_token (str): Special token for eos token. If not needed, it can
                also be `None`. Defaults to `None`.
            **kwargs (dict): Keyword arguments for `Vocab.from_dict`.

        Returns:
            Vocab: An instance of `Vocab`.
        """
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        vocab = Vocab.from_dict(
            token_to_idx,
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            **kwargs,
        )
        return vocab

    @staticmethod
    def save_vocabulary(filepath, vocab):
        """
        Saves all tokens to a vocabulary file. The file contains one token per line,
        and the line number is the index of the corresponding token.

        Args:
            filepath (str): File path to be saved to.
            vocab (Vocab|dict): The `Vocab` or `dict` instance to be saved.
        """
        if isinstance(vocab, Vocab):
            tokens = vocab.idx_to_token
        else:
            tokens = sorted(vocab.keys(), key=lambda token: vocab[token])
        with io.open(filepath, "w", encoding="utf-8") as f:
            for token in tokens:
                f.write(token + "\n")
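
    # File-format sketch (illustrative, not part of the original file): the vocabulary
    # file is one token per line, indexed by line number. Assuming `MyTokenizer` is a
    # concrete subclass of this class (the name is hypothetical):
    #
    #     MyTokenizer.save_vocabulary("vocab.txt", {"[PAD]": 0, "[UNK]": 1, "hello": 2})
    #     vocab = MyTokenizer.load_vocabulary("vocab.txt", unk_token="[UNK]", pad_token="[PAD]")
    #     vocab.to_indices("hello")  # -> 2 (same Vocab API as _convert_token_to_id above)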

    def get_special_tokens_mask(
        self, token_ids_0, token_ids_1=None, already_has_special_tokens=False
    ):
        """
        Retrieves the special-tokens mask for a token list that has no special tokens added.
        This method is called when adding special tokens using the tokenizer ``encode`` methods.

        Args:
            token_ids_0 (List[int]): List of ids of the first sequence.
            token_ids_1 (List[int], optional): List of ids of the second sequence.
            already_has_special_tokens (bool, optional): Whether or not the token list is already
                formatted with special tokens for the model. Defaults to `False`.

        Returns:
            results (List[int]): The list of integers in the range [0, 1]:
                1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0,
                token_ids_1=token_ids_1,
                already_has_special_tokens=True,
            )
        return [0] * ((len(token_ids_1) if token_ids_1 else 0) + len(token_ids_0))

    def num_special_tokens_to_add(self, pair=False):
        """
        Returns the number of added tokens when encoding a sequence with special tokens.

        Args:
            pair (bool, optional):
                Whether the number of added tokens should be computed in the case of a sequence pair or a single
                sequence. Defaults to `False`.

        Returns:
            int: Number of special tokens added to sequences.
        """
        token_ids_0 = []
        token_ids_1 = []
        return len(
            self.build_inputs_with_special_tokens(
                token_ids_0, token_ids_1 if pair else None
            )
        )
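
    # How this works (illustrative, not part of the original file): the method probes
    # build_inputs_with_special_tokens with empty id lists, so only the special tokens
    # remain in the result. For a BERT-style layout ([CLS] A [SEP] B [SEP]) one would
    # typically get:
    #
    #     tok.num_special_tokens_to_add(pair=False)  # -> 2  ([CLS], [SEP])
    #     tok.num_special_tokens_to_add(pair=True)   # -> 3  ([CLS], [SEP], [SEP])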

    def _encode_plus(
        self,
        text: Union[TextInput, PreTokenizedInput, EncodedInput],
        text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        is_split_into_words: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[Literal["right", "left"]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_position_ids: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        def get_input_ids(text):
            if isinstance(text, str):
                tokens = self.tokenize(text, **kwargs)
                return self.convert_tokens_to_ids(tokens)
            elif (
                isinstance(text, (list, tuple))
                and len(text) > 0
                and isinstance(text[0], str)
            ):
                if is_split_into_words:
                    tokens = list(
                        itertools.chain(
                            *(
                                self.tokenize(t, is_split_into_words=True, **kwargs)
                                for t in text
                            )
                        )
                    )
                    return self.convert_tokens_to_ids(tokens)
                else:
                    return self.convert_tokens_to_ids(text)
            elif (
                isinstance(text, (list, tuple))
                and len(text) > 0
                and isinstance(text[0], int)
            ):
                return text
            else:
                if is_split_into_words:
                    raise ValueError(
                        f"Input {text} is not valid. Should be a string or a list/tuple of strings when `is_split_into_words=True`."
                    )
                else:
                    raise ValueError(
                        f"Input {text} is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
                    )

        first_ids = get_input_ids(text)
        second_ids = get_input_ids(text_pair) if text_pair is not None else None

        if return_offsets_mapping:
            kwargs["text"] = text
            kwargs["text_pair"] = text_pair

        return self.prepare_for_model(
            first_ids,
            pair_ids=second_ids,
            add_special_tokens=add_special_tokens,
            padding=padding_strategy.value,
            truncation=truncation_strategy.value,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
            return_tensors=return_tensors,
            prepend_batch_axis=True,
            return_position_ids=return_position_ids,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            **kwargs,
        )
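
    # Input dispatch (illustrative, not part of the original file): get_input_ids above
    # accepts three shapes of input and normalizes all of them to a flat list of ids:
    #
    #     "hello world"        -> tokenize + convert_tokens_to_ids
    #     ["hello", "world"]   -> convert_tokens_to_ids (or per-word tokenize when
    #                             is_split_into_words=True)
    #     [1, 7, 9]            -> already-encoded ids, passed through unchanged
    #                             (the ids here are purely illustrative)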

    def _batch_encode_plus(
        self,
        batch_text_or_text_pairs: Union[
            List[TextInput],
            List[TextInputPair],
            List[PreTokenizedInput],
            List[PreTokenizedInputPair],
            List[EncodedInput],
            List[EncodedInputPair],
        ],
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        is_split_into_words: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[Literal["right", "left"]] = None,
        return_position_ids: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_dict: bool = True,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        def get_input_ids(text):
            if isinstance(text, str):
                tokens = self.tokenize(text, **kwargs)
                return self.convert_tokens_to_ids(tokens)
            elif (
                isinstance(text, (list, tuple))
                and len(text) > 0
                and isinstance(text[0], str)
            ):
                if is_split_into_words:
                    tokens = list(
                        itertools.chain(
                            *(
                                self.tokenize(t, is_split_into_words=True, **kwargs)
                                for t in text
                            )
                        )
                    )
                    return self.convert_tokens_to_ids(tokens)
                else:
                    return self.convert_tokens_to_ids(text)
            elif (
                isinstance(text, (list, tuple))
                and len(text) > 0
                and isinstance(text[0], int)
            ):
                return text
            else:
                raise ValueError(
                    "Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
                )

        input_ids = []
        for ids_or_pair_ids in batch_text_or_text_pairs:
            if not isinstance(ids_or_pair_ids, (list, tuple)):
                ids, pair_ids = ids_or_pair_ids, None
            elif is_split_into_words and not isinstance(
                ids_or_pair_ids[0], (list, tuple)
            ):
                ids, pair_ids = ids_or_pair_ids, None
            else:
                ids, pair_ids = ids_or_pair_ids
            first_ids = get_input_ids(ids)
            second_ids = get_input_ids(pair_ids) if pair_ids is not None else None
            input_ids.append((first_ids, second_ids))

        if stride > 0 and second_ids is not None:
            kwargs["batch_text_or_text_pairs"] = batch_text_or_text_pairs
        else:
            if return_offsets_mapping:
                has_pair = False
                if len(batch_text_or_text_pairs) > 0:
                    if isinstance(batch_text_or_text_pairs[0], (list, tuple)):
                        has_pair = True
                kwargs["texts"] = None
                kwargs["text_pairs"] = None
                if has_pair:
                    kwargs["texts"] = [text[0] for text in batch_text_or_text_pairs]
                    kwargs["text_pairs"] = [
                        text[1] for text in batch_text_or_text_pairs
                    ]
                else:
                    kwargs["texts"] = [text for text in batch_text_or_text_pairs]

        batch_outputs = self._batch_prepare_for_model(
            input_ids,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
            return_position_ids=return_position_ids,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_dict=return_dict,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            return_tensors=return_tensors,
            verbose=verbose,
            **kwargs,
        )
        return batch_outputs

    def _batch_prepare_for_model(
        self,
        batch_ids_pairs: List[Union[PreTokenizedInputPair, Tuple[List[int], None]]],
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[Literal["right", "left"]] = None,
        return_position_ids: Optional[bool] = None,
        return_tensors: Optional[str] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_dict: bool = True,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by
        the model. It adds special tokens, truncates sequences if they overflow while taking the special
        tokens into account, and manages a moving window (with a user-defined stride) for overflowing tokens.

        Args:
            batch_ids_pairs: list of tokenized input ids or input ids pairs
        """
        if return_token_type_ids and not add_special_tokens:
            raise ValueError(
                "Asking to return token_type_ids while setting add_special_tokens to False "
                "results in an undefined behavior. Please set add_special_tokens to True or "
                "set return_token_type_ids to None."
            )

        batch_outputs = {}
        batch_outputs_list = []
        for example_id, (first_ids, second_ids) in enumerate(batch_ids_pairs):
            if stride > 0 and second_ids is not None:
                if return_token_type_ids is None:
                    return_token_type_ids = "token_type_ids" in self.model_input_names
                if return_attention_mask is None:
                    return_attention_mask = "attention_mask" in self.model_input_names
                max_len_for_pair = (
                    max_length
                    - len(first_ids)
                    - (
                        self.num_special_tokens_to_add(pair=True)
                        if add_special_tokens
                        else 0
                    )
                )
                text, text_pair = kwargs["batch_text_or_text_pairs"][example_id]
                token_offset_mapping = self.get_offset_mapping(text)
                token_pair_offset_mapping = self.get_offset_mapping(text_pair)
                offset = 0
                while offset < len(second_ids):
                    encoded_inputs = {}
                    length = len(second_ids) - offset
                    if length > max_len_for_pair:
                        length = max_len_for_pair
                    ids = first_ids
                    pair_ids = second_ids[offset : offset + length]
                    pair = bool(pair_ids is not None)
                    mapping = token_offset_mapping
                    pair_mapping = token_pair_offset_mapping[offset : offset + length]
                    if add_special_tokens:
                        offset_mapping = self.build_offset_mapping_with_special_tokens(
                            mapping, pair_mapping
                        )
                        sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
                        token_type_ids = self.create_token_type_ids_from_sequences(
                            ids, pair_ids
                        )
                    else:
                        offset_mapping = mapping + pair_mapping
                        sequence = ids + pair_ids if pair else ids
                        token_type_ids = [0] * len(ids) + (
                            [0] * len(pair_ids) if pair else []
                        )
                    encoded_inputs["offset_mapping"] = offset_mapping
                    # Build output dictionary
                    encoded_inputs["input_ids"] = sequence
                    if return_token_type_ids:
                        encoded_inputs["token_type_ids"] = token_type_ids
                    if return_special_tokens_mask:
                        if add_special_tokens:
                            encoded_inputs["special_tokens_mask"] = (
                                self.get_special_tokens_mask(ids, pair_ids)
                            )
                        else:
                            encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
                    # Check lengths
                    self._eventual_warn_about_too_long_sequence(
                        encoded_inputs["input_ids"], max_length, verbose
                    )
                    if return_position_ids:
                        encoded_inputs["position_ids"] = list(
                            range(len(encoded_inputs["input_ids"]))
                        )
                    if return_length:
                        encoded_inputs["length"] = len(encoded_inputs["input_ids"])
                        encoded_inputs["seq_len"] = encoded_inputs["length"]
                    encoded_inputs["overflow_to_sample"] = example_id
                    for key, value in encoded_inputs.items():
                        if key not in batch_outputs:
                            batch_outputs[key] = []
                        batch_outputs[key].append(value)
                    if offset + length == len(second_ids):
                        break
                    offset += min(length, stride)
            else:
                if return_offsets_mapping:
                    kwargs["text"] = kwargs["texts"][example_id]
                    kwargs["text_pair"] = None
                    if kwargs["text_pairs"] is not None:
                        kwargs["text_pair"] = kwargs["text_pairs"][example_id]
                encoded_inputs = self.prepare_for_model(
                    first_ids,
                    second_ids,
                    add_special_tokens=add_special_tokens,
                    padding=PaddingStrategy.DO_NOT_PAD.value,  # we pad in batch afterward
                    truncation=truncation_strategy.value,
                    max_length=max_length,
                    stride=stride,
                    pad_to_multiple_of=None,  # we pad in batch afterward
                    padding_side=padding_side,  # we pad in batch afterward
                    return_position_ids=return_position_ids,  # we pad in batch afterward
                    return_attention_mask=False,  # we pad in batch afterward
                    return_token_type_ids=return_token_type_ids,
                    return_overflowing_tokens=return_overflowing_tokens,
                    return_special_tokens_mask=return_special_tokens_mask,
                    return_offsets_mapping=return_offsets_mapping,
                    return_length=return_length,
                    return_tensors=None,  # We convert the whole batch to tensors at the end
                    prepend_batch_axis=False,
                    verbose=verbose,
                    **kwargs,
                )
                for key, value in encoded_inputs.items():
                    if key not in batch_outputs:
                        batch_outputs[key] = []
                    batch_outputs[key].append(value)

        batch_outputs = self.pad(
            batch_outputs,
            padding=padding_strategy.value,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
            return_attention_mask=return_attention_mask,
        )
        if return_dict:
            batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
            return batch_outputs
        else:
            for k, v in batch_outputs.items():
                for i in range(len(v)):
                    if i >= len(batch_outputs_list):
                        batch_outputs_list.append({k: v[i]})
                    else:
                        batch_outputs_list[i][k] = v[i]
            return batch_outputs_list
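
    # Sliding-window sketch (illustrative, not part of the original file): with stride > 0
    # and a text pair, the second sequence is windowed so that each chunk fits next to the
    # full first sequence. Assuming max_length leaves room for 4 pair ids and stride=2,
    # a pair sequence [p0, p1, p2, p3, p4, p5] is emitted as overlapping windows:
    #
    #     [p0, p1, p2, p3]   offset 0
    #     [p2, p3, p4, p5]   offset 2   (offset advanced by min(length, stride))
    #
    # each combined with first_ids and tagged with overflow_to_sample = example_id.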

    def _get_bert_like_offset_mapping(self, text: str):
        """
        Returns the offset map of the input text: for each token, the start and end
        character index in the original text.
        Modified from https://github.com/bojone/bert4keras/blob/master/bert4keras/tokenizers.py#L372

        Args:
            text (str):
                Input text.
        Returns:
            list: The offset map of the input text.
        """
        if text is None:
            return None
        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []
        for i, ch in enumerate(text):
            if hasattr(self, "do_lower_case") and self.do_lower_case:
                ch = ch.lower()
                if self.basic_tokenizer.strip_accents is not False:
                    ch = unicodedata.normalize("NFD", ch)
                    ch = "".join([c for c in ch if unicodedata.category(c) != "Mn"])
            elif self.basic_tokenizer.strip_accents:
                ch = unicodedata.normalize("NFD", ch)
                ch = "".join([c for c in ch if unicodedata.category(c) != "Mn"])
            ch = "".join(
                [
                    c
                    for c in ch
                    if not (ord(c) == 0 or ord(c) == 0xFFFD or _is_control(c))
                ]
            )
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0
        char_mapping_indexes = []
        for index, token in enumerate(split_tokens):
            if token[:2] == "##":
                token = token[2:]
            if token in self.all_special_tokens:
                token = (
                    token.lower()
                    if hasattr(self, "do_lower_case") and self.do_lower_case
                    else token
                )
            # The greek letter "sigma" has 2 lowercase forms, σ and ς.
            # When used as the final letter of a word, the final form (ς) is used; otherwise σ is used.
            # https://latin.stackexchange.com/questions/6168/how-and-when-did-we-get-two-forms-of-sigma
            if "σ" in token or "ς" in token:
                start = (
                    text[offset:].replace("ς", "σ").index(token.replace("ς", "σ"))
                    + offset
                )
            else:
                # try to fix: https://github.com/PaddlePaddle/PaddleNLP/issues/3985
                if token not in text[offset:]:
                    # check whether there are consecutive UNK tokens, e.g.: ['好', '[UNK]', '[UNK]', 'good']
                    if (
                        index < len(split_tokens) - 1
                        and split_tokens[index + 1] in self.all_special_tokens
                    ):
                        start = offset
                        token = " "  # only contains one char
                    else:
                        start = -1
                else:
                    start = text[offset:].index(token) + offset
            end = start + len(token)
            char_mapping_indexes.append([start, end])
            if start != -1:
                offset = end

        token_mapping = []
        for index, (start, end) in enumerate(char_mapping_indexes):
            if start == -1:
                # init start
                if index == 0:
                    start = 0
                else:
                    start = char_mapping_indexes[index - 1][1]
                # init end
                if index == len(char_mapping_indexes) - 1:
                    end = len(char_mapping)
                else:
                    # next start
                    end = char_mapping_indexes[index + 1][0]
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))

        return token_mapping
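
    # Offset-mapping sketch (illustrative, not part of the original file): for a
    # BERT-like, lower-casing tokenizer one would expect something along the lines of
    #
    #     tok.get_offset_mapping("He said hello")
    #     # tokens  : ["he", "said", "hello"]
    #     # mapping : [(0, 2), (3, 7), (8, 13)]
    #
    # i.e. each tuple is the (start, end) character span of the token in the raw text.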

    def get_offset_mapping(self, text: str, split_tokens: Optional[List[str]] = None):
        """
        Returns the offset map of the input text: for each token, the start and end
        character index in the original text.
        Modified from https://github.com/bojone/bert4keras/blob/master/bert4keras/tokenizers.py#L372

        Args:
            text (str):
                Input text.
            split_tokens (Optional[List[str]]):
                Tokens that have already been split, which can speed up the computation.
        Returns:
            list: The offset map of the input text.
        """
        if text is None:
            return None

        # bert-like tokenizers use the old-school code path
        if hasattr(self, "basic_tokenizer") or hasattr(self, "wordpiece_tokenizer"):
            return self._get_bert_like_offset_mapping(text)

        # only tokenize when no pre-split tokens were passed in
        if not split_tokens:
            split_tokens = self.tokenize(text)

        normalized_text, char_mapping = "", []
        for i, ch in enumerate(text):
            normalized_text += normalize_chars(ch)
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0
        do_lower_case = getattr(self, "do_lower_case", False)

        # lower the text if the tokenizer is lower-cased, to keep it aligned with the tokens
        if do_lower_case:
            text = text.lower()

        char_mapping_indexes = []
        for token in split_tokens:
            # convert the token back into a piece of the original string
            token: str = self.convert_tokens_to_string(token).strip()
            if token in self.all_special_tokens:
                if do_lower_case:
                    token = token.lower()
            # The greek letter "sigma" has 2 lowercase forms, σ and ς.
            # When used as the final letter of a word, the final form (ς) is used; otherwise σ is used.
            # https://latin.stackexchange.com/questions/6168/how-and-when-did-we-get-two-forms-of-sigma
            if "σ" in token or "ς" in token:
                start = (
                    text[offset:].replace("ς", "σ").index(token.replace("ς", "σ"))
                    + offset
                )
            else:
                # try to fix: https://github.com/PaddlePaddle/PaddleNLP/issues/3985
                if token not in text[offset:]:
                    start = -1
                else:
                    start = text[offset:].index(token) + offset
            end = start + len(token)
            char_mapping_indexes.append([start, end])
            if start != -1:
                offset = end

        token_mapping = []
        for index, (start, end) in enumerate(char_mapping_indexes):
            if start == -1:
                # init start
                if index == 0:
                    start = 0
                else:
                    start = char_mapping_indexes[index - 1][1]
                # init end
                if index == len(char_mapping_indexes) - 1:
                    end = len(char_mapping)
                else:
                    # next start
                    end = char_mapping_indexes[index + 1][0]
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))

        return token_mapping

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = True,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        if isinstance(token_ids, np.ndarray):
            token_ids = token_ids.tolist()
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
        filtered_tokens = self.convert_ids_to_tokens(
            token_ids, skip_special_tokens=skip_special_tokens
        )

        # To avoid mixing byte-level and unicode for byte-level BPE
        # we need to build the string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            # note: filtered_tokens are strings, so compare against special *tokens*, not ids
            if skip_special_tokens and token in self.all_special_tokens:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        if spaces_between_special_tokens:
            text = " ".join(sub_texts)
        else:
            text = "".join(sub_texts)

        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
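

# Decode sketch (illustrative, not part of the original file): given ids for
# ["hello", "world", "[SEP]"] where "[SEP]" is registered as an added special token,
# _decode groups the plain tokens, converts them to a string, and re-inserts the
# special token:
#
#     tok._decode(ids)                             # -> "hello world [SEP]"
#     tok._decode(ids, skip_special_tokens=True)   # -> "hello world"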


def _is_control(char):
    """Checks whether `char` is a control character."""
    # These are technically control characters but we count them as whitespace
    # characters.
    if char == "\t" or char == "\n" or char == "\r":
        return False
    cat = unicodedata.category(char)
    if cat.startswith("C"):
        return True
    return False


def _is_punctuation(char):
    """Checks whether `char` is a punctuation character."""
    cp = ord(char)
    # We treat all non-letter/number ASCII as punctuation.
    # Characters such as "^", "$", and "`" are not in the Unicode
    # Punctuation class but we treat them as punctuation anyway, for
    # consistency.
    if (
        (cp >= 33 and cp <= 47)
        or (cp >= 58 and cp <= 64)
        or (cp >= 91 and cp <= 96)
        or (cp >= 123 and cp <= 126)
    ):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False


def _is_symbol(char):
    """Checks whether `char` is a symbol character."""
    cp = ord(char)
    if unicodedata.category(char).startswith("S") or (
        cp in [0x00AD, 0x00B2, 0x00BA, 0x3007, 0x00B5, 0x00D8, 0x014B, 0x01B1]
    ):
        return True
    return False


def _is_whitespace(char):
    """
    Checks whether `char` is a whitespace character.
    """
    # \t, \n, and \r are technically control characters but we treat them
    # as whitespace since they are generally considered as such.
    if char == " " or char == "\t" or char == "\n" or char == "\r":
        return True
    cat = unicodedata.category(char)
    if cat == "Zs":
        return True
    return False


def convert_to_unicode(text):
    """
    Converts `text` to Unicode (if it's not already), assuming utf-8 input.

    Args:
        text (str|bytes): Text to be converted to unicode.
    Returns:
        str: Converted text.
    """
    if isinstance(text, str):
        return text
    elif isinstance(text, bytes):
        return text.decode("utf-8", "ignore")
    else:
        raise ValueError("Unsupported string type: %s" % (type(text)))


def whitespace_tokenize(text):
    """
    Runs basic whitespace cleaning and splitting on a piece of text.

    Args:
        text (str): Text to be tokenized.
    Returns:
        list(str): Token list.
    """
    text = text.strip()
    if not text:
        return []
    tokens = text.split()
    return tokens
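

# Minimal self-check sketch for the module-level helpers (illustrative, not part of the
# original file); running this module directly exercises the expected behavior.
if __name__ == "__main__":
    assert whitespace_tokenize("  hello   world \n") == ["hello", "world"]
    assert convert_to_unicode(b"caf\xc3\xa9") == "café"
    assert _is_whitespace(" ") and _is_whitespace("\t")
    assert _is_punctuation(",") and _is_punctuation("^")
    assert _is_control("\x00") and not _is_control("\n")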