# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import logging
import os
import shutil
import unicodedata
from functools import lru_cache
from typing import List, Optional

from .tokenizer_utils import (
    PretrainedTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)
from .tokenizer_utils_base import AddedToken

__all__ = ["CLIPTokenizer"]


@lru_cache()
def bytes_to_unicode():
    """
    Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to whitespace/control
    characters that the bpe code barfs on.

    The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
    if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
    decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
    tables between utf-8 bytes and unicode strings.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
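    # Any byte not already in the printable set above is remapped to an unused
    # code point starting at 256, so every byte has a visible, reversible
    # stand-in (e.g. the space byte 0x20 is represented by "Ġ", U+0120).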
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


def whitespace_clean(text, re):
    text = re.sub(r"\s+", " ", text)
    text = text.strip()
    return text


def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    text = text.strip()
    if not text:
        return []
    tokens = text.split()
    return tokens


# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
class BasicTokenizer(object):
    """
    Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).

    Args:
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        never_split (`Iterable`, *optional*):
            Collection of tokens which will never be split during tokenization. Only has an effect when
            `do_basic_tokenize=True`.
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters.
            This should likely be deactivated for Japanese (see this
            [issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original BERT).
        do_split_on_punc (`bool`, *optional*, defaults to `True`):
            In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
            the full context of the words, such as contractions.
    """

    def __init__(
        self,
        do_lower_case=True,
        never_split=None,
        tokenize_chinese_chars=True,
        strip_accents=None,
        do_split_on_punc=True,
    ):
        if never_split is None:
            never_split = []
        self.do_lower_case = do_lower_case
        self.never_split = set(never_split)
        self.tokenize_chinese_chars = tokenize_chinese_chars
        self.strip_accents = strip_accents
        self.do_split_on_punc = do_split_on_punc

    def tokenize(self, text, never_split=None):
        """
        Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.

        Args:
            never_split (`List[str]`, *optional*):
                Kept for backward compatibility purposes. Now implemented directly at the base class level (see
                [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
        """
        # union() returns a new set by concatenating the two sets.
        never_split = (
            self.never_split.union(set(never_split))
            if never_split
            else self.never_split
        )
        text = self._clean_text(text)

        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        if self.tokenize_chinese_chars:
            text = self._tokenize_chinese_chars(text)
        # prevents treating the same character with different unicode codepoints as different characters
        unicode_normalized_text = unicodedata.normalize("NFC", text)
        orig_tokens = whitespace_tokenize(unicode_normalized_text)
        split_tokens = []
        for token in orig_tokens:
            if token not in never_split:
                if self.do_lower_case:
                    token = token.lower()
                    if self.strip_accents is not False:
                        token = self._run_strip_accents(token)
                elif self.strip_accents:
                    token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token, never_split))

        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _run_split_on_punc(self, text, never_split=None):
        """Splits punctuation on a piece of text."""
        if not self.do_split_on_punc or (
            never_split is not None and text in never_split
        ):
            return [text]
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1

        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True

        return False

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            if cp == 0 or cp == 0xFFFD or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)


class CLIPTokenizer(PretrainedTokenizer):
    r"""
    Construct a CLIP tokenizer based on byte-level Byte-Pair-Encoding.

    This tokenizer inherits from :class:`~paddlenlp.transformers.tokenizer_utils.PretrainedTokenizer`.
    For more information regarding those methods, please refer to this superclass.

    Args:
        vocab_file (str):
            Path to the vocabulary file.
            The vocab file contains a mapping from vocabulary strings to indices.
        merges_file (str):
            Path to the merges file.
            The merges file is used to split the input sentence into "subword" units.
            The vocab file is then used to encode those units as indices.
        errors (str):
            Paradigm to follow when decoding bytes to UTF-8.
            Defaults to `'replace'`.
        max_len (int, optional):
            The maximum value of the input sequence length.
            Defaults to `77`.
        bos_token (str, optional):
            The beginning of sequence token that was used during pretraining. Can be
            used as a sequence classifier token.
            Defaults to `"<|startoftext|>"`.
        eos_token (str, optional):
            A special token representing the end of a sequence that was used during pretraining.
            Defaults to `"<|endoftext|>"`.
        unk_token (str, optional):
            A special token representing the *unknown (out-of-vocabulary)* token.
            An unknown token is set to `unk_token` in order to be converted to an ID.
            Defaults to `"<|endoftext|>"`.
        pad_token (str, optional):
            A special token used to make arrays of tokens the same size for batching purposes.
            Defaults to `"<|endoftext|>"`.

    Examples:
        .. code-block::

            from paddlenlp.transformers import AutoTokenizer

            tokenizer = AutoTokenizer.from_pretrained('openai/clip-vit-base-patch32')
            print(tokenizer('He was a puppeteer'))

            '''
            {'input_ids': [49406, 797, 739, 320, 7116, 38820, 528, 49407]}
            '''
    """

    # merges and vocab same as GPT2
    resource_files_names = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
    pretrained_resource_files_map = {"vocab_file": {}, "merges_file": {}}
    pretrained_init_configuration = {}
    model_input_names = [
        "input_ids",
        "attention_mask",
    ]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        max_len=77,
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        **kwargs
    ):
        from paddle.utils import try_import

        bos_token = (
            AddedToken(bos_token, lstrip=False, rstrip=False)
            if isinstance(bos_token, str)
            else bos_token
        )
        eos_token = (
            AddedToken(eos_token, lstrip=False, rstrip=False)
            if isinstance(eos_token, str)
            else eos_token
        )
        unk_token = (
            AddedToken(unk_token, lstrip=False, rstrip=False)
            if isinstance(unk_token, str)
            else unk_token
        )
        pad_token = (
            AddedToken(pad_token, lstrip=False, rstrip=False)
            if isinstance(pad_token, str)
            else pad_token
        )

        self._build_special_tokens_map_extended(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
        )

        try:
            import ftfy

            self.fix_text = ftfy.fix_text
        except ImportError:
            logging.info("ftfy is not installed; using the custom BasicTokenizer instead of ftfy.")
            self.nlp = BasicTokenizer(
                strip_accents=False, do_split_on_punc=False, do_lower_case=True
            )
            self.fix_text = None
        self.re = try_import("regex")

        self._vocab_file = vocab_file
        self._merges_file = merges_file
        self.max_len = max_len if max_len is not None else int(1e12)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
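            # The first line of merges.txt is a version header; the slice below keeps
            # only the merge rules that the released CLIP BPE vocabulary was built with.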
            bpe_merges = (
                merges_handle.read().strip().split("\n")[1 : 49152 - 256 - 2 + 1]
            )
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {
            "<|startoftext|>": "<|startoftext|>",
            "<|endoftext|>": "<|endoftext|>",
        }
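        # The pattern matches, in priority order: the two special tokens, common
        # English contraction suffixes, runs of letters, single numerals, and runs
        # of any other non-space characters.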
        self.pat = self.re.compile(
            r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
            self.re.IGNORECASE,
        )

    @property
    def vocab_size(self):
        """
        Returns the size of vocabulary.

        Returns:
            int: The size of the base vocabulary (not counting added special tokens).
        """
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. A CLIP sequence has the following format:

        - single sequence: `<|startoftext|> X <|endoftext|>`

        Pairs of sequences are not the expected use case, but they will be handled without a separator.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of input IDs with the appropriate special tokens.
        """
        bos_token = [self.bos_token_id]
        eos_token = [self.eos_token_id]
        if token_ids_1 is None:
            return bos_token + token_ids_0 + eos_token
        return bos_token + token_ids_0 + eos_token + eos_token + token_ids_1 + eos_token

    def build_offset_mapping_with_special_tokens(
        self, offset_mapping_0, offset_mapping_1=None
    ):
        """
        Build offset map from a pair of offset maps by concatenating and adding offsets of special tokens.

        Should be overridden in a subclass if the model has a special way of building those.

        Args:
            offset_mapping_0 (List[tuple]):
                List of char offsets to which the special tokens will be added.
            offset_mapping_1 (List[tuple], optional):
                Optional second list of char offsets for offset mapping pairs.

        Returns:
            List[tuple]: List of char offsets with the appropriate offsets of special tokens.
        """
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return (
            [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]
        )

    def get_special_tokens_mask(
        self, token_ids_0, token_ids_1=None, already_has_special_tokens=False
    ):
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0,
                token_ids_1=token_ids_1,
                already_has_special_tokens=True,
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return (
            [1] + ([0] * len(token_ids_0)) + [1] + [1] + ([0] * len(token_ids_1)) + [1]
        )

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed. CLIP does not make use of token type ids, therefore a list of
        zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.
        """
        bos_token = [self.bos_token_id]
        eos_token = [self.eos_token_id]
        if token_ids_1 is None:
            return len(bos_token + token_ids_0 + eos_token) * [0]
        return len(
            bos_token + token_ids_0 + eos_token + eos_token + token_ids_1 + eos_token
        ) * [0]

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
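        # Split the token into characters and mark the final character with the
        # "</w>" end-of-word suffix so word-final symbols stay distinct from
        # word-internal ones.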
        word = tuple(token[:-1]) + (token[-1] + "</w>",)
        pairs = get_pairs(word)

        if not pairs:
            return token + "</w>"
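        # Repeatedly merge the pair with the lowest rank (the earliest-learned
        # merge) until none of the remaining pairs appears in the merge table.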
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
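        # If ftfy is unavailable, fall back to the BasicTokenizer for cleanup and
        # lowercasing; otherwise repair the text with ftfy and collapse whitespace.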
        if self.fix_text is None:
            text = " ".join(self.nlp.tokenize(text))
        else:
            text = whitespace_clean(self.fix_text(text), self.re).lower()
        for token in self.re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        text = "".join(tokens)
        byte_array = bytearray([self.byte_decoder[c] for c in text])
        text = (
            byte_array.decode("utf-8", errors=self.errors).replace("</w>", " ").strip()
        )
        return text

    def save_resources(self, save_directory):
        """
        Saves the tokenizer's vocabulary and merges files (`vocab.json` and
        `merges.txt`) under `save_directory`.

        Args:
            save_directory (str): Directory to save files into.
        """
        for name, file_name in self.resource_files_names.items():
            source_path = getattr(self, "_%s" % name)
            save_path = os.path.join(save_directory, file_name)
            if os.path.abspath(source_path) != os.path.abspath(save_path):
                shutil.copyfile(source_path, save_path)