# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from shutil import copyfile
from typing import List, Optional, Tuple

from paddlex.inference.models.common.tokenizer.tokenizer_utils import (
    PretrainedTokenizer,
)


class LlamaTokenizer(PretrainedTokenizer):
    """Tokenizer for Llama models, backed by a SentencePiece (BPE) model file."""

    model_input_names = ["input_ids", "attention_mask", "position_ids"]
    resource_files_names = {
        "vocab_file": "sentencepiece.bpe.model",
    }
    pretrained_resource_files_map = {
        "vocab_file": {
            "__internal_testing__/micro-random-llama": "https://bj.bcebos.com/paddlenlp/models/transformers/llama/sentencepiece.bpe.model",
            "__internal_testing__/tiny-random-llama": "https://bj.bcebos.com/paddlenlp/models/transformers/llama/sentencepiece.bpe.model",
            "facebook/llama-7b": "https://bj.bcebos.com/paddlenlp/models/transformers/llama/sentencepiece.bpe.model",
            "facebook/llama-13b": "https://bj.bcebos.com/paddlenlp/models/transformers/llama/sentencepiece.bpe.model",
            "facebook/llama-30b": "https://bj.bcebos.com/paddlenlp/models/transformers/llama/sentencepiece.bpe.model",
            "facebook/llama-65b": "https://bj.bcebos.com/paddlenlp/models/transformers/llama/sentencepiece.bpe.model",
        },
    }
    pretrained_init_configuration = {
        "__internal_testing__/micro-random-llama": {},
        "__internal_testing__/tiny-random-llama": {},
        "facebook/llama-7b": {},
        "facebook/llama-13b": {},
        "facebook/llama-30b": {},
        "facebook/llama-65b": {},
    }
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        add_bos_token=True,
        add_eos_token=False,
        sp_model_kwargs=None,
        decode_with_prefix_space=False,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs
        )
        self.vocab_file = vocab_file
        self.add_bos_token = add_bos_token
        self.add_eos_token = add_eos_token
        self.decode_with_prefix_space = decode_with_prefix_space
        # Load the SentencePiece processor; loading defaults to the "slow" path.
        self.sp_model = self.get_spm_processor(kwargs.pop("from_slow", True))

    @property
    def vocab_size(self):
        """Returns the size of the underlying SentencePiece vocabulary."""
        return self.sp_model.get_piece_size()

    def __len__(self):
        """
        Returns the total vocabulary size, counting added tokens whose ids fall
        outside the range of the SentencePiece model.
        """
        added_size = 0
        for token_id in self.added_tokens_decoder:
            if token_id >= self.sp_model.get_piece_size():
                added_size += 1
        return self.vocab_size + added_size

    @property
    def bos_token_id(self) -> Optional[int]:
        return self.sp_model.bos_id()

    @property
    def eos_token_id(self) -> Optional[int]:
        return self.sp_model.eos_id()

    def get_spm_processor(self, from_slow=True):
        import sentencepiece as spm

        tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        if from_slow:  # no dependency on protobuf
            tokenizer.Load(self.vocab_file)
            return tokenizer

        # The non-slow path rewrites the serialized model so that SentencePiece
        # does not insert a dummy prefix; this requires the protobuf bindings.
        from sentencepiece import sentencepiece_model_pb2 as model_pb2

        with open(self.vocab_file, "rb") as f:
            sp_model = f.read()
            model = model_pb2.ModelProto.FromString(sp_model)
            normalizer_spec = model_pb2.NormalizerSpec()
            normalizer_spec.add_dummy_prefix = False
            model.normalizer_spec.MergeFrom(normalizer_spec)
            sp_model = model.SerializeToString()
        tokenizer.LoadFromSerializedProto(sp_model)
        return tokenizer
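
    # Note (illustrative, not from the original file): disabling
    # `add_dummy_prefix` above changes how SentencePiece treats the start of a
    # string. With the dummy prefix enabled, the first piece typically carries a
    # leading "▁" word-boundary marker; with it disabled, no marker is prepended
    # to the first piece.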

    def get_vocab(self):
        """Returns the vocabulary as a dict mapping tokens to ids."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        """Returns a tokenized string."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.id_to_piece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for i, token in enumerate(tokens):
            # make sure that special tokens are not decoded using the sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special and i != 0:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string
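
    # Illustration (assumed pieces; exact output depends on the SentencePiece
    # model): ordinary pieces such as ["▁Hello", "▁world"] are decoded by
    # sp_model, while a special token such as "</s>" is appended verbatim to the
    # decoded text instead of being passed through sp_model.decode.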

    def save_vocabulary(
        self, save_directory, filename_prefix: Optional[str] = None
    ) -> Tuple[str]:
        """
        Save the vocabulary and special tokens file to a directory.

        Args:
            save_directory (`str`):
                The directory in which to save the vocabulary.
            filename_prefix (`str`, *optional*):
                An optional prefix prepended to the saved file name.

        Returns:
            `Tuple(str)`: Paths to the files saved.
        """
        if not os.path.isdir(save_directory):
            raise ValueError(
                f"Vocabulary path ({save_directory}) should be a directory"
            )
        out_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "")
            + self.resource_files_names["vocab_file"],
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(
            out_vocab_file
        ) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
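
    # For example (hypothetical paths): save_vocabulary("./ckpt") writes
    # "./ckpt/sentencepiece.bpe.model", while save_vocabulary("./ckpt", "llama")
    # writes "./ckpt/llama-sentencepiece.bpe.model".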

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if self.add_bos_token:
            bos_token_ids = [self.bos_token_id]
        else:
            bos_token_ids = []
        output = bos_token_ids + token_ids_0
        if token_ids_1 is not None:
            output = output + token_ids_1
        if self.add_eos_token:
            output = output + [self.eos_token_id]
        return output
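
    # With the default flags (add_bos_token=True, add_eos_token=False) and
    # hypothetical ids, token_ids_0=[100, 200] yields [self.bos_token_id, 100, 200];
    # passing token_ids_1=[300] as well yields [self.bos_token_id, 100, 200, 300].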

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0,
                token_ids_1=token_ids_1,
                already_has_special_tokens=True,
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
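
    # For a single sequence of three ids the mask is [1, 0, 0, 0, 1]: a leading
    # special-token slot, the three sequence tokens, and a trailing
    # special-token slot.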

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. Llama does not
        make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.
        """
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
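

# A minimal usage sketch (not part of the original module). The vocab-file path
# is hypothetical, and `tokenize` / `convert_tokens_to_ids` are assumed to be
# inherited from the PretrainedTokenizer base class, mirroring the usual
# HF-style tokenizer API.
if __name__ == "__main__":
    tokenizer = LlamaTokenizer(vocab_file="path/to/sentencepiece.bpe.model")
    tokens = tokenizer.tokenize("Hello world")
    ids = tokenizer.build_inputs_with_special_tokens(
        tokenizer.convert_tokens_to_ids(tokens)
    )
    print(tokens, ids)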