
Add yolow; DON'T merge!! ; WAIT for rc1 (#3258)

* fix reviewed docs

* add yolow
Zhang Zelun, 8 months ago
Commit 74d316ad95

+ 10 - 1
docs/module_usage/tutorials/cv_modules/open_vocabulary_detection.en.md

@@ -26,7 +26,16 @@ Open-vocabulary object detection is an advanced object detection technology aime
 <td>253.72</td>
 <td>1807.4</td>
 <td>658.3</td>
-<td rowspan="3">This is an open-vocabulary object detection model trained on the O365, GoldG, and Cap4M datasets. It uses Bert for text encoding and DINO for the visual model, with additional cross-modal fusion modules, achieving good performance in open-vocabulary object detection.</td>
+<td rowspan="1">This is an open-vocabulary object detection model trained on the O365, GoldG, and Cap4M datasets. It uses Bert for text encoding and DINO for the visual model, with additional cross-modal fusion modules, achieving good performance in open-vocabulary object detection.</td>
+</tr>
+<tr>
+<td>YOLO-Worldv2-L</td><td><a href="https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0rc0/YOLO-Worldv2-L_infer.tar">Inference Model</a></td>
+<td>44.4</td>
+<td>59.8</td>
+<td>24.32</td>
+<td>374.89</td>
+<td>421.4</td>
+<td rowspan="1">This is an open-vocabulary object detection model trained on the O365 and GoldG datasets. It uses CLIP for text encoding and YOLOv8 for the visual model, with additional light-weight cross-modal fusion modules, achieving a good balance between speed and performance.</td>
 </tr>
 </table>
 

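The table above links the packaged inference model as a tar archive; below is a minimal fetch-and-unpack sketch using only the Python standard library (the URL is taken from the table, while the extracted directory layout is an assumption):

```python
# Sketch: download and unpack the YOLO-Worldv2-L inference archive linked in the
# table above. The URL comes from the docs; the extracted layout is an assumption.
import tarfile
import urllib.request

URL = (
    "https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/"
    "paddle3.0rc0/YOLO-Worldv2-L_infer.tar"
)
urllib.request.urlretrieve(URL, "YOLO-Worldv2-L_infer.tar")
with tarfile.open("YOLO-Worldv2-L_infer.tar") as tar:
    tar.extractall(".")  # the unpacked directory can serve as Predict.model_dir
```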
+ 10 - 1
docs/module_usage/tutorials/cv_modules/open_vocabulary_detection.md

@@ -27,7 +27,16 @@ comments: true
 <td>253.72</td>
 <td>1807.4</td>
 <td>658.3</td>
-<td rowspan="3">基于O365,GoldG,Cap4M三个数据集训练的开放词汇目标目标检测模型。文本编码器采用Bert,视觉模型部份整体采用DINO,额外设计了一些跨模态融合模块,在开放词汇目标检测领域取得了较好的效果。</td>
+<td rowspan="1">基于O365,GoldG,Cap4M三个数据集训练的开放词汇目标目标检测模型。文本编码器采用Bert,视觉模型部份整体采用DINO,额外设计了一些跨模态融合模块,在开放词汇目标检测领域取得了较好的效果。</td>
+</tr>
+<tr>
+<td>YOLO-Worldv2-L</td><td><a href="https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0rc0/YOLO-Worldv2-L_infer.tar">推理模型</a></td>
+<td>44.4</td>
+<td>59.8</td>
+<td>24.32</td>
+<td>374.89</td>
+<td>421.4</td>
+<td rowspan="1">基于O365,GoldG两个数据集训练的开放词汇目标目标检测模型。文本编码器采用CLIP,视觉模型部份整体采用YOLOv8,额外设计了一些轻量化的跨模态融合模块,在精度和速度取得了较好的均衡。</td>
 </tr>
 </table>
 

+ 13 - 0
paddlex/configs/modules/open_vocabulary_detection/YOLO-Worldv2-L.yaml

@@ -0,0 +1,13 @@
+Global:
+  model: YOLO-Worldv2-L
+  mode: predict # only predict is supported
+  device: gpu:0
+  output: "output"
+
+Predict:
+  batch_size: 1
+  model_dir: "/ChartQA/PaddleX_refactor/PaddleX/_zzl_test_yolow/YOLO-Worldv2-L"
+  input: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/open_vocabulary_detection.jpg"
+  prompt: "bus, car, rearview mirror"
+  kernel_option:
+    run_mode: paddle

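The Predict block above mirrors the usual single-module PaddleX workflow. A minimal sketch of the equivalent Python call follows, assuming the generic `create_model`/`predict` API used by the other modules; the keyword names are taken from this YAML and may differ in the final rc1 release:

```python
# Hedged sketch only: assumes the generic PaddleX module API; the prompt and the
# demo image URL are taken verbatim from the YAML Predict block above.
from paddlex import create_model

model = create_model("YOLO-Worldv2-L")
results = model.predict(
    "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/open_vocabulary_detection.jpg",
    prompt="bus, car, rearview mirror",  # comma-separated open-vocabulary class names
    batch_size=1,
)
for res in results:
    res.print()                  # boxes, labels, and scores
    res.save_to_img("output/")   # visualization, matching Global.output above
```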
+ 1 - 0
paddlex/inference/models/common/tokenizer/__init__.py

@@ -15,3 +15,4 @@
 from .tokenizer_utils import PretrainedTokenizer
 from .gpt_tokenizer import GPTTokenizer
 from .bert_tokenizer import BertTokenizer
+from .clip_tokenizer import CLIPTokenizer

+ 606 - 0
paddlex/inference/models/common/tokenizer/clip_tokenizer.py

@@ -0,0 +1,606 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import json
+import os
+import shutil
+import logging
+import unicodedata
+from functools import lru_cache
+from typing import List, Optional
+
+from paddle.utils import try_import
+
+from .tokenizer_utils_base import AddedToken
+from .tokenizer_utils import PretrainedTokenizer
+from .tokenizer_utils import _is_control, _is_punctuation, _is_whitespace
+
+__all__ = ["CLIPTokenizer"]
+
+
+@lru_cache()
+def bytes_to_unicode():
+    """
+    Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to whitespace/control
+    characters that the bpe code barfs on.
+
+    The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
+    if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
+    decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
+    tables between utf-8 bytes and unicode strings.
+    """
+    bs = (
+        list(range(ord("!"), ord("~") + 1))
+        + list(range(ord("¡"), ord("¬") + 1))
+        + list(range(ord("®"), ord("ÿ") + 1))
+    )
+    cs = bs[:]
+    n = 0
+    for b in range(2**8):
+        if b not in bs:
+            bs.append(b)
+            cs.append(2**8 + n)
+            n += 1
+    cs = [chr(n) for n in cs]
+    return dict(zip(bs, cs))
+
+
+def get_pairs(word):
+    """
+    Return set of symbol pairs in a word.
+
+    Word is represented as tuple of symbols (symbols being variable-length strings).
+    """
+    pairs = set()
+    prev_char = word[0]
+    for char in word[1:]:
+        pairs.add((prev_char, char))
+        prev_char = char
+    return pairs
+
+
+def whitespace_clean(text, re):
+    text = re.sub(r"\s+", " ", text)
+    text = text.strip()
+    return text
+
+
+def whitespace_tokenize(text):
+    """Runs basic whitespace cleaning and splitting on a piece of text."""
+    text = text.strip()
+    if not text:
+        return []
+    tokens = text.split()
+    return tokens
+
+
+# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
+class BasicTokenizer(object):
+    """
+    Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
+
+    Args:
+        do_lower_case (`bool`, *optional*, defaults to `True`):
+            Whether or not to lowercase the input when tokenizing.
+        never_split (`Iterable`, *optional*):
+            Collection of tokens which will never be split during tokenization. Only has an effect when
+            `do_basic_tokenize=True`
+        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+            Whether or not to tokenize Chinese characters.
+
+            This should likely be deactivated for Japanese (see this
+            [issue](https://github.com/huggingface/transformers/issues/328)).
+        strip_accents (`bool`, *optional*):
+            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+            value for `lowercase` (as in the original BERT).
+        do_split_on_punc (`bool`, *optional*, defaults to `True`):
+            In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
+            the full context of the words, such as contractions.
+    """
+
+    def __init__(
+        self,
+        do_lower_case=True,
+        never_split=None,
+        tokenize_chinese_chars=True,
+        strip_accents=None,
+        do_split_on_punc=True,
+    ):
+        if never_split is None:
+            never_split = []
+        self.do_lower_case = do_lower_case
+        self.never_split = set(never_split)
+        self.tokenize_chinese_chars = tokenize_chinese_chars
+        self.strip_accents = strip_accents
+        self.do_split_on_punc = do_split_on_punc
+
+    def tokenize(self, text, never_split=None):
+        """
+        Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
+
+        Args:
+            never_split (`List[str]`, *optional*)
+                Kept for backward compatibility purposes. Now implemented directly at the base class level (see
+                [`PreTrainedTokenizer.tokenize`]) List of tokens not to split.
+        """
+        # union() returns a new set by concatenating the two sets.
+        never_split = (
+            self.never_split.union(set(never_split))
+            if never_split
+            else self.never_split
+        )
+        text = self._clean_text(text)
+
+        # This was added on November 1st, 2018 for the multilingual and Chinese
+        # models. This is also applied to the English models now, but it doesn't
+        # matter since the English models were not trained on any Chinese data
+        # and generally don't have any Chinese data in them (there are Chinese
+        # characters in the vocabulary because Wikipedia does have some Chinese
+        # words in the English Wikipedia.).
+        if self.tokenize_chinese_chars:
+            text = self._tokenize_chinese_chars(text)
+        # prevents treating the same character with different unicode codepoints as different characters
+        unicode_normalized_text = unicodedata.normalize("NFC", text)
+        orig_tokens = whitespace_tokenize(unicode_normalized_text)
+        split_tokens = []
+        for token in orig_tokens:
+            if token not in never_split:
+                if self.do_lower_case:
+                    token = token.lower()
+                    if self.strip_accents is not False:
+                        token = self._run_strip_accents(token)
+                elif self.strip_accents:
+                    token = self._run_strip_accents(token)
+            split_tokens.extend(self._run_split_on_punc(token, never_split))
+
+        output_tokens = whitespace_tokenize(" ".join(split_tokens))
+        return output_tokens
+
+    def _run_strip_accents(self, text):
+        """Strips accents from a piece of text."""
+        text = unicodedata.normalize("NFD", text)
+        output = []
+        for char in text:
+            cat = unicodedata.category(char)
+            if cat == "Mn":
+                continue
+            output.append(char)
+        return "".join(output)
+
+    def _run_split_on_punc(self, text, never_split=None):
+        """Splits punctuation on a piece of text."""
+        if not self.do_split_on_punc or (
+            never_split is not None and text in never_split
+        ):
+            return [text]
+        chars = list(text)
+        i = 0
+        start_new_word = True
+        output = []
+        while i < len(chars):
+            char = chars[i]
+            if _is_punctuation(char):
+                output.append([char])
+                start_new_word = True
+            else:
+                if start_new_word:
+                    output.append([])
+                start_new_word = False
+                output[-1].append(char)
+            i += 1
+
+        return ["".join(x) for x in output]
+
+    def _tokenize_chinese_chars(self, text):
+        """Adds whitespace around any CJK character."""
+        output = []
+        for char in text:
+            cp = ord(char)
+            if self._is_chinese_char(cp):
+                output.append(" ")
+                output.append(char)
+                output.append(" ")
+            else:
+                output.append(char)
+        return "".join(output)
+
+    def _is_chinese_char(self, cp):
+        """Checks whether CP is the codepoint of a CJK character."""
+        # This defines a "chinese character" as anything in the CJK Unicode block:
+        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
+        #
+        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
+        # despite its name. The modern Korean Hangul alphabet is a different block,
+        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
+        # space-separated words, so they are not treated specially and handled
+        # like all of the other languages.
+        if (
+            (cp >= 0x4E00 and cp <= 0x9FFF)
+            or (cp >= 0x3400 and cp <= 0x4DBF)  #
+            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
+            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
+            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
+            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
+            or (cp >= 0xF900 and cp <= 0xFAFF)
+            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
+        ):  #
+            return True
+
+        return False
+
+    def _clean_text(self, text):
+        """Performs invalid character removal and whitespace cleanup on text."""
+        output = []
+        for char in text:
+            cp = ord(char)
+            if cp == 0 or cp == 0xFFFD or _is_control(char):
+                continue
+            if _is_whitespace(char):
+                output.append(" ")
+            else:
+                output.append(char)
+        return "".join(output)
+
+
+class CLIPTokenizer(PretrainedTokenizer):
+    r"""
+    Construct a CLIP tokenizer based on byte-level Byte-Pair-Encoding.
+
+    This tokenizer inherits from :class:`PretrainedTokenizer`.
+    For more information regarding those methods, please refer to this superclass.
+
+    Args:
+        vocab_file (str):
+            Path to the vocabulary file.
+            The vocab file contains a mapping from vocabulary strings to indices.
+        merges_file (str):
+            Path to the merge file.
+            The merge file is used to split the input sentence into "subword" units.
+            The vocab file is then used to encode those units as indices.
+        errors (str):
+            Paradigm to follow when decoding bytes to UTF-8.
+            Defaults to `'replace'`.
+        max_len (int, optional):
+            The maximum value of the input sequence length.
+            Defaults to `77`.
+        bos_token (str, optional):
+            The beginning of sequence token that was used during pretraining. Can be
+            used as a sequence classifier token.
+            Defaults to `"<|startoftext|>"`.
+        eos_token (str, optional):
+            A special token representing the end of a sequence that was used during pretraining.
+            Defaults to `"<|endoftext|>"`.
+        unk_token (str, optional):
+            A special token representing the *unknown (out-of-vocabulary)* token.
+            An unknown token is set to be `unk_token` in order to be converted to an ID.
+            Defaults to `"<|endoftext|>"`.
+        pad_token (str, optional):
+            A special token used to make arrays of tokens the same size for batching purposes.
+            Defaults to `"<|endoftext|>"`.
+
+    Examples:
+        .. code-block::
+
+            from paddlenlp.transformers import AutoTokenizer
+
+            tokenizer = AutoTokenizer.from_pretrained('openai/clip-vit-base-patch32')
+            print(tokenizer('He was a puppeteer'))
+
+            '''
+            {'input_ids': [49406, 797, 739, 320, 7116, 38820, 528, 49407]}
+            '''
+
+    """
+
+    # merges and vocab same as GPT2
+    resource_files_names = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
+    pretrained_resource_files_map = {"vocab_file": {}, "merges_file": {}}
+    pretrained_init_configuration = {}
+    model_input_names = [
+        "input_ids",
+        "attention_mask",
+    ]
+
+    def __init__(
+        self,
+        vocab_file,
+        merges_file,
+        errors="replace",
+        max_len=77,
+        bos_token="<|startoftext|>",
+        eos_token="<|endoftext|>",
+        unk_token="<|endoftext|>",
+        pad_token="<|endoftext|>",
+        **kwargs
+    ):
+
+        bos_token = (
+            AddedToken(bos_token, lstrip=False, rstrip=False)
+            if isinstance(bos_token, str)
+            else bos_token
+        )
+        eos_token = (
+            AddedToken(eos_token, lstrip=False, rstrip=False)
+            if isinstance(eos_token, str)
+            else eos_token
+        )
+        unk_token = (
+            AddedToken(unk_token, lstrip=False, rstrip=False)
+            if isinstance(unk_token, str)
+            else unk_token
+        )
+        pad_token = (
+            AddedToken(pad_token, lstrip=False, rstrip=False)
+            if isinstance(pad_token, str)
+            else pad_token
+        )
+
+        self._build_special_tokens_map_extended(
+            bos_token=bos_token,
+            eos_token=eos_token,
+            unk_token=unk_token,
+            pad_token=pad_token,
+        )
+
+        try:
+            import ftfy
+
+            self.fix_text = ftfy.fix_text
+        except ImportError:
+            logging.info(
+                "ftfy or spacy is not installed using custom BasicTokenizer instead of ftfy."
+            )
+            self.nlp = BasicTokenizer(
+                strip_accents=False, do_split_on_punc=False, do_lower_case=True
+            )
+            self.fix_text = None
+        self.re = try_import("regex")
+
+        self._vocab_file = vocab_file
+        self._merges_file = merges_file
+        self.max_len = max_len if max_len is not None else int(1e12)
+
+        with open(vocab_file, encoding="utf-8") as vocab_handle:
+            self.encoder = json.load(vocab_handle)
+        self.decoder = {v: k for k, v in self.encoder.items()}
+        self.errors = errors  # how to handle errors in decoding
+        self.byte_encoder = bytes_to_unicode()
+        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
+        with open(merges_file, encoding="utf-8") as merges_handle:
+            bpe_merges = (
+                merges_handle.read().strip().split("\n")[1 : 49152 - 256 - 2 + 1]
+            )
+        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
+        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
+        self.cache = {
+            "<|startoftext|>": "<|startoftext|>",
+            "<|endoftext|>": "<|endoftext|>",
+        }
+
+        self.pat = self.re.compile(
+            r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
+            self.re.IGNORECASE,
+        )
+
+    @property
+    def vocab_size(self):
+        """
+        Returns the size of vocabulary.
+
+        Returns:
+            int: The size of the base vocabulary (added special tokens are not counted).
+
+        """
+        return len(self.encoder)
+
+    def get_vocab(self):
+        return dict(self.encoder, **self.added_tokens_encoder)
+
+    def build_inputs_with_special_tokens(
+        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+    ) -> List[int]:
+        """
+        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
+        adding special tokens. A CLIP sequence has the following format:
+
+        - single sequence: `<|startoftext|> X <|endoftext|>`
+
+        Pairs of sequences are not the expected use case, but they will be handled without a separator.
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of IDs to which the special tokens will be added.
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+
+        Returns:
+            `List[int]`: List of input IDs with the appropriate special tokens.
+        """
+        bos_token = [self.bos_token_id]
+        eos_token = [self.eos_token_id]
+
+        if token_ids_1 is None:
+            return bos_token + token_ids_0 + eos_token
+        return bos_token + token_ids_0 + eos_token + eos_token + token_ids_1 + eos_token
+
+    def build_offset_mapping_with_special_tokens(
+        self, offset_mapping_0, offset_mapping_1=None
+    ):
+        """
+        Build offset map from a pair of offset map by concatenating and adding offsets of special tokens.
+
+        Should be overridden in a subclass if the model has a special way of building those.
+
+        Args:
+            offset_mapping_0 (List[tuple]):
+                List of char offsets to which the special tokens will be added.
+            offset_mapping_1 (List[tuple], optional):
+                Optional second list of char offsets for offset mapping pairs.
+
+        Returns:
+            List[tuple]: List of char offsets with the appropriate offsets of special tokens.
+        """
+        if offset_mapping_1 is None:
+            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
+
+        return (
+            [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]
+        )
+
+    def get_special_tokens_mask(
+        self, token_ids_0, token_ids_1=None, already_has_special_tokens=False
+    ):
+        """
+        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+        special tokens using the tokenizer `prepare_for_model` method.
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of IDs.
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                Whether or not the token list is already formatted with special tokens for the model.
+
+        Returns:
+            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+        """
+
+        if already_has_special_tokens:
+            return super().get_special_tokens_mask(
+                token_ids_0=token_ids_0,
+                token_ids_1=token_ids_1,
+                already_has_special_tokens=True,
+            )
+
+        if token_ids_1 is None:
+            return [1] + ([0] * len(token_ids_0)) + [1]
+        return (
+            [1] + ([0] * len(token_ids_0)) + [1] + [1] + ([0] * len(token_ids_1)) + [1]
+        )
+
+    def create_token_type_ids_from_sequences(
+        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+    ) -> List[int]:
+        """
+        Create a mask from the two sequences passed. CLIP does not make use of token type ids, therefore a list of
+        zeros is returned.
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of IDs.
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+
+        Returns:
+            `List[int]`: List of zeros.
+        """
+        bos_token = [self.bos_token_id]
+        eos_token = [self.eos_token_id]
+
+        if token_ids_1 is None:
+            return len(bos_token + token_ids_0 + eos_token) * [0]
+        return len(
+            bos_token + token_ids_0 + eos_token + eos_token + token_ids_1 + eos_token
+        ) * [0]
+
+    def bpe(self, token):
+        if token in self.cache:
+            return self.cache[token]
+        word = tuple(token[:-1]) + (token[-1] + "</w>",)
+        pairs = get_pairs(word)
+
+        if not pairs:
+            return token + "</w>"
+
+        while True:
+            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
+            if bigram not in self.bpe_ranks:
+                break
+            first, second = bigram
+            new_word = []
+            i = 0
+            while i < len(word):
+                try:
+                    j = word.index(first, i)
+                except ValueError:
+                    new_word.extend(word[i:])
+                    break
+                else:
+                    new_word.extend(word[i:j])
+                    i = j
+
+                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
+                    new_word.append(first + second)
+                    i += 2
+                else:
+                    new_word.append(word[i])
+                    i += 1
+            new_word = tuple(new_word)
+            word = new_word
+            if len(word) == 1:
+                break
+            else:
+                pairs = get_pairs(word)
+        word = " ".join(word)
+        self.cache[token] = word
+        return word
+
+    def _tokenize(self, text):
+        """Tokenize a string."""
+        bpe_tokens = []
+        if self.fix_text is None:
+            text = " ".join(self.nlp.tokenize(text))
+        else:
+            text = whitespace_clean(self.fix_text(text), self.re).lower()
+
+        for token in self.re.findall(self.pat, text):
+            token = "".join(
+                self.byte_encoder[b] for b in token.encode("utf-8")
+            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
+            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
+        return bpe_tokens
+
+    def _convert_token_to_id(self, token):
+        """Converts a token (str) in an id using the vocab."""
+        return self.encoder.get(token, self.encoder.get(self.unk_token))
+
+    def _convert_id_to_token(self, index):
+        """Converts an index (integer) in a token (str) using the vocab."""
+        return self.decoder.get(index)
+
+    def convert_tokens_to_string(self, tokens):
+        """Converts a sequence of tokens (string) in a single string."""
+        text = "".join(tokens)
+        byte_array = bytearray([self.byte_decoder[c] for c in text])
+        text = (
+            byte_array.decode("utf-8", errors=self.errors).replace("</w>", " ").strip()
+        )
+        return text
+
+    def save_resources(self, save_directory):
+        """
+        Saves the tokenizer resource files (`vocab.json` and `merges.txt`)
+        under `save_directory`.
+
+        Args:
+            save_directory (str): Directory to save files into.
+        """
+        for name, file_name in self.resource_files_names.items():
+            source_path = getattr(self, "_%s" % name)
+
+            save_path = os.path.join(save_directory, file_name)
+            if os.path.abspath(source_path) != os.path.abspath(save_path):
+                shutil.copyfile(source_path, save_path)

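For orientation, the YOLO-World processor added later in this commit loads this tokenizer from a `tokenizer/` subdirectory of the inference model and tokenizes a padded batch of class names (see `YOLOWorldProcessor.process_text` below). A minimal sketch of that call pattern, with a hypothetical local model path:

```python
# Sketch of how this tokenizer is consumed by YOLOWorldProcessor below;
# "YOLO-Worldv2-L/tokenizer" is a hypothetical local directory containing
# vocab.json and merges.txt (see resource_files_names above).
from paddlex.inference.models.common.tokenizer import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("YOLO-Worldv2-L/tokenizer")
words = ["bus", "car", "rearview mirror", " "]  # prompt split on commas plus a padding entry
batch = tokenizer(text=words, return_tensors="pd", padding=True)
input_ids = batch["input_ids"].numpy()            # (num_classes, max_token_len)
attention_mask = batch["attention_mask"].numpy()  # 1 for real tokens, 0 for padding
```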
+ 25 - 2
paddlex/inference/models/open_vocabulary_detection/predictor.py

@@ -20,7 +20,12 @@ from ....utils.func_register import FuncRegister
 from ....modules.open_vocabulary_detection.model_list import MODELS
 from ...common.batch_sampler import ImageBatchSampler
 from ...common.reader import ReadImage
-from .processors import GroundingDINOProcessor, GroundingDINOPostProcessor
+from .processors import (
+    GroundingDINOProcessor,
+    GroundingDINOPostProcessor,
+    YOLOWorldProcessor,
+    YOLOWorldPostProcessor,
+)
 from ..common import StaticInfer
 from ..base import BasicPredictor
 from ..object_detection.result import DetResult
@@ -97,7 +102,7 @@ class OVDetPredictor(BasicPredictor):
         image_paths = batch_data.input_paths
         src_images = self.pre_ops[0](batch_data.instances)
         datas = src_images
-        # preprocess
+        # preprocess for image only
         for pre_op in self.pre_ops[1:-1]:
             datas = pre_op(datas)
 
@@ -141,6 +146,10 @@ class OVDetPredictor(BasicPredictor):
                 box_threshold=self.config["box_threshold"],
                 text_threshold=self.config["text_threshold"],
             )
+        elif "YOLO-World" in self.model_name:
+            return YOLOWorldPostProcessor(
+                threshold=self.config["threshold"],
+            )
         else:
             raise NotImplementedError
 
@@ -153,3 +162,17 @@ class OVDetPredictor(BasicPredictor):
             text_max_words=text_max_words,
             target_size=target_size,
         )
+
+    @register("YOLOWorldProcessor")
+    def build_yoloworld_preprocessor(
+        self,
+        image_target_size=(640, 640),
+        image_mean=[0.0, 0.0, 0.0],
+        image_std=[1.0, 1.0, 1.0],
+    ):
+        return YOLOWorldProcessor(
+            model_dir=self.model_dir,
+            image_target_size=image_target_size,
+            image_mean=image_mean,
+            image_std=image_std,
+        )

+ 1 - 0
paddlex/inference/models/open_vocabulary_detection/processors/__init__.py

@@ -13,3 +13,4 @@
 # limitations under the License.
 
 from .groundingdino_processors import GroundingDINOProcessor, GroundingDINOPostProcessor
+from .yoloworld_processors import YOLOWorldProcessor, YOLOWorldPostProcessor

+ 110 - 0
paddlex/inference/models/open_vocabulary_detection/processors/common.py

@@ -0,0 +1,110 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os, cv2
+from typing import Dict, List, Optional, Union, Tuple
+
+import numpy as np
+import PIL
+
+
+class LetterResize(object):
+    def __init__(
+        self,
+        scale=[640, 640],
+        pad_val=144,
+        use_mini_pad=False,
+        stretch_only=False,
+        allow_scale_up=False,
+    ):
+        super(LetterResize, self).__init__()
+        self.scale = scale
+        self.pad_val = pad_val
+
+        self.use_mini_pad = use_mini_pad
+        self.stretch_only = stretch_only
+        self.allow_scale_up = allow_scale_up
+
+    def _resize_img(self, image: np.ndarray) -> Dict:
+
+        scale = self.scale
+        image_shape = image.shape[:2]
+
+        ratio = min(scale[0] / image_shape[0], scale[1] / image_shape[1])
+
+        if not self.allow_scale_up:
+            ratio = min(ratio, 1.0)
+        ratio = [ratio, ratio]
+
+        no_pad_shape = (
+            int(round(image_shape[0] * ratio[0])),
+            int(round(image_shape[1] * ratio[1])),
+        )
+        padding_h, padding_w = [scale[0] - no_pad_shape[0], scale[1] - no_pad_shape[1]]
+        if self.use_mini_pad:
+            padding_w, padding_h = np.mod(padding_w, 32), np.mod(padding_h, 32)
+        elif self.stretch_only:
+            padding_h, padding_w = 0.0, 0.0
+            no_pad_shape = (scale[0], scale[1])
+            ratio = [scale[0] / image_shape[0], scale[1] / image_shape[1]]
+
+        if image_shape != no_pad_shape:
+            image = cv2.resize(
+                image,
+                (no_pad_shape[1], no_pad_shape[0]),
+                interpolation=cv2.INTER_LINEAR,
+            )
+
+        scale_factor = (
+            no_pad_shape[1] / image_shape[1],
+            no_pad_shape[0] / image_shape[0],
+        )
+
+        top_padding, left_padding = int(round(padding_h // 2 - 0.1)), int(
+            round(padding_w // 2 - 0.1)
+        )
+        bottom_padding = padding_h - top_padding
+        right_padding = padding_w - left_padding
+
+        padding_list = [top_padding, bottom_padding, left_padding, right_padding]
+        if (
+            top_padding != 0
+            or bottom_padding != 0
+            or left_padding != 0
+            or right_padding != 0
+        ):
+            pad_val = self.pad_val
+            if isinstance(pad_val, int) and image.ndim == 3:
+                pad_val = tuple(pad_val for _ in range(image.shape[2]))
+            top, bottom, left, right = padding_list
+            image = cv2.copyMakeBorder(
+                image, top, bottom, left, right, cv2.BORDER_CONSTANT, value=pad_val
+            )
+
+        result = dict()
+        result["image"] = image
+        result["scale_factor"] = np.array(scale_factor, dtype=np.float32)
+        result["pad_param"] = np.array(padding_list, dtype=np.float32)
+
+        return result
+
+    def __call__(self, images: List[np.ndarray]) -> List[Dict]:
+
+        if not isinstance(images, (List, Tuple)):
+            images = [images]
+
+        rst_images = [self._resize_img(image) for image in images]
+
+        return rst_images

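A quick sketch of what `LetterResize` returns for a synthetic 480x640 input, using the options the YOLO-World processor passes (values traced from the code above; the input array is synthetic):

```python
# Illustrative only: a synthetic 480x640 image letterboxed to 640x640.
import numpy as np
from paddlex.inference.models.open_vocabulary_detection.processors.common import LetterResize

resize = LetterResize(scale=[640, 640], allow_scale_up=True)
out = resize([np.zeros((480, 640, 3), dtype=np.uint8)])[0]

print(out["image"].shape)    # (640, 640, 3): 80 px of pad value 144 added top and bottom
print(out["scale_factor"])   # [1. 1.]: (width_ratio, height_ratio), no scaling needed here
print(out["pad_param"])      # [80. 80. 0. 0.]: [top, bottom, left, right] padding in pixels
```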
+ 210 - 0
paddlex/inference/models/open_vocabulary_detection/processors/yoloworld_processors.py

@@ -0,0 +1,210 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from typing import Dict, List, Optional, Union, Tuple
+
+import numpy as np
+import PIL
+from .common import LetterResize
+
+from ...common.tokenizer.clip_tokenizer import CLIPTokenizer
+
+
+class YOLOWorldProcessor(object):
+    """Image and Text Processors for YOLO-World"""
+
+    def __init__(
+        self,
+        model_dir,
+        image_target_size: Union[Tuple[int], int] = (640, 640),
+        image_mean: Union[float, List[float]] = [0.0, 0.0, 0.0],
+        image_std: Union[float, List[float]] = [1.0, 1.0, 1.0],
+        **kwargs,
+    ):
+
+        if isinstance(image_target_size, int):
+            image_target_size = (image_target_size, image_target_size)
+        if isinstance(image_mean, float):
+            image_mean = [image_mean, image_mean, image_mean]
+        if isinstance(image_std, float):
+            image_std = [image_std, image_std, image_std]
+
+        self.image_target_size = image_target_size
+        self.image_mean = image_mean
+        self.image_std = image_std
+
+        tokenizer_dir = os.path.join(model_dir, "tokenizer")
+        assert os.path.isdir(tokenizer_dir), f"{tokenizer_dir} does not exist."
+        self.tokenizer = CLIPTokenizer.from_pretrained(tokenizer_dir)
+
+        self.resize_op = LetterResize(self.image_target_size, allow_scale_up=True)
+
+        if isinstance(image_mean, (tuple, list)):
+            self.image_mean = np.array(image_mean)
+        if self.image_mean.ndim < 4:
+            self.image_mean = self.image_mean.reshape(1, -1, 1, 1)
+
+        if isinstance(image_std, (tuple, list)):
+            self.image_std = np.array(image_std)
+        if self.image_std.ndim < 4:
+            self.image_std = self.image_std.reshape(1, -1, 1, 1)
+
+    def __call__(
+        self,
+        images: List[np.ndarray],
+        text: str,
+        **kwargs,
+    ):
+        preprocess_results = self.process_image(images)
+        preprocess_results.update(self.process_text(text))
+        static_input_orders = [
+            "attention_mask",
+            "image",
+            "input_ids",
+            "pad_param",
+            "scale_factor",
+        ]
+        result = [preprocess_results[k] for k in static_input_orders]
+
+        return result
+
+    def process_image(self, images):
+        """Image preprocess for YOLO-World"""
+        rescaled_images = self.resize_op(images)
+
+        images = np.stack(
+            [rescaled_image["image"] for rescaled_image in rescaled_images], axis=0
+        )
+        scale_factors = np.stack(
+            [rescaled_image["scale_factor"] for rescaled_image in rescaled_images],
+            axis=0,
+        )
+        pad_params = np.stack(
+            [rescaled_image["pad_param"] for rescaled_image in rescaled_images], axis=0
+        )
+
+        images = np.transpose(images, (0, 3, 1, 2)).astype(np.float32) / 255.0
+        images -= self.image_mean
+        images /= self.image_std
+
+        image_results = {
+            "image": images,
+            "scale_factor": scale_factors,
+            "pad_param": pad_params[:, [3, 0]],
+        }
+
+        return image_results
+
+    def process_text(self, text):
+
+        text = text.strip().lower()
+        words = [word.strip() for word in text.split(",")]
+        words += [" "]
+        tokenized_text = self.tokenizer(text=words, return_tensors="pd", padding=True)
+
+        text_results = {
+            "input_ids": tokenized_text["input_ids"].numpy(),
+            "attention_mask": tokenized_text["attention_mask"].numpy(),
+        }
+
+        return text_results
+
+
+class YOLOWorldPostProcessor(object):
+    """PostProcessors for YOLO-World"""
+
+    def __init__(
+        self,
+        threshold: float = 0.05,
+        **kwargs,
+    ):
+        """Init Function for YOLO-World PostProcessor
+
+        Args:
+            threshold (float): threshold for low confidence bbox filtering.
+        """
+        self.threshold = threshold
+
+    def __call__(
+        self,
+        pred_boxes,
+        pred_nums,
+        prompt,
+        src_images,
+        threshold=None,
+        **kwargs,
+    ):
+
+        threshold = self.threshold if threshold is None else threshold
+
+        split_index = np.cumsum(pred_nums)[:-1]
+        pred_boxes = np.split(pred_boxes, split_index, axis=0)
+        assert len(pred_boxes) == len(src_images)
+
+        classnames = self.prompt_to_classnames(prompt)
+
+        rst_boxes = []
+        for pred_box, src_image in zip(pred_boxes, src_images):
+            rst_boxes.append(
+                self.postprocess(
+                    pred_box,
+                    classnames,
+                    src_image,
+                    threshold,
+                )
+            )
+
+        return rst_boxes
+
+    def postprocess(
+        self,
+        pred_boxes,
+        classnames,
+        src_image,
+        threshold,
+    ):
+        """Post Process for prediction result of single image."""
+
+        pred_boxes = pred_boxes[pred_boxes[:, 1] > threshold]
+        H, W, *_ = src_image.shape
+
+        pred_labels = pred_boxes[:, 0].astype(np.int32)
+        pred_scores = pred_boxes[:, 1]
+        pred_bboxes = pred_boxes[:, 2:]
+
+        pred_bboxes[:, ::2] = np.clip(pred_bboxes[:, ::2], a_min=0, a_max=W)
+        pred_bboxes[:, 1::2] = np.clip(pred_bboxes[:, 1::2], a_min=0, a_max=H)
+
+        rst_bboxes = []
+        for pred_label, pred_score, pred_bbox in zip(
+            pred_labels, pred_scores, pred_bboxes
+        ):
+            rst_bboxes.append(
+                {
+                    "coordinate": pred_bbox.tolist(),
+                    "label": classnames[pred_label],
+                    "score": pred_score,
+                }
+            )
+
+        return rst_bboxes
+
+    def prompt_to_classnames(self, text):
+
+        text = text.strip().lower()
+        words = [word.strip() for word in text.split(",")]
+        words += [" "]
+
+        return words

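Outside the predictor, the two new classes can be exercised directly. A hedged end-to-end sketch follows, where the model directory and the fake network output stand in for the static-graph model that `OVDetPredictor` runs in between:

```python
# Standalone walk-through of the new pre/post processors. "YOLO-Worldv2-L" is a
# hypothetical local model_dir containing a tokenizer/ subdirectory, and
# fake_boxes stands in for the static model's output.
import numpy as np
from paddlex.inference.models.open_vocabulary_detection.processors import (
    YOLOWorldProcessor,
    YOLOWorldPostProcessor,
)

image = np.zeros((480, 640, 3), dtype=np.uint8)

processor = YOLOWorldProcessor(model_dir="YOLO-Worldv2-L")
inputs = processor([image], text="bus, car")
# inputs follows the static input order: attention_mask, image, input_ids, pad_param, scale_factor

post = YOLOWorldPostProcessor(threshold=0.05)
fake_boxes = np.array([[0, 0.9, 10, 20, 100, 200]], dtype=np.float32)  # label, score, x1, y1, x2, y2
dets = post(
    pred_boxes=fake_boxes,
    pred_nums=np.array([1]),
    prompt="bus, car",
    src_images=[image],
)
# dets[0] -> [{"coordinate": [10.0, 20.0, 100.0, 200.0], "label": "bus", "score": 0.9}]
```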
+ 1 - 0
paddlex/inference/utils/official_models.py

@@ -333,6 +333,7 @@ PP-LCNet_x1_0_vehicle_attribute_infer.tar",
     "PP-DocLayout-M": "https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0rc0/PP-DocLayout-M_infer.tar",
     "PP-DocLayout-S": "https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0rc0/PP-DocLayout-S_infer.tar",
     "BEVFusion": "https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0rc0/BEVFusion_infer.tar",
+    "YOLO-Worldv2-L": "https://paddle-model-ecology.bj.bcebos.com/paddlex/official_inference_model/paddle3.0rc0/YOLO-Worldv2-L_infer.tar",
 }
 
 

+ 1 - 3
paddlex/modules/open_vocabulary_detection/model_list.py

@@ -13,6 +13,4 @@
 # limitations under the License.
 
 
-MODELS = [
-    "GroundingDINO-T",
-]
+MODELS = ["GroundingDINO-T", "YOLO-Worldv2-L"]