Browse Source

refactor: update imports and adapt to sglang version changes in processing logic

myhloli 3 months ago
parent
commit
56f25a4e90

+ 3 - 10
mineru/model/vlm_sglang_model/__init__.py

@@ -1,16 +1,9 @@
 from sglang.srt.configs.model_config import multimodal_model_archs
 from sglang.srt.models.registry import ModelRegistry
 
-try:
-    # sglang==0.4.5.post3
-    from sglang.srt.managers.multimodal_processor import (
-        PROCESSOR_MAPPING as PROCESSOR_MAPPING,
-    )
-except ImportError:
-    # sglang==0.4.4.post1
-    from sglang.srt.managers.image_processor import (
-        IMAGE_PROCESSOR_MAPPING as PROCESSOR_MAPPING,
-    )
+from sglang.srt.managers.multimodal_processor import (
+    PROCESSOR_MAPPING as PROCESSOR_MAPPING,
+)
 
 from .. import vlm_hf_model as _
 from .image_processor import Mineru2ImageProcessor

+ 49 - 54
mineru/model/vlm_sglang_model/image_processor.py

@@ -5,21 +5,21 @@ from typing import List, Optional, Union
 
 import numpy as np
 
-try:
-    # sglang==0.4.5.post3
-    from sglang.srt.managers.multimodal_processors.base_processor import (
+from sglang.version import __version__ as sglang_version
+if sglang_version >= "0.4.9":
+    # sglang >= 0.4.9 -- NOTE: lexicographic string compare, not PEP 440;
+    # safe only while pyproject pins sglang>=0.4.7,<0.4.10 ("0.4.10" sorts before "0.4.9")
+    from sglang.srt.multimodal.processors.base_processor import (
         BaseMultimodalProcessor as BaseProcessor,
     )
-
-    get_global_processor = None
-except ImportError:
-    # sglang==0.4.4.post1
-    from sglang.srt.managers.image_processors.base_image_processor import (
-        BaseImageProcessor as BaseProcessor,
-        get_global_processor,
+    from sglang.srt.multimodal.mm_utils import divide_to_patches, expand2square, select_best_resolution
+else:
+    # 0.4.7 <= sglang < 0.4.9
+    from sglang.srt.managers.multimodal_processors.base_processor import (
+        BaseMultimodalProcessor as BaseProcessor,
     )
+    from sglang.srt.mm_utils import divide_to_patches, expand2square, select_best_resolution
 
-from sglang.srt.mm_utils import divide_to_patches, expand2square, select_best_resolution
+get_global_processor = None
 from sglang.srt.utils import load_image, logger
 from sglang.utils import get_exception_traceback
 
@@ -123,8 +123,7 @@ class Mineru2ImageProcessor(BaseProcessor):
                 image_processor,
             )
 
-    # sglang==0.4.4.post1
-    async def process_images_async(
+    async def process_mm_data_async(
         self,
         image_data: List[Union[str, bytes]],
         input_text,
@@ -132,15 +131,17 @@ class Mineru2ImageProcessor(BaseProcessor):
         *args,
         **kwargs,
     ):
+        from sglang.srt.managers.schedule_batch import Modality, MultimodalDataItem
+
         if not image_data:
             return None
 
         modalities = request_obj.modalities or ["image"]
-        aspect_ratio = getattr(self.hf_config, "image_aspect_ratio", "")
-
+        aspect_ratio = getattr(self.hf_config, "image_aspect_ratio", None)
         grid_pinpoints = (
             self.hf_config.image_grid_pinpoints
-            if hasattr(self.hf_config, "image_grid_pinpoints") and "anyres" in aspect_ratio
+            if hasattr(self.hf_config, "image_grid_pinpoints")
+               and aspect_ratio is not None
+               and "anyres" in aspect_ratio
             else None
         )
 
@@ -151,14 +152,19 @@ class Mineru2ImageProcessor(BaseProcessor):
             if "multi-images" in modalities or "video" in modalities:
                 # Multiple images
                 aspect_ratio = "pad"  # LLaVA OneVision Handling: more than one image --> interleaved image mode or video mode. We do not use anyres
-                pixel_values, image_hashes, image_sizes = [], [], []
+                pixel_values, data_hashes, image_sizes = [], [], []
                 res = []
                 for img_data in image_data:
-                    res.append(self._process_single_image(img_data, aspect_ratio, grid_pinpoints))
+                    res.append(
+                        self._process_single_image(
+                            img_data, aspect_ratio, grid_pinpoints
+                        )
+                    )
+
                 res = await asyncio.gather(*res)
                 for pixel_v, image_h, image_s in res:
                     pixel_values.append(pixel_v)
-                    image_hashes.append(image_h)
+                    data_hashes.append(image_h)
                     image_sizes.append(image_s)
 
                 if isinstance(pixel_values[0], np.ndarray):
@@ -168,34 +174,9 @@ class Mineru2ImageProcessor(BaseProcessor):
                 pixel_values, image_hash, image_size = await self._process_single_image(
                     image_data[0], aspect_ratio, grid_pinpoints
                 )
-                image_hashes = [image_hash]
                 image_sizes = [image_size]
         else:
             raise ValueError(f"Invalid image data: {image_data}")
-
-        return {
-            "pixel_values": pixel_values,
-            "image_hashes": image_hashes,
-            "image_sizes": image_sizes,
-            "modalities": request_obj.modalities or ["image"],
-        }
-
-    # sglang==0.4.5.post3
-    async def process_mm_data_async(
-        self,
-        image_data: List[Union[str, bytes]],
-        input_text,
-        request_obj,
-        *args,
-        **kwargs,
-    ):
-        from sglang.srt.managers.schedule_batch import Modality, MultimodalDataItem
-
-        result = await self.process_images_async(image_data, input_text, request_obj, *args, **kwargs)
-
-        if result is None:
-            return None
-
         modality = Modality.IMAGE
         if isinstance(request_obj.modalities, list):
             if request_obj.modalities[0] == "multi-images":
@@ -203,15 +184,29 @@ class Mineru2ImageProcessor(BaseProcessor):
             elif request_obj.modalities[0] == "video":
                 modality = Modality.VIDEO
 
-        return {
-            "mm_items": [
-                MultimodalDataItem(
-                    pixel_values=result["pixel_values"],
-                    image_sizes=result["image_sizes"],
-                    modality=modality,
-                )
-            ],
-        }
-
+        if sglang_version >= "0.4.9.post3":
+            # sglang >= 0.4.9.post3
+            return {
+                "mm_items": [
+                    MultimodalDataItem(
+                        feature=pixel_values,
+                        model_specific_data={
+                            "image_sizes": image_sizes,
+                        },
+                        modality=modality,
+                    )
+                ],
+            }
+        else:
+            # 0.4.7 <= sglang <= 0.4.9.post2
+            return {
+                "mm_items": [
+                    MultimodalDataItem(
+                        pixel_values=pixel_values,
+                        image_sizes=image_sizes,
+                        modality=modality,
+                    )
+                ],
+            }
 
 ImageProcessorMapping = {Mineru2QwenForCausalLM: Mineru2ImageProcessor}

+ 31 - 31
mineru/model/vlm_sglang_model/model.py

@@ -5,9 +5,19 @@ from typing import Iterable, List, Optional, Tuple
 import numpy as np
 import torch
 from sglang.srt.layers.quantization.base_config import QuantizationConfig
-from sglang.srt.mm_utils import (
-    get_anyres_image_grid_shape,  # unpad_image, unpad_image_shape
-)
+
+from sglang.version import __version__ as sglang_version
+if sglang_version >= "0.4.9":
+    # sglang >= 0.4.9 -- NOTE: lexicographic string compare, not PEP 440;
+    # safe only while pyproject pins sglang>=0.4.7,<0.4.10 ("0.4.10" sorts before "0.4.9")
+    from sglang.srt.multimodal.mm_utils import (
+        get_anyres_image_grid_shape,
+    )
+else:
+    # 0.4.7 <= sglang < 0.4.9
+    from sglang.srt.mm_utils import (
+        get_anyres_image_grid_shape,
+    )
+
 from sglang.srt.model_executor.forward_batch_info import ForwardBatch
 from sglang.srt.model_loader.weight_utils import default_weight_loader
 from sglang.srt.models.qwen2 import Qwen2ForCausalLM
@@ -111,14 +121,9 @@ class Mineru2QwenForCausalLM(nn.Module):
             raise ValueError(f"Unexpected select feature: {self.select_feature}")
 
     def pad_input_ids(self, input_ids: List[int], image_inputs):
-        if hasattr(image_inputs, "mm_items"):  # MultimodalInputs
-            # sglang==0.4.5.post3
-            image_sizes = flatten_nested_list([item.image_sizes for item in image_inputs.mm_items])
-            pad_values = [item.pad_value for item in image_inputs.mm_items]
-        else:  # ImageInputs
-            # sglang==0.4.4.post1
-            image_sizes = image_inputs.image_sizes
-            pad_values = image_inputs.pad_values
+
+        image_sizes = flatten_nested_list([item.image_sizes for item in image_inputs.mm_items])
+        pad_values = [item.pad_value for item in image_inputs.mm_items]
 
         # hardcode for spatial_unpad + anyres
         # if image_inputs.modalities is not None and (
@@ -196,14 +201,8 @@ class Mineru2QwenForCausalLM(nn.Module):
         positions: torch.Tensor,
         forward_batch: ForwardBatch,
     ) -> torch.Tensor:
-        if hasattr(forward_batch, "mm_inputs"):
-            # sglang==0.4.5.post3
-            image_inputs = forward_batch.mm_inputs
-            is_sglang_mm_inputs = True
-        else:
-            # sglang==0.4.4.post1
-            image_inputs = forward_batch.image_inputs
-            is_sglang_mm_inputs = False
+
+        image_inputs = forward_batch.mm_inputs
 
         if image_inputs is None:
             image_inputs = []
@@ -223,12 +222,7 @@ class Mineru2QwenForCausalLM(nn.Module):
             max_image_offset = []
             for im in image_inputs:
                 if im:
-                    if hasattr(im, "mm_items"):
-                        # sglang==0.4.5.post3
-                        modalities_list.extend([downgrade_modality(item.modality) for item in im.mm_items])
-                    elif im.modalities is not None:
-                        # sglang==0.4.4.post1
-                        modalities_list.extend(im.modalities)
+                    modalities_list.extend([downgrade_modality(item.modality) for item in im.mm_items])
                 if im and im.image_offsets:
                     max_image_offset.append(np.max(np.array(im.image_offsets) + np.array(im.image_pad_len)))
                 else:
@@ -240,8 +234,18 @@ class Mineru2QwenForCausalLM(nn.Module):
             if need_vision.any():
                 bs = forward_batch.batch_size
 
-                if is_sglang_mm_inputs:
-                    # sglang==0.4.5.post3
+                if sglang_version >= "0.4.9.post3":
+                    # sglang >= 0.4.9.post3
+                    pixel_values = flatten_nested_list(
+                        [[item.feature for item in image_inputs[i].mm_items] for i in range(bs) if need_vision[i]]
+                    )  # image_inputs[batch_idx].mm_items[item_idx].feature is Tensor
+                    image_sizes = [
+                        flatten_nested_list([item.model_specific_data["image_sizes"] for item in image_inputs[i].mm_items])
+                        for i in range(bs)
+                        if need_vision[i]
+                    ]  # image_inputs[batch_idx].mm_items[item_idx].model_specific_data["image_sizes"] should be tuple, but is list of tuple for now.
+                else:
+                    # 0.4.7 <= sglang <= 0.4.9.post2
                     pixel_values = flatten_nested_list(
                         [[item.pixel_values for item in image_inputs[i].mm_items] for i in range(bs) if need_vision[i]]
                     )  # image_inputs[batch_idx].mm_items[item_idx].pixel_values is Tensor
@@ -250,10 +254,6 @@ class Mineru2QwenForCausalLM(nn.Module):
                         for i in range(bs)
                         if need_vision[i]
                     ]  # image_inputs[batch_idx].mm_items[item_idx].image_sizes should be tuple, but is list of tuple for now.
-                else:
-                    # sglang==0.4.4.post1
-                    pixel_values = [image_inputs[i].pixel_values for i in range(bs) if need_vision[i]]
-                    image_sizes = [image_inputs[i].image_sizes for i in range(bs) if need_vision[i]]
 
                 ########## Encode Image ########
 

+ 1 - 1
pyproject.toml

@@ -53,7 +53,7 @@ vlm = [
     "pydantic",
 ]
 sglang = [
-    "sglang[all]>=0.4.8,<0.4.9",
+    "sglang[all]>=0.4.7,<0.4.10",
 ]
 pipeline = [
     "matplotlib>=3.10,<4",