Преглед изворног кода

Merge pull request #3139 from opendatalab/release-2.1.2

Release 2.1.2
Xiaomeng Zhao пре 3 месеца
родитељ
комит
07f6ba7299

+ 3 - 3
.github/workflows/cla.yml

@@ -18,9 +18,9 @@ jobs:
     steps:
       - name: "CLA Assistant"
         if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA') || github.event_name == 'pull_request_target'
-        uses: contributor-assistant/github-action@v2.5.0
+        uses: contributor-assistant/github-action@v2.6.1
         env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           # the below token should have repo scope and must be manually added by you in the repository's secret
           # This token is required only if you have configured to store the signatures in a remote repository/organization
           PERSONAL_ACCESS_TOKEN: ${{ secrets.RELEASE_TOKEN }}
@@ -28,7 +28,7 @@ jobs:
           path-to-signatures: 'signatures/version1/cla.json'
           path-to-document: 'https://github.com/opendatalab/MinerU/blob/master/MinerU_CLA.md' # e.g. a CLA or a DCO document
           # branch should not be protected
-          branch: 'master'
+          branch: 'cla'
           allowlist: myhloli,dt-yy,Focusshang,renpengli01,icecraft,drunkpig,wangbinDL,qiangqiang199,GDDGCZ518,papayalove,conghui,quyuan,LollipopsAndWine,Sidney233
 
          # the followings are the optional inputs - If the optional inputs are not given, then default values will be taken

+ 4 - 1
README.md

@@ -43,7 +43,10 @@
 </div>
 
 # Changelog
-
+- 2025/07/22 2.1.2 Released
+  - Bug fixes
+    - Fixed the issue of excessive memory consumption during the `MFR` step in the `pipeline` backend under certain scenarios #2771
+    - Fixed the inaccurate matching between `image`/`table` and `caption`/`footnote` under certain conditions #3129
 - 2025/07/16 2.1.1 Released
   - Bug fixes
     - Fixed text block content loss issue that could occur in certain `pipeline` scenarios #3005

+ 4 - 0
README_zh-CN.md

@@ -43,6 +43,10 @@
 </div>
 
 # 更新记录
+- 2025/07/22 2.1.2发布
+  - bug修复
+    - 修复`pipeline`后端中`MFR`步骤在某些情况下显存消耗过大的问题 #2771
+    - 修复某些情况下`image`/`table`与`caption`/`footnote`匹配不准确的问题 #3129
 - 2025/07/16 2.1.1发布
   - bug修复 
     - 修复`pipeline`在某些情况可能发生的文本块内容丢失问题 #3005

+ 4 - 3
mineru/backend/pipeline/batch_analyze.py

@@ -12,6 +12,7 @@ from ...utils.ocr_utils import get_adjusted_mfdetrec_res, get_ocr_result_list, O
 YOLO_LAYOUT_BASE_BATCH_SIZE = 8
 MFD_BASE_BATCH_SIZE = 1
 MFR_BASE_BATCH_SIZE = 16
+OCR_DET_BASE_BATCH_SIZE = 16
 
 
 class BatchAnalyze:
@@ -170,9 +171,9 @@ class BatchAnalyze:
                         batch_images.append(padded_img)
 
                     # 批处理检测
-                    batch_size = min(len(batch_images), self.batch_ratio * 16)  # 增加批处理大小
-                    # logger.debug(f"OCR-det batch: {batch_size} images, target size: {target_h}x{target_w}")
-                    batch_results = ocr_model.text_detector.batch_predict(batch_images, batch_size)
+                    det_batch_size = min(len(batch_images), self.batch_ratio * OCR_DET_BASE_BATCH_SIZE)  # 增加批处理大小
+                    # logger.debug(f"OCR-det batch: {det_batch_size} images, target size: {target_h}x{target_w}")
+                    batch_results = ocr_model.text_detector.batch_predict(batch_images, det_batch_size)
 
                     # 处理批处理结果
                     for i, (crop_info, (dt_boxes, elapse)) in enumerate(zip(group_crops, batch_results)):

+ 3 - 3
mineru/backend/pipeline/pipeline_analyze.py

@@ -74,10 +74,10 @@ def doc_analyze(
         table_enable=True,
 ):
     """
-    适当调大MIN_BATCH_INFERENCE_SIZE可以提高性能,可能会增加显存使用量
-    可通过环境变量MINERU_MIN_BATCH_INFERENCE_SIZE设置,默认值为128
+    适当调大MIN_BATCH_INFERENCE_SIZE可以提高性能,更大的MIN_BATCH_INFERENCE_SIZE会消耗更多内存
+    可通过环境变量MINERU_MIN_BATCH_INFERENCE_SIZE设置,默认值为384
     """
-    min_batch_inference_size = int(os.environ.get('MINERU_MIN_BATCH_INFERENCE_SIZE', 128))
+    min_batch_inference_size = int(os.environ.get('MINERU_MIN_BATCH_INFERENCE_SIZE', 384))
 
     # 收集所有页面信息
     all_pages_info = []  # 存储(dataset_index, page_index, img, ocr, lang, width, height)

+ 4 - 2
mineru/backend/pipeline/pipeline_magic_model.py

@@ -275,7 +275,8 @@ class MagicModel:
 
 
             fst_idx, fst_kind, left_x, top_y = candidates[0]
-            candidates.sort(key=lambda x: (x[2] - left_x) ** 2 + (x[3] - top_y)**2)
+            fst_bbox = subjects[fst_idx]['bbox'] if fst_kind == SUB_BIT_KIND else objects[fst_idx - OBJ_IDX_OFFSET]['bbox']
+            candidates.sort(key=lambda x: bbox_distance(fst_bbox, subjects[x[0]]['bbox']) if x[1] == SUB_BIT_KIND else bbox_distance(fst_bbox, objects[x[0] - OBJ_IDX_OFFSET]['bbox']))
             nxt = None
 
             for i in range(1, len(candidates)):
@@ -294,7 +295,8 @@ class MagicModel:
             pair_dis = bbox_distance(subjects[sub_idx]['bbox'], objects[obj_idx]['bbox'])
             nearest_dis = float('inf')
             for i in range(N):
-                if i in seen_idx or i == sub_idx:continue
+                # 取消原先算法中 1对1 匹配的偏置
+                # if i in seen_idx or i == sub_idx:continue
                 nearest_dis = min(nearest_dis, bbox_distance(subjects[i]['bbox'], objects[obj_idx]['bbox']))
 
             if pair_dis >= 3*nearest_dis:

+ 1 - 1
mineru/model/mfr/unimernet/Unimernet.py

@@ -115,7 +115,7 @@ class UnimernetModel(object):
                 mf_img = mf_img.to(dtype=self.model.dtype)
                 mf_img = mf_img.to(self.device)
                 with torch.no_grad():
-                    output = self.model.generate({"image": mf_img})
+                    output = self.model.generate({"image": mf_img}, batch_size=batch_size)
                 mfr_res.extend(output["fixed_str"])
 
                 # 更新进度条,每次增加batch_size,但要注意最后一个batch可能不足batch_size

+ 8 - 2
mineru/model/mfr/unimernet/unimernet_hf/modeling_unimernet.py

@@ -468,7 +468,7 @@ class UnimernetModel(VisionEncoderDecoderModel):
         ).loss
         return {"loss": loss}
 
-    def generate(self, samples, do_sample: bool = False, temperature: float = 0.2, top_p: float = 0.95):
+    def generate(self, samples, do_sample: bool = False, temperature: float = 0.2, top_p: float = 0.95, batch_size=64):
         pixel_values = samples["image"]
         num_channels = pixel_values.shape[1]
         if num_channels == 1:
@@ -478,7 +478,13 @@ class UnimernetModel(VisionEncoderDecoderModel):
         if do_sample:
             kwargs["temperature"] = temperature
             kwargs["top_p"] = top_p
-        
+
+        if self.tokenizer.tokenizer.model_max_length > 1152:
+            if batch_size <= 32:
+                self.tokenizer.tokenizer.model_max_length = 1152  # 6g
+            else:
+                self.tokenizer.tokenizer.model_max_length = 1344  # 8g
+
         outputs = super().generate(
             pixel_values=pixel_values,
             max_new_tokens=self.tokenizer.tokenizer.model_max_length, # required

+ 1 - 1
mineru/model/ocr/paddleocr2pytorch/pytorch_paddle.py

@@ -88,7 +88,7 @@ class PytorchPaddleOCR(TextSystem):
         kwargs['det_model_path'] = det_model_path
         kwargs['rec_model_path'] = rec_model_path
         kwargs['rec_char_dict_path'] = os.path.join(root_dir, 'pytorchocr', 'utils', 'resources', 'dict', dict_file)
-        # kwargs['rec_batch_num'] = 8
+        kwargs['rec_batch_num'] = 16
 
         kwargs['device'] = device