model_single_process.py

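"""Run a single PaddleX model over a set of images or PDF pages, save each
result's visualizations and raw outputs, and write a per-model JSON summary."""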
import os
import sys
import time
import json
import argparse
import traceback
from pathlib import Path
from typing import List, Dict, Any

from tqdm import tqdm
from dotenv import load_dotenv

load_dotenv(override=True)

from paddlex import create_model

# Reuse the existing input collection and PDF-to-image logic
from ppstructurev3_utils import get_input_files

# List of PaddleX model names
MODEL_LIST = [
    # OCR text detection models
    {"model_name": "PP-OCRv5_mobile_det", "description": "Lightweight OCR text detection model, suitable for mobile-side deployment"},
    {"model_name": "PP-OCRv5_server_det", "description": "Server-side OCR text detection model, high-accuracy detection"},
    # OCR text recognition models
    {"model_name": "PP-OCRv5_mobile_rec", "description": "Lightweight OCR text recognition model, suitable for mobile-side deployment"},
    {"model_name": "PP-OCRv5_server_rec", "description": "Server-side OCR text recognition model, high-accuracy recognition"},
    # Layout region detection models
    {"model_name": "PP-DocLayout_plus-L", "description": "Layout detection model covering 20 common categories: document title, section title, text, page number, abstract, table of contents, references, footnote, header, footer, algorithm, formula, formula number, image, table, figure-and-table captions (figure caption, table caption and chart caption), seal, chart, sidebar text and reference content"},
    {"model_name": "PP-DocBlockLayout", "description": "Document-image layout sub-block detection with a single 'layout region' category; detects the text region of each sub-article in multi-column newspapers and magazines"},
    # Table classification model
    {"model_name": "PP-LCNet_x1_0_table_cls", "description": "wired_table, wireless_table"},
    # Table structure recognition models
    {"model_name": "SLANet_plus", "description": "SLANet_plus is an enhanced version of SLANet, the table structure recognition model developed by the Baidu PaddlePaddle vision team. Compared with SLANet, it greatly improves recognition of wireless and complex tables and is less sensitive to table localization accuracy, so tables can still be recognized fairly accurately even when localization drifts."},
    {"model_name": "SLANeXt_wired", "description": "The SLANeXt series is the new generation of table structure recognition models developed by the Baidu PaddlePaddle vision team. Compared with SLANet and SLANet_plus, SLANeXt focuses on table structure recognition and trains dedicated weights for wired and wireless tables, noticeably improving recognition of every table type, with an especially large gain on wired tables."},
    {"model_name": "SLANeXt_wireless", "description": "The SLANeXt series is the new generation of table structure recognition models developed by the Baidu PaddlePaddle vision team. Compared with SLANet and SLANet_plus, SLANeXt focuses on table structure recognition and trains dedicated weights for wired and wireless tables, noticeably improving recognition of every table type, with an especially large gain on wireless tables."},
    # Table cell detection models
    {"model_name": "RT-DETR-L_wired_table_cell_det", "description": "Wired table cell detection model"},
    {"model_name": "RT-DETR-L_wireless_table_cell_det", "description": "Wireless table cell detection model"},
    # Formula recognition model
    {"model_name": "PP-FormulaNet_plus-L", "description": "Converts mathematical formulas in images into editable text or a machine-readable format; its performance directly affects the accuracy and efficiency of the whole OCR system. Formula recognition modules usually output the LaTeX or MathML code of the formula"},
    # Document image orientation classification model
    {"model_name": "PP-LCNet_x1_0_doc_ori", "description": "Document image orientation classification model based on PP-LCNet_x1_0, with four classes: 0, 90, 180 and 270 degrees"},
    # Text image rectification model
    {"model_name": "UVDoc", "description": "Applies geometric transformations to correct document warping, skew and perspective distortion in images so that subsequent text recognition is more accurate"},
    # Seal text detection models
    {"model_name": "PP-OCRv4_mobile_seal_det", "description": "Mobile seal text detection model of PP-OCRv4, more efficient and suitable for edge-side deployment"},
    {"model_name": "PP-OCRv4_server_seal_det", "description": "Server-side seal text detection model of PP-OCRv4, more accurate and suitable for deployment on better-provisioned servers"},
]

# Models that require dict input (Doc VLM / chart-to-table)
DICT_INPUT_MODELS = {
    "PP-Chart2Table",
    "PP-DocBee-2B",
    "PP-DocBee-7B",
    "PP-DocBee2-3B",
}

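# For these models, predict_on_images() below wraps each input as a dict,
# e.g. {"image": "/path/to/page.png", "query": query} (the path here is a placeholder).
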
def init_model(model_name: str, device: str = "gpu:0"):
    """
    Initialize a single model. Fall back to the default constructor if the
    device argument is not supported.
    """
    try:
        model = create_model(model_name=model_name, device=device)
    except TypeError:
        model = create_model(model_name=model_name)
    return model

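# Example usage (illustrative values):
#   model = init_model("PP-OCRv5_mobile_det", device="cpu")
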
def predict_on_images(
    model_name: str,
    image_paths: List[str],
    output_dir: str,
    device: str = "gpu:0",
    batch_size: int = 1,
    layout_nms: bool = True,
    query: str = "请将图表转换为表格格式"
) -> List[Dict[str, Any]]:
    """
    Run any single model on a list of images, save the visualizations and raw
    results, and return a summary.
    """
    output_base = Path(output_dir).resolve()
    output_base.mkdir(parents=True, exist_ok=True)
    model = init_model(model_name, device=device)

    # Some detection / layout models support layout_nms
    predict_kwargs = {}
    if hasattr(model, "_predictor") and hasattr(model._predictor, "layout_nms"):
        predict_kwargs["layout_nms"] = layout_nms

    results_summary: List[Dict[str, Any]] = []
    with tqdm(total=len(image_paths), desc=f"{model_name} predicting", unit="img",
              bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]') as pbar:
        for img_path in image_paths:
            img_path = str(img_path)
            # img_name = Path(img_path).stem
            # img_out_dir = output_base / img_name
            # img_out_dir.mkdir(parents=True, exist_ok=True)
            start = time.time()
            try:
                # Models that require dict input
                if model_name in DICT_INPUT_MODELS:
                    input_data = {"image": img_path, "query": query}
                    outputs = model.predict(input_data, batch_size=1, **predict_kwargs)
                else:
                    outputs = model.predict(img_path, batch_size=batch_size, **predict_kwargs)
                elapsed = time.time() - start

                # Save model outputs (visualizations and structured results)
                saved_files = []
                for i, res in enumerate(outputs):
                    # Per-result subdirectories (currently disabled; everything goes to output_base)
                    # sub_dir = img_out_dir / f"res_{i:02d}"
                    # sub_dir.mkdir(parents=True, exist_ok=True)
                    # # Visualizations and all other artifacts
                    # res.save_all(save_path=sub_dir.as_posix())
                    # saved_files.append(sub_dir.as_posix())
                    res.save_all(save_path=output_base.as_posix())
                    saved_files.append(output_base.as_posix())

                results_summary.append({
                    "image_path": img_path,
                    "success": True,
                    "model_name": model_name,
                    "device": device,
                    "batch_size": batch_size,
                    "layout_nms": layout_nms,
                    "time_sec": elapsed,
                    "saved_paths": saved_files
                })
                pbar.update(1)
                pbar.set_postfix(time=f"{elapsed:.2f}s", ok=len([r for r in results_summary if r['success']]))
            except Exception as e:
                elapsed = time.time() - start
                traceback.print_exc()
                results_summary.append({
                    "image_path": img_path,
                    "success": False,
                    "model_name": model_name,
                    "device": device,
                    "batch_size": batch_size,
                    "layout_nms": layout_nms,
                    "time_sec": elapsed,
                    "error": str(e)
                })
                pbar.update(1)
                pbar.set_postfix_str("error")
    return results_summary

def save_summary(summary: List[Dict[str, Any]], output_dir: str, model_name: str):
    out_dir = Path(output_dir).resolve()
    out_dir.mkdir(parents=True, exist_ok=True)
    stats = {
        "model_name": model_name,
        "total": len(summary),
        "success": sum(1 for r in summary if r.get("success")),
        "failed": sum(1 for r in summary if not r.get("success")),
        "avg_time": (sum(r.get("time_sec", 0) for r in summary) / len(summary)) if summary else 0,
        "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
    }
    final = {"stats": stats, "results": summary}
    out_file = out_dir / f"{model_name}_results.json"
    with open(out_file, "w", encoding="utf-8") as f:
        json.dump(final, f, ensure_ascii=False, indent=2)
    print(f"💾 Summary saved to: {out_file}")

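# The written JSON is shaped as {"stats": {...}, "results": [...]}: "stats" holds the
# aggregate counters built above, "results" the per-image records from predict_on_images().
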
def main():
    parser = argparse.ArgumentParser(description="Run any single PaddleX model on images/PDFs (similar to ppstructurev3_single_process.py)")
    # Input sources (consistent with ppstructurev3_single_process)
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--input_file", type=str, help="Single file (image or PDF)")
    group.add_argument("--input_dir", type=str, help="Directory (scanned for images or PDFs)")
    group.add_argument("--input_file_list", type=str, help="File list (one path per line)")
    group.add_argument("--input_csv", type=str, help="CSV with image_path and status columns")
    parser.add_argument("--model_name", type=str, required=True, help="Model to run, e.g. PP-OCRv5_server_det / PP-DocLayout_plus-L / SLANeXt_wireless")
    parser.add_argument("--output_dir", type=str, required=True, help="Output directory")
    parser.add_argument("--device", type=str, default="gpu:0", help="Device, e.g. gpu:0 or cpu")
    parser.add_argument("--pdf_dpi", type=int, default=200, help="DPI for PDF-to-image conversion")
    parser.add_argument("--batch_size", type=int, default=1, help="Prediction batch size (supported by most single-image models)")
    parser.add_argument("--no_layout_nms", action="store_true", help="Disable layout_nms (if the model supports it)")
    parser.add_argument("--query", type=str, default="请将图表转换为表格格式", help="Only used by models that take dict input, e.g. PP-Chart2Table")
    parser.add_argument("--test_mode", action="store_true", help="Process only the first 20 files")
    args = parser.parse_args()

    # Reuse the file collection logic of ppstructurev3_utils (including PDF-to-image conversion)
    class DummyArgs:
        input_file = args.input_file
        input_dir = args.input_dir
        input_file_list = args.input_file_list
        input_csv = args.input_csv
        output_dir = args.output_dir
        pdf_dpi = args.pdf_dpi
        test_mode = args.test_mode

    input_files = get_input_files(DummyArgs)
    if not input_files:
        print("❌ No input files found.")
        return 1
    if args.test_mode:
        input_files = input_files[:20]
        print(f"Test mode: {len(input_files)} files")

    print(f"🚀 Model: {args.model_name} | Device: {args.device} | Files: {len(input_files)}")
    summary = predict_on_images(
        model_name=args.model_name,
        image_paths=input_files,
        output_dir=args.output_dir,
        device=args.device,
        batch_size=args.batch_size,
        layout_nms=not args.no_layout_nms,
        query=args.query
    )
    save_summary(summary, args.output_dir, args.model_name)
    return 0

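# Example invocation (paths are placeholders):
#   python model_single_process.py --model_name PP-DocLayout_plus-L \
#       --input_dir ./images --output_dir ./layout_results --device gpu:0
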
if __name__ == "__main__":
    # No-argument demo (for a quick try-out)
    if len(sys.argv) == 1:
        model_name = "RT-DETR-L_wired_table_cell_det"
        # demo = {
        #     "--model_name": model_name,
        #     "--input_dir": "/Users/zhch158/workspace/data/流水分析/A用户_单元格扫描流水.img",
        #     "--output_dir": f"/Users/zhch158/workspace/data/流水分析/A用户_单元格扫描流水/{model_name}_Results",
        #     "--device": "cpu",
        # }
        # model_name = "RT-DETR-L_wireless_table_cell_det"
        # demo = {
        #     "--model_name": model_name,
        #     "--input_dir": "/Users/zhch158/workspace/data/流水分析/B用户_扫描流水.img",
        #     "--output_dir": f"/Users/zhch158/workspace/data/流水分析/B用户_扫描流水/{model_name}_Results",
        #     "--device": "cpu",
        # }
        model_name = "SLANet_plus"
        demo = {
            "--model_name": model_name,
            "--input_dir": "/Users/zhch158/workspace/data/流水分析/B用户_扫描流水.img",
            "--output_dir": f"/Users/zhch158/workspace/data/流水分析/B用户_扫描流水/{model_name}_Results",
            "--device": "cpu",
        }
        sys.argv = [sys.argv[0]] + [kv for pair in demo.items() for kv in pair]
        print("ℹ️ No args provided. Running demo with:", demo)
    sys.exit(main())