# ppstructurev3_single_process.py
  1. """PDF转图像后统一处理"""
  2. import json
  3. import time
  4. import os
  5. import traceback
  6. import argparse
  7. import sys
  8. import warnings
  9. from pathlib import Path
  10. from typing import List, Dict, Any, Union
  11. import cv2
  12. import numpy as np
  13. # 抑制特定警告
  14. warnings.filterwarnings("ignore", message="To copy construct from a tensor")
  15. warnings.filterwarnings("ignore", message="Setting `pad_token_id`")
  16. warnings.filterwarnings("ignore", category=UserWarning, module="paddlex")
  17. from paddlex import create_pipeline
  18. from paddlex.utils.device import constr_device, parse_device
  19. from tqdm import tqdm
  20. from dotenv import load_dotenv
  21. load_dotenv(override=True)
  22. from utils import (
  23. collect_pid_files,
  24. )
  25. from ppstructurev3_utils import (
  26. get_input_files,
  27. convert_pruned_result_to_json,
  28. save_output_images,
  29. save_markdown_content
  30. )
  31. def process_images_unified(image_paths: List[str],
  32. pipeline_name: str = "PP-StructureV3",
  33. device: str = "gpu:0",
  34. output_dir: str = "./output",
  35. normalize_numbers: bool = True) -> List[Dict[str, Any]]:
  36. """
  37. 统一的图像处理函数,支持数字标准化
  38. """
  39. # 创建输出目录
  40. output_path = Path(output_dir)
  41. output_path.mkdir(parents=True, exist_ok=True)
  42. print(f"Initializing pipeline '{pipeline_name}' on device '{device}'...")
  43. try:
  44. # 设置环境变量以减少警告
  45. os.environ['PYTHONWARNINGS'] = 'ignore::UserWarning'
  46. # 初始化pipeline
  47. pipeline = create_pipeline(pipeline_name, device=device)
  48. print(f"Pipeline initialized successfully on {device}")
  49. except Exception as e:
  50. print(f"Failed to initialize pipeline: {e}", file=sys.stderr)
  51. traceback.print_exc()
  52. return []
  53. all_results = []
  54. total_images = len(image_paths)
  55. print(f"Processing {total_images} images one by one")
  56. print(f"🔧 数字标准化: {'启用' if normalize_numbers else '禁用'}")
  57. # 使用tqdm显示进度
  58. with tqdm(total=total_images, desc="Processing images", unit="img",
  59. bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]') as pbar:
  60. # 逐个处理图像
  61. for img_path in image_paths:
  62. start_time = time.time()
  63. try:
  64. # 使用pipeline预测单个图像
  65. results = pipeline.predict(
  66. img_path,
  67. use_doc_orientation_classify=True,
  68. use_doc_unwarping=False,
  69. use_seal_recognition=True,
  70. use_table_recognition=True,
  71. use_formula_recognition=False,
  72. use_chart_recognition=True,
  73. )
  74. processing_time = time.time() - start_time
  75. # 处理结果
  76. for idx, result in enumerate(results):
  77. if idx > 0:
  78. raise ValueError("Multiple results found for a single image")
  79. try:
  80. input_path = Path(result["input_path"])
  81. # 生成输出文件名
  82. if result.get("page_index") is not None:
  83. output_filename = f"{input_path.stem}_{result['page_index']}"
  84. else:
  85. output_filename = f"{input_path.stem}"
  86. # 转换并保存标准JSON格式
  87. json_content = result.json['res']
  88. json_output_path, converted_json = convert_pruned_result_to_json(
  89. json_content,
  90. str(input_path),
  91. output_dir,
  92. output_filename,
  93. normalize_numbers=normalize_numbers
  94. )
  95. # 保存输出图像
  96. img_content = result.img
  97. saved_images = save_output_images(img_content, str(output_dir), output_filename)
  98. # 保存Markdown内容
  99. markdown_content = result.markdown
  100. md_output_path = save_markdown_content(
  101. markdown_content,
  102. output_dir,
  103. output_filename,
  104. normalize_numbers=normalize_numbers,
  105. key_text='markdown_texts',
  106. key_images='markdown_images'
  107. )
  108. # 记录处理结果
  109. all_results.append({
  110. "image_path": str(input_path),
  111. "processing_time": processing_time,
  112. "success": True,
  113. "device": device,
  114. "output_json": json_output_path,
  115. "output_md": md_output_path,
  116. "is_pdf_page": "_page_" in input_path.name, # 标记是否为PDF页面
  117. "processing_info": converted_json.get('processing_info', {})
  118. })
  119. except Exception as e:
  120. print(f"Error saving result for {result.get('input_path', 'unknown')}: {e}", file=sys.stderr)
  121. traceback.print_exc()
  122. all_results.append({
  123. "image_path": str(img_path),
  124. "processing_time": 0,
  125. "success": False,
  126. "device": device,
  127. "error": str(e)
  128. })
  129. # 更新进度条
  130. success_count = sum(1 for r in all_results if r.get('success', False))
  131. pbar.update(1)
  132. pbar.set_postfix({
  133. 'time': f"{processing_time:.2f}s",
  134. 'success': f"{success_count}/{len(all_results)}",
  135. 'rate': f"{success_count/len(all_results)*100:.1f}%"
  136. })
  137. except Exception as e:
  138. print(f"Error processing {Path(img_path).name}: {e}", file=sys.stderr)
  139. traceback.print_exc()
  140. # 添加错误结果
  141. all_results.append({
  142. "image_path": str(img_path),
  143. "processing_time": 0,
  144. "success": False,
  145. "device": device,
  146. "error": str(e)
  147. })
  148. pbar.update(1)
  149. return all_results
  150. def main():
  151. """主函数"""
  152. parser = argparse.ArgumentParser(description="PaddleX PP-StructureV3 Unified PDF/Image Processor")
  153. # 参数定义
  154. input_group = parser.add_mutually_exclusive_group(required=True)
  155. input_group.add_argument("--input_file", type=str, help="Input file (supports both PDF and image file)")
  156. input_group.add_argument("--input_dir", type=str, help="Input directory (supports both PDF and image files)")
  157. input_group.add_argument("--input_file_list", type=str, help="Input file list (one file per line)")
  158. input_group.add_argument("--input_csv", type=str, help="Input CSV file with image_path and status columns")
  159. parser.add_argument("--output_dir", type=str, required=True, help="Output directory")
  160. parser.add_argument("--pipeline", type=str, default="PP-StructureV3", help="Pipeline name")
  161. parser.add_argument("--device", type=str, default="gpu:0", help="Device string (e.g., 'gpu:0', 'cpu')")
  162. parser.add_argument("--pdf_dpi", type=int, default=200, help="DPI for PDF to image conversion")
  163. parser.add_argument("--no-normalize", action="store_true", help="禁用数字标准化")
  164. parser.add_argument("--test_mode", action="store_true", help="Test mode (process only 20 files)")
  165. parser.add_argument("--collect_results", type=str, help="收集处理结果到指定CSV文件")
  166. args = parser.parse_args()
  167. normalize_numbers = not args.no_normalize
  168. try:
  169. # 获取并预处理输入文件
  170. print("🔄 Preprocessing input files...")
  171. input_files = get_input_files(args)
  172. if not input_files:
  173. print("❌ No input files found or processed")
  174. return 1
  175. if args.test_mode:
  176. input_files = input_files[:20]
  177. print(f"Test mode: processing only {len(input_files)} images")
  178. print(f"Using device: {args.device}")
  179. # 开始处理
  180. start_time = time.time()
  181. results = process_images_unified(
  182. input_files,
  183. args.pipeline,
  184. args.device,
  185. args.output_dir,
  186. normalize_numbers=normalize_numbers
  187. )
  188. total_time = time.time() - start_time
  189. # 统计结果
  190. success_count = sum(1 for r in results if r.get('success', False))
  191. error_count = len(results) - success_count
  192. pdf_page_count = sum(1 for r in results if r.get('is_pdf_page', False))
  193. total_changes = sum(r.get('processing_info', {}).get('character_changes_count', 0) for r in results if 'processing_info' in r)
  194. print(f"\n" + "="*60)
  195. print(f"✅ Processing completed!")
  196. print(f"📊 Statistics:")
  197. print(f" Total files processed: {len(input_files)}")
  198. print(f" PDF pages processed: {pdf_page_count}")
  199. print(f" Regular images processed: {len(input_files) - pdf_page_count}")
  200. print(f" Successful: {success_count}")
  201. print(f" Failed: {error_count}")
  202. if len(input_files) > 0:
  203. print(f" Success rate: {success_count / len(input_files) * 100:.2f}%")
  204. if normalize_numbers:
  205. print(f" 总标准化字符数: {total_changes}")
  206. print(f"⏱️ Performance:")
  207. print(f" Total time: {total_time:.2f} seconds")
  208. if total_time > 0:
  209. print(f" Throughput: {len(input_files) / total_time:.2f} files/second")
  210. print(f" Avg time per file: {total_time / len(input_files):.2f} seconds")
  211. # 保存结果统计
  212. stats = {
  213. "total_files": len(input_files),
  214. "pdf_pages": pdf_page_count,
  215. "regular_images": len(input_files) - pdf_page_count,
  216. "success_count": success_count,
  217. "error_count": error_count,
  218. "success_rate": success_count / len(input_files) if len(input_files) > 0 else 0,
  219. "total_time": total_time,
  220. "throughput": len(input_files) / total_time if total_time > 0 else 0,
  221. "avg_time_per_file": total_time / len(input_files) if len(input_files) > 0 else 0,
  222. "device": args.device,
  223. "pipeline": args.pipeline,
  224. "pdf_dpi": args.pdf_dpi,
  225. "normalize_numbers": normalize_numbers,
  226. "total_character_changes": total_changes,
  227. "timestamp": time.strftime("%Y-%m-%d %H:%M:%S")
  228. }
  229. # 保存最终结果
  230. output_file_name = Path(args.output_dir).name
  231. output_file = os.path.join(args.output_dir, f"{output_file_name}_unified.json")
  232. final_results = {
  233. "stats": stats,
  234. "results": results
  235. }
  236. with open(output_file, 'w', encoding='utf-8') as f:
  237. json.dump(final_results, f, ensure_ascii=False, indent=2)
  238. print(f"💾 Results saved to: {output_file}")
  239. # 如果没有收集结果的路径,使用缺省文件名,和output_dir同一路径
  240. if not args.collect_results:
  241. output_file_processed = Path(args.output_dir) / f"processed_files_{time.strftime('%Y%m%d_%H%M%S')}.csv"
  242. else:
  243. output_file_processed = Path(args.collect_results).resolve()
  244. processed_files = collect_pid_files(output_file)
  245. with open(output_file_processed, 'w', encoding='utf-8') as f:
  246. f.write("image_path,status\n")
  247. for file_path, status in processed_files:
  248. f.write(f"{file_path},{status}\n")
  249. print(f"💾 Processed files saved to: {output_file_processed}")
  250. return 0
  251. except Exception as e:
  252. print(f"❌ Processing failed: {e}", file=sys.stderr)
  253. traceback.print_exc()
  254. return 1
if __name__ == "__main__":
    # Script entry point. When launched with no CLI arguments, fall back to a
    # hard-coded default configuration (convenient for IDE/interactive runs).
    print(f"🚀 启动统一PDF/图像处理程序...")
    print(f"🔧 CUDA_VISIBLE_DEVICES: {os.environ.get('CUDA_VISIBLE_DEVICES', 'Not set')}")
    if len(sys.argv) == 1:
        # No command-line arguments: run with the default configuration below.
        print("ℹ️ No command line arguments provided. Running with default configuration...")
        # Default configuration.
        # NOTE(review): these paths are machine-specific — confirm before reuse.
        default_config = {
            # "input_file": "/home/ubuntu/zhch/data/至远彩色印刷工业有限公司/2023年度报告母公司.pdf",
            "input_file": "/home/ubuntu/zhch/data/至远彩色印刷工业有限公司/PPStructureV3_Results/2023年度报告母公司/2023年度报告母公司_page_027.png",
            "output_dir": "/home/ubuntu/zhch/data/至远彩色印刷工业有限公司/PPStructureV3_Results",
            "collect_results": f"/home/ubuntu/zhch/data/至远彩色印刷工业有限公司/PPStructureV3_Results/processed_files_{time.strftime('%Y%m%d_%H%M%S')}.csv",
            # "input_dir": "../../OmniDocBench/OpenDataLab___OmniDocBench/images",
            # "output_dir": "./OmniDocBench_PPStructureV3_Results",
            # "collect_results": f"./OmniDocBench_PPStructureV3_Results/processed_files_{time.strftime('%Y%m%d_%H%M%S')}.csv",
            "pipeline": "./my_config/PP-StructureV3.yaml",
            "device": "gpu:3",
        }
        # Rebuild sys.argv so argparse inside main() picks up the defaults.
        sys.argv = [sys.argv[0]]
        for key, value in default_config.items():
            sys.argv.extend([f"--{key}", str(value)])
        # Optionally disable number normalization:
        # sys.argv.append("--no-normalize")
        # Test mode (process only 20 files):
        # sys.argv.append("--test_mode")
    sys.exit(main())