- """PDF转图像后统一处理"""
- import json
- import time
- import os
- import traceback
- import argparse
- import sys
- import warnings
- from pathlib import Path
- from typing import List, Dict, Any, Union
- import cv2
- import numpy as np
- # 抑制特定警告
- warnings.filterwarnings("ignore", message="To copy construct from a tensor")
- warnings.filterwarnings("ignore", message="Setting `pad_token_id`")
- warnings.filterwarnings("ignore", category=UserWarning, module="paddlex")
- from paddlex import create_pipeline
- from paddlex.utils.device import constr_device, parse_device
- from tqdm import tqdm
- from dotenv import load_dotenv
- load_dotenv(override=True)
- from utils import (
- collect_pid_files,
- get_input_files,
- )
- from ppstructurev3_utils import (
- convert_pruned_result_to_json,
- save_output_images,
- save_markdown_content
- )
- # 🎯 新增:导入适配器
- from adapters import (
- apply_table_recognition_adapter,
- restore_original_function,
- apply_enhanced_doc_preprocessor,
- restore_paddlex_doc_preprocessor
- )
def process_images_unified(image_paths: List[str],
                           pipeline_name: str = "PP-StructureV3",
                           device: str = "gpu:0",
                           output_dir: str = "./output",
                           normalize_numbers: bool = True,
                           use_enhanced_adapter: bool = True,
                           **kwargs) -> List[Dict[str, Any]]:
    """
    Unified image-processing entry point supporting number normalization and
    multiple pipelines (PP-StructureV3 and PaddleOCR-VL).

    Args:
        image_paths: Paths of images to process; each image is predicted individually.
        pipeline_name: PaddleX pipeline name or path to a pipeline config file.
            If the name contains "paddleocr-vl" (case-insensitive), camelCase
            predict kwargs are used instead of snake_case.
        device: Device string forwarded to PaddleX (e.g. "gpu:0", "cpu").
        output_dir: Directory where JSON / Markdown / image outputs are written
            (created if missing).
        normalize_numbers: Forwarded to the JSON/Markdown converters to enable
            digit normalization.
        use_enhanced_adapter: If True, monkey-patch PaddleX with the enhanced
            table-recognition and doc-preprocessor adapters; patches are
            restored in the `finally` block below.
        **kwargs: Optional per-feature overrides for pipeline.predict
            (e.g. use_table_recognition, use_doc_unwarping, ...).

    Returns:
        One result dict per processed image with keys "image_path",
        "processing_time", "success", "device"; on success additionally
        "output_json", "output_md", "is_pdf_page", "processing_info";
        on failure "error". Returns [] if pipeline initialization fails.
    """
    # Create the output directory up front so downstream writers can assume it exists.
    output_path = Path(output_dir)
    output_path.mkdir(parents=True, exist_ok=True)

    # Apply the enhanced adapters (monkey patches); both must succeed to count
    # as applied. Note: short-circuit `and` means the second patch is skipped
    # if the first fails — presumably the adapters tolerate that; confirm in adapters.py.
    adapter_applied = False
    if use_enhanced_adapter:
        adapter_applied = apply_table_recognition_adapter() and apply_enhanced_doc_preprocessor()
        if adapter_applied:
            print("🎯 Enhanced table recognition adapter activated and document preprocessor applied")
        else:
            print("⚠️ Failed to apply adapter, using original implementation")

    print(f"Initializing pipeline '{pipeline_name}' on device '{device}'...")

    try:
        # Suppress noisy UserWarnings raised by downstream libraries.
        os.environ['PYTHONWARNINGS'] = 'ignore::UserWarning'

        # Initialize the PaddleX pipeline.
        pipeline = create_pipeline(pipeline_name, device=device)
        print(f"Pipeline initialized successfully on {device}")

    except Exception as e:
        print(f"Failed to initialize pipeline: {e}", file=sys.stderr)
        traceback.print_exc()
        if adapter_applied:
            # Undo the monkey patches before bailing out.
            restore_original_function()
            restore_paddlex_doc_preprocessor()
        return []

    try:
        all_results = []
        total_images = len(image_paths)

        print(f"Processing {total_images} images one by one")
        print(f"🔧 数字标准化: {'启用' if normalize_numbers else '禁用'}")
        print(f"🎯 增强适配器: {'启用' if adapter_applied else '禁用'}")

        # Detect pipeline flavor: PaddleOCR-VL expects camelCase predict kwargs.
        is_paddleocr_vl = 'PaddleOCR-VL'.lower() in str(pipeline_name).lower()

        # tqdm progress bar over the whole batch.
        with tqdm(total=total_images, desc="Processing images", unit="img",
                  bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]') as pbar:

            # Process images one at a time (no batching).
            for img_path in image_paths:
                start_time = time.time()

                try:
                    # Build predict kwargs according to pipeline type.
                    if is_paddleocr_vl:
                        # PaddleOCR-VL uses camelCase parameter names.
                        predict_kwargs = {
                            'input': img_path,
                            'useLayoutDetection': kwargs.get('use_layout_detection', False),
                            'useDocOrientationClassify': kwargs.get('use_doc_orientation', False),
                            'useDocUnwarping': kwargs.get('use_doc_unwarping', False),
                        }
                    else:
                        # PP-StructureV3 uses snake_case parameter names.
                        predict_kwargs = {
                            'input': img_path,
                            # NOTE(review): original comment said orientation classify is
                            # disabled for the statement-analysis scenario, but the
                            # default here is True — confirm intended default.
                            'use_doc_orientation_classify': kwargs.get('use_doc_orientation', True),
                            'use_doc_unwarping': kwargs.get('use_doc_unwarping', False),
                            'use_layout_detection': kwargs.get('use_layout_detection', True),
                            'use_seal_recognition': kwargs.get('use_seal_recognition', True),
                            'use_table_recognition': kwargs.get('use_table_recognition', True),
                            'use_formula_recognition': kwargs.get('use_formula_recognition', False),
                            'use_chart_recognition': kwargs.get('use_chart_recognition', True),
                            'use_ocr_results_with_table_cells': kwargs.get('use_ocr_results_with_table_cells', True),
                            'use_table_orientation_classify': kwargs.get('use_table_orientation_classify', False),
                            'use_wired_table_cells_trans_to_html': kwargs.get('use_wired_table_cells_trans_to_html', True),
                            'use_wireless_table_cells_trans_to_html': kwargs.get('use_wireless_table_cells_trans_to_html', True),
                        }

                    # Run prediction for this single image.
                    results = pipeline.predict(**predict_kwargs)

                    processing_time = time.time() - start_time

                    # Persist each prediction; a single image is expected to
                    # yield exactly one result, so idx > 0 is treated as an error.
                    for idx, result in enumerate(results):
                        if idx > 0:
                            raise ValueError("Multiple results found for a single image")
                        try:
                            input_path = Path(result["input_path"])

                            # Derive output file stem; PDF-derived pages carry a page index.
                            if result.get("page_index") is not None:
                                output_filename = f"{input_path.stem}_{result['page_index']}"
                            else:
                                output_filename = f"{input_path.stem}"

                            # Convert and save the pruned result as normalized JSON.
                            json_content = result.json['res']
                            json_output_path, converted_json = convert_pruned_result_to_json(
                                json_content,
                                str(input_path),
                                output_dir,
                                output_filename,
                                normalize_numbers=normalize_numbers
                            )
                            # Save visualization images produced by the pipeline.
                            img_content = result.img
                            saved_images = save_output_images(img_content, str(output_dir), output_filename)
                            # Save Markdown content (converted_json supplies extra context).
                            markdown_content = result.markdown
                            md_output_path = save_markdown_content(
                                markdown_content,
                                output_dir,
                                output_filename,
                                normalize_numbers=normalize_numbers,
                                key_text='markdown_texts',
                                key_images='markdown_images',
                                json_data=converted_json
                            )

                            # Record a successful per-image outcome.
                            all_results.append({
                                "image_path": str(input_path),
                                "processing_time": processing_time,
                                "success": True,
                                "device": device,
                                "output_json": json_output_path,
                                "output_md": md_output_path,
                                "is_pdf_page": "_page_" in input_path.name,
                                "processing_info": converted_json.get('processing_info', {})
                            })
                        except Exception as e:
                            # Saving failed for this result: record a failure entry
                            # but keep going with the remaining images.
                            print(f"Error saving result for {result.get('input_path', 'unknown')}: {e}", file=sys.stderr)
                            traceback.print_exc()
                            all_results.append({
                                "image_path": str(img_path),
                                "processing_time": 0,
                                "success": False,
                                "device": device,
                                "error": str(e)
                            })

                    # Update the progress bar with rolling success statistics.
                    success_count = sum(1 for r in all_results if r.get('success', False))

                    pbar.update(1)
                    pbar.set_postfix({
                        'time': f"{processing_time:.2f}s",
                        'success': f"{success_count}/{len(all_results)}",
                        'rate': f"{success_count/len(all_results)*100:.1f}%"
                    })

                except Exception as e:
                    # Prediction itself failed (or multiple results were found):
                    # record the failure and advance the progress bar.
                    print(f"Error processing {Path(img_path).name}: {e}", file=sys.stderr)
                    traceback.print_exc()

                    all_results.append({
                        "image_path": str(img_path),
                        "processing_time": 0,
                        "success": False,
                        "device": device,
                        "error": str(e)
                    })
                    pbar.update(1)

        return all_results

    finally:
        # Cleanup: restore the original (un-patched) PaddleX functions.
        if adapter_applied:
            restore_original_function()
            restore_paddlex_doc_preprocessor()
            print("🔄 Original function restored")
def main():
    """CLI entry point: parse arguments, process inputs, write stats and a CSV summary.

    Returns:
        int: 0 on success, 1 on failure (no input files, or an unhandled exception).
    """
    import csv  # local import: only needed for the processed-files summary CSV

    parser = argparse.ArgumentParser(description="PaddleX Unified PDF/Image Processor")

    # Exactly one input source must be provided.
    input_group = parser.add_mutually_exclusive_group(required=True)
    input_group.add_argument("--input_file", type=str, help="Input file (supports both PDF and image file)")
    input_group.add_argument("--input_dir", type=str, help="Input directory (supports both PDF and image files)")
    input_group.add_argument("--input_file_list", type=str, help="Input file list (one file per line)")
    input_group.add_argument("--input_csv", type=str, help="Input CSV file with image_path and status columns")
    parser.add_argument("--output_dir", type=str, required=True, help="Output directory")
    parser.add_argument("--pipeline", type=str, default="PP-StructureV3", help="Pipeline name")
    parser.add_argument("--device", type=str, default="gpu:0", help="Device string (e.g., 'gpu:0', 'cpu')")
    parser.add_argument("--pdf_dpi", type=int, default=200, help="DPI for PDF to image conversion")
    parser.add_argument("--no-normalize", action="store_true", help="禁用数字标准化")
    parser.add_argument("--test_mode", action="store_true", help="Test mode (process only 20 files)")
    parser.add_argument("--collect_results", type=str, help="收集处理结果到指定CSV文件")
    parser.add_argument("--no-adapter", action="store_true", help="禁用增强适配器")
    args = parser.parse_args()

    normalize_numbers = not args.no_normalize
    use_enhanced_adapter = not args.no_adapter

    # Extra predict-time switches; currently empty, forwarded as **kwargs.
    predict_kwargs = {}

    try:
        # Collect and preprocess the input files (PDF pages are expanded to images).
        print("🔄 Preprocessing input files...")
        input_files = get_input_files(args)

        if not input_files:
            print("❌ No input files found or processed")
            return 1

        if args.test_mode:
            # Cap the workload for quick smoke tests.
            input_files = input_files[:20]
            print(f"Test mode: processing only {len(input_files)} images")

        print(f"Using device: {args.device}")

        # Run the unified processing loop and time it end to end.
        start_time = time.time()
        results = process_images_unified(
            input_files,
            args.pipeline,
            args.device,
            args.output_dir,
            normalize_numbers=normalize_numbers,
            use_enhanced_adapter=use_enhanced_adapter,
            **predict_kwargs
        )
        total_time = time.time() - start_time

        # Aggregate per-run statistics from the result dicts.
        success_count = sum(1 for r in results if r.get('success', False))
        error_count = len(results) - success_count
        pdf_page_count = sum(1 for r in results if r.get('is_pdf_page', False))
        total_changes = sum(r.get('processing_info', {}).get('character_changes_count', 0) for r in results if 'processing_info' in r)

        print(f"\n" + "="*60)
        print(f"✅ Processing completed!")
        print(f"📊 Statistics:")
        print(f"   Total files processed: {len(input_files)}")
        print(f"   PDF pages processed: {pdf_page_count}")
        print(f"   Regular images processed: {len(input_files) - pdf_page_count}")
        print(f"   Successful: {success_count}")
        print(f"   Failed: {error_count}")
        if len(input_files) > 0:
            print(f"   Success rate: {success_count / len(input_files) * 100:.2f}%")
        if normalize_numbers:
            print(f"   总标准化字符数: {total_changes}")
        print(f"⏱️  Performance:")
        print(f"   Total time: {total_time:.2f} seconds")
        if total_time > 0:
            print(f"   Throughput: {len(input_files) / total_time:.2f} files/second")
            print(f"   Avg time per file: {total_time / len(input_files):.2f} seconds")

        # Run statistics, persisted alongside the raw results.
        stats = {
            "total_files": len(input_files),
            "pdf_pages": pdf_page_count,
            "regular_images": len(input_files) - pdf_page_count,
            "success_count": success_count,
            "error_count": error_count,
            "success_rate": success_count / len(input_files) if len(input_files) > 0 else 0,
            "total_time": total_time,
            "throughput": len(input_files) / total_time if total_time > 0 else 0,
            "avg_time_per_file": total_time / len(input_files) if len(input_files) > 0 else 0,
            "device": args.device,
            "pipeline": args.pipeline,
            "pdf_dpi": args.pdf_dpi,
            "normalize_numbers": normalize_numbers,
            "total_character_changes": total_changes,
            "timestamp": time.strftime("%Y-%m-%d %H:%M:%S")
        }

        # Save the combined stats + per-image results as JSON.
        output_file_name = Path(args.output_dir).name
        output_file = os.path.join(args.output_dir, f"{output_file_name}_unified.json")
        final_results = {
            "stats": stats,
            "results": results
        }

        with open(output_file, 'w', encoding='utf-8') as f:
            json.dump(final_results, f, ensure_ascii=False, indent=2)

        print(f"💾 Results saved to: {output_file}")

        # Decide where the processed-files CSV goes: default next to output_dir,
        # or the user-supplied --collect_results path.
        if not args.collect_results:
            output_file_processed = Path(args.output_dir) / f"processed_files_{time.strftime('%Y%m%d_%H%M%S')}.csv"
        else:
            output_file_processed = Path(args.collect_results).resolve()
        # Fix: a user-supplied --collect_results path may point into a directory
        # that does not exist yet; create it instead of crashing on open().
        output_file_processed.parent.mkdir(parents=True, exist_ok=True)

        processed_files = collect_pid_files(output_file)
        # Fix: use csv.writer so file paths containing commas or quotes are
        # escaped correctly instead of silently corrupting the CSV.
        # newline='' is required by the csv module to avoid blank lines on Windows.
        with open(output_file_processed, 'w', encoding='utf-8', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(["image_path", "status"])
            for file_path, status in processed_files:
                writer.writerow([file_path, status])
        print(f"💾 Processed files saved to: {output_file_processed}")
        return 0

    except Exception as e:
        print(f"❌ Processing failed: {e}", file=sys.stderr)
        traceback.print_exc()
        return 1
- if __name__ == "__main__":
- print(f"🚀 启动统一PDF/图像处理程序...")
- print(f"🔧 CUDA_VISIBLE_DEVICES: {os.environ.get('CUDA_VISIBLE_DEVICES', 'Not set')}")
-
- if len(sys.argv) == 1:
- # 如果没有命令行参数,使用默认配置运行
- print("ℹ️ No command line arguments provided. Running with default configuration...")
-
- # 默认配置
- default_config = {
- # "input_file": "/home/ubuntu/zhch/data/至远彩色印刷工业有限公司/2023年度报告母公司.pdf",
- # "input_file": "/home/ubuntu/zhch/data/至远彩色印刷工业有限公司/PPStructureV3_Results/2023年度报告母公司/2023年度报告母公司_page_003.png",
- # "output_dir": "/home/ubuntu/zhch/data/至远彩色印刷工业有限公司/PPStructureV3_Results",
- # "collect_results": f"/home/ubuntu/zhch/data/至远彩色印刷工业有限公司/PPStructureV3_Results/processed_files_{time.strftime('%Y%m%d_%H%M%S')}.csv",
- "input_file": "/Users/zhch158/workspace/data/至远彩色印刷工业有限公司/2023年度报告母公司.pdf",
- # "input_file": "/Users/zhch158/workspace/data/至远彩色印刷工业有限公司/data_PPStructureV3_Results/2023年度报告母公司/2023年度报告母公司_page_003.png",
- "output_dir": "/Users/zhch158/workspace/data/至远彩色印刷工业有限公司/data_PPStructureV3_Results",
- "collect_results": f"/Users/zhch158/workspace/data/至远彩色印刷工业有限公司/data_PPStructureV3_Results/processed_files_{time.strftime('%Y%m%d_%H%M%S')}.csv",
- # "input_dir": "../../OmniDocBench/OpenDataLab___OmniDocBench/images",
- # "output_dir": "./OmniDocBench_PPStructureV3_Results",
- # "collect_results": f"./OmniDocBench_PPStructureV3_Results/processed_files_{time.strftime('%Y%m%d_%H%M%S')}.csv",
- "pipeline": "./my_config/PP-StructureV3-zhch.yaml",
- "device": "cpu",
- }
-
- # 构造参数
- sys.argv = [sys.argv[0]]
- for key, value in default_config.items():
- sys.argv.extend([f"--{key}", str(value)])
-
- # 可以添加禁用标准化选项
- # sys.argv.append("--no-normalize")
-
- # 测试模式
- # sys.argv.append("--test_mode")
-
- sys.exit(main())