| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560 |
- #!/usr/bin/env python3
- """
- 批量处理图片/PDF文件并生成符合评测要求的预测结果(DotsOCR版本)
- 根据 OmniDocBench 评测要求:
- - 输入:支持 PDF 和各种图片格式(统一使用 --input 参数)
- - 输出:每个文件对应的 .md、.json 和带标注的 layout 图片文件
- - 调用方式:通过 DotsOCR vLLM 服务器处理
- 使用方法:
- python main.py --input document.pdf --output_dir ./output
- python main.py --input ./images/ --output_dir ./output
- python main.py --input file_list.txt --output_dir ./output
- python main.py --input results.csv --output_dir ./output --dry_run
- """
- import os
- import sys
- import json
- import time
- import traceback
- from pathlib import Path
- from typing import List, Dict, Any
- from tqdm import tqdm
- import argparse
- from loguru import logger
- # 导入 ocr_utils
- ocr_platform_root = Path(__file__).parents[2]
- if str(ocr_platform_root) not in sys.path:
- sys.path.insert(0, str(ocr_platform_root))
- from ocr_utils import (
- get_input_files,
- collect_pid_files,
- setup_logging
- )
- # 导入处理器
- try:
- from .processor import DotsOCRProcessor
- except ImportError:
- from processor import DotsOCRProcessor
- # 导入 dots.ocr 相关模块
- from dots_ocr.utils import dict_promptmode_to_prompt
- from dots_ocr.utils.consts import MIN_PIXELS, MAX_PIXELS
def process_images_single_process(
    image_paths: List[str],
    processor: DotsOCRProcessor,
    batch_size: int = 1,
    output_dir: str = "./output"
) -> List[Dict[str, Any]]:
    """
    Sequential (single-process) image processing loop.

    Args:
        image_paths: List of image file paths to process.
        processor: DotsOCR processor instance; one call per image.
        batch_size: Number of images per progress-bar batch.
        output_dir: Directory where per-image outputs are written.

    Returns:
        One result dict per input image. Images that raised get a dict with
        ``success=False`` and an ``error`` message so the output stays
        aligned with the input list.
    """
    # Make sure the output directory exists before processing starts.
    output_path = Path(output_dir)
    output_path.mkdir(parents=True, exist_ok=True)

    all_results: List[Dict[str, Any]] = []
    total_images = len(image_paths)

    logger.info(f"Processing {total_images} images with batch size {batch_size}")

    with tqdm(total=total_images, desc="Processing images", unit="img",
              bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]') as pbar:

        for i in range(0, total_images, batch_size):
            batch = image_paths[i:i + batch_size]
            batch_start_time = time.time()
            batch_results = []

            try:
                for image_path in batch:
                    try:
                        result = processor.process_single_image(image_path, output_dir)
                        batch_results.append(result)
                    except Exception as e:
                        # Per-image failure: record it and continue with the
                        # rest of the batch instead of aborting.
                        logger.error(f"Error processing {image_path}: {e}")
                        batch_results.append({
                            "image_path": image_path,
                            "processing_time": 0,
                            "success": False,
                            "device": f"{processor.ip}:{processor.port}",
                            "error": str(e)
                        })

                batch_processing_time = time.time() - batch_start_time
                all_results.extend(batch_results)

                # Progress-bar statistics, cumulative over everything done so
                # far. (Removed previously-computed per-batch success/skipped
                # counters that were never read.)
                total_success = sum(1 for r in all_results if r.get('success', False))
                total_skipped = sum(1 for r in all_results if r.get('skipped', False))
                avg_time = batch_processing_time / len(batch) if len(batch) > 0 else 0

                pbar.update(len(batch))
                pbar.set_postfix({
                    'batch_time': f"{batch_processing_time:.2f}s",
                    'avg_time': f"{avg_time:.2f}s/img",
                    'success': f"{total_success}/{len(all_results)}",
                    'skipped': f"{total_skipped}",
                    'rate': f"{total_success/len(all_results)*100:.1f}%" if len(all_results) > 0 else "0%"
                })

            except Exception as e:
                # Batch-level failure: mark every image in the batch as failed
                # so the result list stays aligned with the input list.
                logger.error(f"Error processing batch {[Path(p).name for p in batch]}: {e}")
                error_results = []
                for img_path in batch:
                    error_results.append({
                        "image_path": str(img_path),
                        "processing_time": 0,
                        "success": False,
                        "device": f"{processor.ip}:{processor.port}",
                        "error": str(e)
                    })
                all_results.extend(error_results)
                pbar.update(len(batch))

    return all_results
def process_images_concurrent(
    image_paths: List[str],
    processor: DotsOCRProcessor,
    batch_size: int = 1,
    output_dir: str = "./output",
    max_workers: int = 3
) -> List[Dict[str, Any]]:
    """Thread-pool variant of the image processing loop.

    Same contract as the sequential version: one result dict per image,
    with failed images recorded as ``success=False`` entries. Results are
    appended in batch-completion order.
    """

    from concurrent.futures import ThreadPoolExecutor, as_completed

    Path(output_dir).mkdir(parents=True, exist_ok=True)

    def _error_entry(img_path, exc):
        """Failure record for an image whose processing raised."""
        return {
            "image_path": img_path,
            "processing_time": 0,
            "success": False,
            "device": f"{processor.ip}:{processor.port}",
            "error": str(exc)
        }

    def _run_batch(images):
        """Process one batch of images, capturing per-image failures."""
        outcomes = []
        for img in images:
            try:
                outcomes.append(processor.process_single_image(img, output_dir))
            except Exception as exc:
                outcomes.append(_error_entry(img, exc))
        return outcomes

    # Split the input into fixed-size batches.
    batches = [image_paths[start:start + batch_size]
               for start in range(0, len(image_paths), batch_size)]

    all_results: List[Dict[str, Any]] = []

    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        # Submit every batch up front; map futures back to their batches so
        # a whole-batch failure can still be attributed to its images.
        pending = {pool.submit(_run_batch, chunk): chunk for chunk in batches}

        with tqdm(total=len(image_paths), desc="Processing images") as pbar:
            for done in as_completed(pending):
                try:
                    chunk_results = done.result()
                except Exception as exc:
                    # Whole-batch failure: record every image in it as failed.
                    failed_chunk = pending[done]
                    all_results.extend(_error_entry(p, exc) for p in failed_chunk)
                    pbar.update(len(failed_chunk))
                    continue

                all_results.extend(chunk_results)
                ok = sum(1 for r in chunk_results if r.get('success', False))
                pbar.update(len(chunk_results))
                pbar.set_postfix({'batch_success': f"{ok}/{len(chunk_results)}"})

    return all_results
def main():
    """Program entry point.

    Parses CLI arguments, resolves the input file set, optionally performs a
    dry run, then processes all images through the DotsOCR vLLM server and
    writes a results JSON plus a processed-files CSV into the output directory.

    Returns:
        int: 0 on success (including dry runs), 1 on any failure.
    """
    parser = argparse.ArgumentParser(
        description="DotsOCR vLLM Batch Processing",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
示例:
  # 处理单个PDF文件
  python main.py --input document.pdf --output_dir ./output

  # 处理图片目录
  python main.py --input ./images/ --output_dir ./output

  # 处理文件列表
  python main.py --input file_list.txt --output_dir ./output

  # 处理CSV文件(失败的文件)
  python main.py --input results.csv --output_dir ./output

  # 指定页面范围(仅PDF)
  python main.py --input document.pdf --output_dir ./output --pages "1-5,7"

  # 仅验证配置(dry run)
  python main.py --input document.pdf --output_dir ./output --dry_run

  # 使用 DEBUG 日志级别获取详细错误信息
  python main.py --input document.pdf --output_dir ./output --log_level DEBUG
        """
    )

    # Input argument (unified via --input: PDF, image, directory, .txt list, CSV)
    parser.add_argument(
        "--input", "-i",
        required=True,
        type=str,
        help="输入路径(支持PDF文件、图片文件、图片目录、文件列表.txt、CSV文件)"
    )

    # Output argument
    parser.add_argument(
        "--output_dir", "-o",
        type=str,
        required=True,
        help="输出目录"
    )

    # DotsOCR vLLM server arguments
    parser.add_argument(
        "--ip",
        type=str,
        default="10.192.72.11",
        help="vLLM 服务器 IP"
    )
    parser.add_argument(
        "--port",
        type=int,
        default=8101,
        help="vLLM 服务器端口"
    )
    parser.add_argument(
        "--model_name",
        type=str,
        default="DotsOCR",
        help="模型名称"
    )
    parser.add_argument(
        "--prompt_mode",
        type=str,
        default="prompt_layout_all_en",
        choices=list(dict_promptmode_to_prompt.keys()),
        help="提示模式"
    )
    parser.add_argument(
        "--min_pixels",
        type=int,
        default=MIN_PIXELS,
        help="最小像素数"
    )
    parser.add_argument(
        "--max_pixels",
        type=int,
        default=MAX_PIXELS,
        help="最大像素数"
    )
    parser.add_argument(
        "--dpi",
        type=int,
        default=200,
        help="PDF 转图片的 DPI"
    )
    parser.add_argument(
        '--no-normalize',
        action='store_true',
        help='禁用数字标准化'
    )

    # Processing arguments
    parser.add_argument(
        "--batch_size",
        type=int,
        default=1,
        help="Batch size"
    )
    parser.add_argument(
        "--pages", "-p",
        type=str,
        help="页面范围(PDF和图片目录有效),如: '1-5,7,9-12', '1-', '-10'"
    )
    parser.add_argument(
        "--collect_results",
        type=str,
        help="收集处理结果到指定CSV文件"
    )

    # Concurrency arguments
    parser.add_argument(
        "--max_workers",
        type=int,
        default=3,
        help="Maximum number of concurrent workers (should match vLLM data-parallel-size)"
    )
    parser.add_argument(
        "--use_threading",
        action="store_true",
        help="Use multi-threading"
    )

    # Logging arguments
    parser.add_argument(
        "--log_level",
        default="INFO",
        choices=["DEBUG", "INFO", "WARNING", "ERROR"],
        help="日志级别(默认: INFO)"
    )
    parser.add_argument(
        "--log_file",
        type=str,
        help="日志文件路径"
    )

    # Dry-run argument
    parser.add_argument(
        "--dry_run",
        action="store_true",
        help="仅验证配置和输入,不执行实际处理"
    )

    args = parser.parse_args()

    # Configure logging (level + optional file sink).
    setup_logging(args.log_level, args.log_file)

    try:
        # Adapter object for get_input_files: it expects attributes named
        # input / output_dir / pdf_dpi rather than the argparse namespace.
        class Args:
            def __init__(self, input_path, output_dir, pdf_dpi):
                self.input = input_path
                self.output_dir = output_dir
                self.pdf_dpi = pdf_dpi

        args_obj = Args(args.input, args.output_dir, args.dpi)

        # Collect and preprocess input files (page-range filtering is already
        # handled inside get_input_files).
        logger.info("🔄 Preprocessing input files...")
        if args.pages:
            logger.info(f"📄 页面范围: {args.pages}")
        image_files = get_input_files(args_obj, page_range=args.pages)

        if not image_files:
            logger.error("❌ No input files found or processed")
            return 1

        output_dir = Path(args.output_dir).resolve()
        logger.info(f"📁 Output dir: {output_dir}")
        logger.info(f"📊 Found {len(image_files)} image files to process")

        # Dry-run mode: log the effective configuration and the file list,
        # then exit without contacting the server or writing outputs.
        if args.dry_run:
            logger.info("🔍 Dry run mode: 仅验证配置,不执行处理")
            logger.info(f"📋 配置信息:")
            logger.info(f"  - 输入: {args.input}")
            logger.info(f"  - 输出目录: {output_dir}")
            logger.info(f"  - 服务器: {args.ip}:{args.port}")
            logger.info(f"  - 模型: {args.model_name}")
            logger.info(f"  - 提示模式: {args.prompt_mode}")
            logger.info(f"  - 批次大小: {args.batch_size}")
            logger.info(f"  - PDF DPI: {args.dpi}")
            logger.info(f"  - 数字标准化: {not args.no_normalize}")
            logger.info(f"  - 日志级别: {args.log_level}")
            if args.pages:
                logger.info(f"  - 页面范围: {args.pages}")
            if args.use_threading:
                logger.info(f"  - 并发工作数: {args.max_workers}")
            logger.info(f"📋 将要处理的文件 ({len(image_files)} 个):")
            for i, img_file in enumerate(image_files[:20], 1):  # show only the first 20
                logger.info(f"  {i}. {img_file}")
            if len(image_files) > 20:
                logger.info(f"  ... 还有 {len(image_files) - 20} 个文件")
            logger.info("✅ Dry run 完成:配置验证通过")
            return 0

        logger.info(f"🌐 Using server: {args.ip}:{args.port}")
        logger.info(f"📦 Batch size: {args.batch_size}")
        logger.info(f"🎯 Prompt mode: {args.prompt_mode}")

        # Build the processor that talks to the DotsOCR vLLM server.
        processor = DotsOCRProcessor(
            ip=args.ip,
            port=args.port,
            model_name=args.model_name,
            prompt_mode=args.prompt_mode,
            dpi=args.dpi,
            min_pixels=args.min_pixels,
            max_pixels=args.max_pixels,
            normalize_numbers=not args.no_normalize,
            log_level=args.log_level
        )

        # Start processing.
        start_time = time.time()

        # Choose between the threaded and the sequential implementation.
        if args.use_threading:
            results = process_images_concurrent(
                image_files,
                processor,
                args.batch_size,
                str(output_dir),
                args.max_workers
            )
        else:
            results = process_images_single_process(
                image_files,
                processor,
                args.batch_size,
                str(output_dir)
            )

        total_time = time.time() - start_time

        # Aggregate result statistics.
        success_count = sum(1 for r in results if r.get('success', False))
        skipped_count = sum(1 for r in results if r.get('skipped', False))
        # NOTE(review): error_count is "everything not successful", so skipped
        # entries that are not also success=True get reported as failures —
        # confirm this is intended.
        error_count = len(results) - success_count
        pdf_page_count = sum(1 for r in results if r.get('is_pdf_page', False))

        print(f"\n" + "="*60)
        print(f"✅ Processing completed!")
        print(f"📊 Statistics:")
        print(f"   Total files processed: {len(image_files)}")
        print(f"   PDF pages processed: {pdf_page_count}")
        print(f"   Regular images processed: {len(image_files) - pdf_page_count}")
        print(f"   Successful: {success_count}")
        print(f"   Skipped: {skipped_count}")
        print(f"   Failed: {error_count}")
        if len(image_files) > 0:
            print(f"   Success rate: {success_count / len(image_files) * 100:.2f}%")

        print(f"⏱️ Performance:")
        print(f"   Total time: {total_time:.2f} seconds")
        if total_time > 0:
            print(f"   Throughput: {len(image_files) / total_time:.2f} images/second")
            print(f"   Avg time per image: {total_time / len(image_files):.2f} seconds")

        print(f"\n📁 Output Structure:")
        print(f"   output_dir/")
        print(f"   ├── filename.md           # Markdown content")
        print(f"   ├── filename.json         # Layout info JSON")
        print(f"   └── filename_layout.jpg   # Layout visualization")

        # Build the run-level statistics record saved alongside the results.
        stats = {
            "total_files": len(image_files),
            "pdf_pages": pdf_page_count,
            "regular_images": len(image_files) - pdf_page_count,
            "success_count": success_count,
            "skipped_count": skipped_count,
            "error_count": error_count,
            "success_rate": success_count / len(image_files) if len(image_files) > 0 else 0,
            "total_time": total_time,
            "throughput": len(image_files) / total_time if total_time > 0 else 0,
            "avg_time_per_image": total_time / len(image_files) if len(image_files) > 0 else 0,
            "batch_size": args.batch_size,
            "server": f"{args.ip}:{args.port}",
            "model": args.model_name,
            "prompt_mode": args.prompt_mode,
            "pdf_dpi": args.dpi,
            "normalization_enabled": not args.no_normalize,
            "timestamp": time.strftime("%Y-%m-%d %H:%M:%S")
        }

        # Save the final combined results (stats + per-image results) as JSON.
        output_file_name = Path(output_dir).name
        output_file = output_dir / f"{output_file_name}_results.json"
        final_results = {
            "stats": stats,
            "results": results
        }

        with open(output_file, 'w', encoding='utf-8') as f:
            json.dump(final_results, f, ensure_ascii=False, indent=2)

        logger.info(f"💾 Results saved to: {output_file}")

        # Collect per-file processing status into a CSV (timestamped default
        # name unless --collect_results was given).
        if not args.collect_results:
            output_file_processed = output_dir / f"processed_files_{time.strftime('%Y%m%d_%H%M%S')}.csv"
        else:
            output_file_processed = Path(args.collect_results).resolve()

        processed_files = collect_pid_files(str(output_file))
        with open(output_file_processed, 'w', encoding='utf-8') as f:
            f.write("image_path,status\n")
            for file_path, status in processed_files:
                f.write(f"{file_path},{status}\n")
        logger.info(f"💾 Processed files saved to: {output_file_processed}")
        return 0

    except Exception as e:
        # Top-level boundary: log, dump the traceback, and signal failure
        # via the exit code instead of crashing.
        logger.error(f"Processing failed: {e}")
        traceback.print_exc()
        return 1
if __name__ == "__main__":
    logger.info("🚀 启动DotsOCR vLLM统一PDF/图像处理程序...")
    logger.info(f"🔧 CUDA_VISIBLE_DEVICES: {os.environ.get('CUDA_VISIBLE_DEVICES', 'Not set')}")

    if len(sys.argv) == 1:
        # No CLI arguments given: fall back to the built-in default config.
        logger.info("ℹ️ No command line arguments provided. Running with default configuration...")

        default_config = {
            # "input": "/Users/zhch158/workspace/data/流水分析/马公账流水_工商银行.pdf",
            "input": "/Users/zhch158/workspace/repository.git/ocr_platform/ocr_tools/dots.ocr_vl_tool/output/processed_files_20251218_164332.csv",
            "output_dir": "./output",
            "ip": "10.192.72.11",
            "port": "8101",
            "model_name": "DotsOCR",
            "prompt_mode": "prompt_layout_all_en",
            "batch_size": "1",
            "dpi": "200",
            "pages": "-2",
        }

        # Rebuild argv as if the defaults had been passed on the command line.
        rebuilt_argv = [sys.argv[0]]
        for option, value in default_config.items():
            rebuilt_argv += [f"--{option}", str(value)]
        sys.argv = rebuilt_argv

    sys.exit(main())
|