# pipeline_v4.py
# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import copy
import json
import os
import re
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np

from ....utils import logging
from ....utils.deps import (
    function_requires_deps,
    is_dep_available,
    pipeline_requires_extra,
)
from ....utils.file_interface import custom_open
from ...common.batch_sampler import ImageBatchSampler
from ...common.reader import ReadImage
from ...utils.benchmark import benchmark
from ...utils.hpi import HPIConfig
from ...utils.pp_option import PaddlePredictorOption
from ..components.chat_server import BaseChat
from ..layout_parsing.result import LayoutParsingResult
from .pipeline_base import PP_ChatOCR_Pipeline

if is_dep_available("opencv-contrib-python"):
    import cv2


@benchmark.time_methods
@pipeline_requires_extra("ie")
class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):
    """PP-ChatOCRv4 Pipeline"""

    entities = ["PP-ChatOCRv4-doc"]
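
    # The pipeline is used in three stages: `visual_predict` (layout parsing and
    # OCR), `build_vector` (a retrieval index over the extracted text), and
    # `chat` (LLM key information extraction, optionally fused with MLLM results
    # from `mllm_pred`). See the usage sketch at the end of this file.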

    def __init__(
        self,
        config: Dict,
        device: str = None,
        pp_option: PaddlePredictorOption = None,
        use_hpip: bool = False,
        hpi_config: Optional[Union[Dict[str, Any], HPIConfig]] = None,
        initial_predictor: bool = True,
    ) -> None:
        """Initializes the PP-ChatOCRv4-doc pipeline.

        Args:
            config (Dict): Configuration dictionary containing various settings.
            device (str, optional): Device to run the predictions on. Defaults to None.
            pp_option (PaddlePredictorOption, optional): PaddlePredictor options. Defaults to None.
            use_hpip (bool, optional): Whether to use the high-performance
                inference plugin (HPIP) by default. Defaults to False.
            hpi_config (Optional[Union[Dict[str, Any], HPIConfig]], optional):
                The default high-performance inference configuration dictionary.
                Defaults to None.
            initial_predictor (bool, optional): Whether to initialize the predictors. Defaults to True.
        """

        super().__init__(
            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_config=hpi_config
        )

        self.pipeline_name = config["pipeline_name"]
        self.config = config
        self.use_layout_parser = config.get("use_layout_parser", True)
        self.use_mllm_predict = config.get("use_mllm_predict", True)

        self.layout_parsing_pipeline = None
        self.chat_bot = None
        self.retriever = None
        self.mllm_chat_bot = None

        if initial_predictor:
            self.inintial_visual_predictor(config)
            self.inintial_chat_predictor(config)
            self.inintial_retriever_predictor(config)
            self.inintial_mllm_predictor(config)

        self.batch_sampler = ImageBatchSampler(batch_size=1)
        self.img_reader = ReadImage(format="BGR")

        self.table_structure_len_max = 500

    def close(self):
        if self.layout_parsing_pipeline is not None:
            self.layout_parsing_pipeline.close()

    def inintial_visual_predictor(self, config: dict) -> None:
        """
        Initializes the visual predictor with the given configuration.

        Args:
            config (dict): The configuration dictionary containing the necessary
                parameters for initializing the predictor.

        Returns:
            None
        """
        self.use_layout_parser = config.get("use_layout_parser", True)

        if self.use_layout_parser:
            layout_parsing_config = config.get("SubPipelines", {}).get(
                "LayoutParser",
                {"pipeline_config_error": "config error for layout_parsing_pipeline!"},
            )
            self.layout_parsing_pipeline = self.create_pipeline(layout_parsing_config)
        return

    def inintial_retriever_predictor(self, config: dict) -> None:
        """
        Initializes the retriever predictor with the given configuration.

        Args:
            config (dict): The configuration dictionary containing the necessary
                parameters for initializing the predictor.

        Returns:
            None
        """
        from .. import create_retriever

        retriever_config = config.get("SubModules", {}).get(
            "LLM_Retriever",
            {"retriever_config_error": "config error for llm retriever!"},
        )
        self.retriever = create_retriever(retriever_config)

    def inintial_chat_predictor(self, config: dict) -> None:
        """
        Initializes the chat predictor with the given configuration.

        Args:
            config (dict): The configuration dictionary containing the necessary
                parameters for initializing the predictor.

        Returns:
            None
        """
        from .. import create_chat_bot

        chat_bot_config = config.get("SubModules", {}).get(
            "LLM_Chat",
            {"chat_bot_config_error": "config error for llm chat bot!"},
        )
        self.chat_bot = create_chat_bot(chat_bot_config)

        from .. import create_prompt_engineering

        # NOTE: "PromptEngneering" (sic) is the literal key looked up in the
        # pipeline config; do not "correct" the spelling here.
        text_pe_config = (
            config.get("SubModules", {})
            .get("PromptEngneering", {})
            .get(
                "KIE_CommonText",
                {"pe_config_error": "config error for text_pe!"},
            )
        )
        self.text_pe = create_prompt_engineering(text_pe_config)

        table_pe_config = (
            config.get("SubModules", {})
            .get("PromptEngneering", {})
            .get(
                "KIE_Table",
                {"pe_config_error": "config error for table_pe!"},
            )
        )
        self.table_pe = create_prompt_engineering(table_pe_config)
        return

    def inintial_mllm_predictor(self, config: dict) -> None:
        """
        Initializes the MLLM predictor with the given configuration.

        Args:
            config (dict): The configuration dictionary containing the necessary
                parameters for initializing the predictor.

        Returns:
            None
        """
        from .. import create_chat_bot, create_prompt_engineering

        self.use_mllm_predict = config.get("use_mllm_predict", True)
        if self.use_mllm_predict:
            mllm_chat_bot_config = config.get("SubModules", {}).get(
                "MLLM_Chat",
                {"mllm_chat_bot_config": "config error for mllm chat bot!"},
            )
            self.mllm_chat_bot = create_chat_bot(mllm_chat_bot_config)
            ensemble_pe_config = (
                config.get("SubModules", {})
                .get("PromptEngneering", {})
                .get(
                    "Ensemble",
                    {"pe_config_error": "config error for ensemble_pe!"},
                )
            )
            self.ensemble_pe = create_prompt_engineering(ensemble_pe_config)
        return

    def decode_visual_result(self, layout_parsing_result: LayoutParsingResult) -> dict:
        """
        Decodes the visual result from the layout parsing result.

        Args:
            layout_parsing_result (LayoutParsingResult): The result of layout parsing.

        Returns:
            dict: The decoded visual information.
        """
        normal_text_dict = {}

        parsing_res_list = layout_parsing_result["parsing_res_list"]
        for parsing_res in parsing_res_list:
            label = parsing_res["block_label"]
            content = parsing_res["block_content"]
            if label in ["table", "formula"]:
                continue
            key = f"words in {label}"
            if key not in normal_text_dict:
                normal_text_dict[key] = content
            else:
                normal_text_dict[key] += f"\n {content}"

        table_res_list = layout_parsing_result["table_res_list"]
        table_text_list = []
        table_html_list = []
        table_nei_text_list = []
        for table_res in table_res_list:
            table_html_list.append(table_res["pred_html"])
            single_table_text = " ".join(table_res["table_ocr_pred"]["rec_texts"])
            table_text_list.append(single_table_text)
            table_nei_text_list.append(table_res["neighbor_texts"])

        visual_info = {}
        visual_info["normal_text_dict"] = normal_text_dict
        visual_info["table_text_list"] = table_text_list
        visual_info["table_html_list"] = table_html_list
        visual_info["table_nei_text_list"] = table_nei_text_list
        return visual_info
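
    # Shape of the returned `visual_info` (derived from the code above):
    #
    #     {
    #         "normal_text_dict":    {"words in <label>": "<concatenated text>", ...},
    #         "table_text_list":     ["<table OCR text>", ...],
    #         "table_html_list":     ["<table HTML>", ...],
    #         "table_nei_text_list": ["<text near the table>", ...],
    #     }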

    # Performs visual prediction (layout parsing) on the input images.
    def visual_predict(
        self,
        input: Union[str, List[str], np.ndarray, List[np.ndarray]],
        use_doc_orientation_classify: Optional[bool] = None,
        use_doc_unwarping: Optional[bool] = None,
        use_textline_orientation: Optional[bool] = None,
        use_seal_recognition: Optional[bool] = None,
        use_table_recognition: Optional[bool] = None,
        layout_threshold: Optional[Union[float, dict]] = None,
        layout_nms: Optional[bool] = None,
        layout_unclip_ratio: Optional[Union[float, Tuple[float, float], dict]] = None,
        layout_merge_bboxes_mode: Optional[str] = None,
        text_det_limit_side_len: Optional[int] = None,
        text_det_limit_type: Optional[str] = None,
        text_det_thresh: Optional[float] = None,
        text_det_box_thresh: Optional[float] = None,
        text_det_unclip_ratio: Optional[float] = None,
        text_rec_score_thresh: Optional[float] = None,
        seal_det_limit_side_len: Optional[int] = None,
        seal_det_limit_type: Optional[str] = None,
        seal_det_thresh: Optional[float] = None,
        seal_det_box_thresh: Optional[float] = None,
        seal_det_unclip_ratio: Optional[float] = None,
        seal_rec_score_thresh: Optional[float] = None,
        **kwargs,
    ) -> dict:
        """
        Takes an input image or a list of images and performs various visual
        prediction tasks such as document orientation classification, document
        unwarping, general OCR, seal recognition, and table recognition,
        depending on the provided flags.

        Args:
            input (Union[str, list[str], np.ndarray, list[np.ndarray]]): Input image path, list of image paths,
                numpy array of an image, or list of numpy arrays.
            use_doc_orientation_classify (bool): Flag to use document orientation classification.
            use_doc_unwarping (bool): Flag to use document unwarping.
            use_textline_orientation (Optional[bool]): Whether to use textline orientation prediction.
            use_seal_recognition (bool): Flag to use seal recognition.
            use_table_recognition (bool): Flag to use table recognition.
            layout_threshold (Optional[float]): The threshold value to filter out low-confidence predictions. Defaults to None.
            layout_nms (Optional[bool]): Whether to use layout-aware NMS. Defaults to None.
            layout_unclip_ratio (Optional[Union[float, Tuple[float, float]]], optional): The ratio of unclipping the bounding box.
                Defaults to None.
                If it's a single number, it is used for both width and height.
                If it's a tuple of two numbers, they are used for width and height respectively.
                If it's None, no unclipping will be performed.
            layout_merge_bboxes_mode (Optional[str], optional): The mode for merging bounding boxes. Defaults to None.
            text_det_limit_side_len (Optional[int]): Maximum side length for text detection.
            text_det_limit_type (Optional[str]): Type of limit to apply for text detection.
            text_det_thresh (Optional[float]): Threshold for text detection.
            text_det_box_thresh (Optional[float]): Threshold for text detection boxes.
            text_det_unclip_ratio (Optional[float]): Ratio for unclipping text detection boxes.
            text_rec_score_thresh (Optional[float]): Score threshold for text recognition.
            seal_det_limit_side_len (Optional[int]): Maximum side length for seal detection.
            seal_det_limit_type (Optional[str]): Type of limit to apply for seal detection.
            seal_det_thresh (Optional[float]): Threshold for seal detection.
            seal_det_box_thresh (Optional[float]): Threshold for seal detection boxes.
            seal_det_unclip_ratio (Optional[float]): Ratio for unclipping seal detection boxes.
            seal_rec_score_thresh (Optional[float]): Score threshold for seal recognition.
            **kwargs: Additional keyword arguments.

        Yields:
            dict: A dictionary containing the layout parsing result and visual information.
        """
        if not self.use_layout_parser:
            logging.error("The models for layout parser are not initialized.")
            yield {"error": "The models for layout parser are not initialized."}
            return

        if self.layout_parsing_pipeline is None:
            logging.warning(
                "The layout parsing pipeline is not initialized, will initialize it now."
            )
            self.inintial_visual_predictor(self.config)

        for layout_parsing_result in self.layout_parsing_pipeline.predict(
            input,
            use_doc_orientation_classify=use_doc_orientation_classify,
            use_doc_unwarping=use_doc_unwarping,
            use_textline_orientation=use_textline_orientation,
            use_seal_recognition=use_seal_recognition,
            use_table_recognition=use_table_recognition,
            layout_threshold=layout_threshold,
            layout_nms=layout_nms,
            layout_unclip_ratio=layout_unclip_ratio,
            layout_merge_bboxes_mode=layout_merge_bboxes_mode,
            text_det_limit_side_len=text_det_limit_side_len,
            text_det_limit_type=text_det_limit_type,
            text_det_thresh=text_det_thresh,
            text_det_box_thresh=text_det_box_thresh,
            text_det_unclip_ratio=text_det_unclip_ratio,
            text_rec_score_thresh=text_rec_score_thresh,
            seal_det_box_thresh=seal_det_box_thresh,
            seal_det_limit_side_len=seal_det_limit_side_len,
            seal_det_limit_type=seal_det_limit_type,
            seal_det_thresh=seal_det_thresh,
            seal_det_unclip_ratio=seal_det_unclip_ratio,
            seal_rec_score_thresh=seal_rec_score_thresh,
        ):
            visual_info = self.decode_visual_result(layout_parsing_result)

            visual_predict_res = {
                "layout_parsing_result": layout_parsing_result,
                "visual_info": visual_info,
            }
            yield visual_predict_res
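
    # Minimal usage sketch for the visual stage (illustrative; "doc.png" is a
    # hypothetical path, not part of this module):
    #
    #     pipeline = PP_ChatOCRv4_Pipeline(config)
    #     for res in pipeline.visual_predict("doc.png"):
    #         res["layout_parsing_result"]  # full layout parsing output
    #         res["visual_info"]            # condensed text/table info for chat()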

    def save_visual_info_list(self, visual_info: dict, save_path: str) -> None:
        """
        Save the visual info list to the specified file path.

        Args:
            visual_info (dict): The visual info result, which can be a single object or a list of objects.
            save_path (str): The file path to save the visual info list.

        Returns:
            None
        """
        if not isinstance(visual_info, list):
            visual_info_list = [visual_info]
        else:
            visual_info_list = visual_info

        with open(save_path, "w") as fout:
            fout.write(json.dumps(visual_info_list, ensure_ascii=False) + "\n")
        return

    def load_visual_info_list(self, data_path: str) -> List[dict]:
        """
        Loads a visual info list from a JSON file.

        Args:
            data_path (str): The path to the JSON file containing visual info.

        Returns:
            list[dict]: A list of dict objects parsed from the JSON file.
        """
        with open(data_path, "r") as fin:
            data = fin.readline()
            visual_info_list = json.loads(data)
        return visual_info_list

    def merge_visual_info_list(
        self, visual_info_list: List[dict]
    ) -> Tuple[list, list, list, list]:
        """
        Merge visual info lists.

        Args:
            visual_info_list (list[dict]): A list of visual info results.

        Returns:
            tuple[list, list, list, list]: A tuple of four lists: normal text
                dicts, table texts, table HTML strings, and table neighbor texts.
        """
        all_normal_text_list = []
        all_table_text_list = []
        all_table_html_list = []
        all_table_nei_text_list = []
        for single_visual_info in visual_info_list:
            normal_text_dict = single_visual_info["normal_text_dict"]
            for key in normal_text_dict:
                normal_text_dict[key] = normal_text_dict[key].replace("\n", "")
            table_text_list = single_visual_info["table_text_list"]
            table_html_list = single_visual_info["table_html_list"]
            table_nei_text_list = single_visual_info["table_nei_text_list"]
            all_normal_text_list.append(normal_text_dict)
            all_table_text_list.extend(table_text_list)
            all_table_html_list.extend(table_html_list)
            all_table_nei_text_list.extend(table_nei_text_list)

        return (
            all_normal_text_list,
            all_table_text_list,
            all_table_html_list,
            all_table_nei_text_list,
        )

    def build_vector(
        self,
        visual_info: dict,
        min_characters: int = 3500,
        block_size: int = 300,
        flag_save_bytes_vector: bool = False,
        retriever_config: dict = None,
    ) -> dict:
        """
        Build a vector representation from visual information.

        Args:
            visual_info (dict): The visual information input, which can be a single instance or a list of instances.
            min_characters (int): The minimum number of characters required for text processing, defaults to 3500.
            block_size (int): The size of each chunk to split the text into.
            flag_save_bytes_vector (bool): Whether to save the vector as bytes, defaults to False.
            retriever_config (dict): The configuration for the retriever, defaults to None.

        Returns:
            dict: A dictionary containing the vector info and a flag indicating if the text is too short.
        """
        if not isinstance(visual_info, list):
            visual_info_list = [visual_info]
        else:
            visual_info_list = visual_info

        if retriever_config is not None:
            from .. import create_retriever

            retriever = create_retriever(retriever_config)
        else:
            if self.retriever is None:
                logging.warning(
                    "The retriever is not initialized, will initialize it now."
                )
                self.inintial_retriever_predictor(self.config)
            retriever = self.retriever

        all_visual_info = self.merge_visual_info_list(visual_info_list)
        (
            all_normal_text_list,
            all_table_text_list,
            all_table_html_list,
            all_table_nei_text_list,
        ) = all_visual_info

        vector_info = {}

        all_items = []
        for normal_text_dict in all_normal_text_list:
            for text_type, text in normal_text_dict.items():
                all_items += [f"{text_type}:{text}\n"]

        for table_html, table_text, table_nei_text in zip(
            all_table_html_list, all_table_text_list, all_table_nei_text_list
        ):
            # Tables whose HTML is too long to fit into the table prompt later
            # (see `chat()`) are folded into the retrieval corpus as plain text.
            if len(table_html) > min_characters - self.table_structure_len_max:
                all_items += [f"table:{table_text}\t{table_nei_text}"]

        all_text_str = "".join(all_items)
        vector_info["flag_save_bytes_vector"] = False
        if len(all_text_str) > min_characters:
            vector_info["flag_too_short_text"] = False
            vector_info["model_name"] = retriever.model_name
            vector_info["block_size"] = block_size
            vector_info["vector"] = retriever.generate_vector_database(
                all_items, block_size=block_size
            )
            if flag_save_bytes_vector:
                vector_info["vector"] = retriever.encode_vector_store_to_bytes(
                    vector_info["vector"]
                )
                vector_info["flag_save_bytes_vector"] = True
        else:
            vector_info["flag_too_short_text"] = True
            vector_info["vector"] = all_items
        return vector_info
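
    # Sketch of the vector stage (illustrative; assumes `visual_info_list` came
    # from `visual_predict` and a retriever is configured in the pipeline config):
    #
    #     vector_info = pipeline.build_vector(visual_info_list, block_size=300)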

    def save_vector(
        self, vector_info: dict, save_path: str, retriever_config: dict = None
    ) -> None:
        """
        Save the vector info to the specified file path, encoding the vector
        store to bytes first if it has not been encoded yet.
        """
        directory = os.path.dirname(save_path)
        if directory and not os.path.exists(directory):
            os.makedirs(directory)
        if retriever_config is not None:
            from .. import create_retriever

            retriever = create_retriever(retriever_config)
        else:
            if self.retriever is None:
                logging.warning(
                    "The retriever is not initialized, will initialize it now."
                )
                self.inintial_retriever_predictor(self.config)
            retriever = self.retriever

        vector_info_data = copy.deepcopy(vector_info)
        if (
            not vector_info["flag_too_short_text"]
            and not vector_info["flag_save_bytes_vector"]
        ):
            vector_info_data["vector"] = retriever.encode_vector_store_to_bytes(
                vector_info_data["vector"]
            )
            vector_info_data["flag_save_bytes_vector"] = True
        with custom_open(save_path, "w") as fout:
            fout.write(json.dumps(vector_info_data, ensure_ascii=False) + "\n")
        return

    def load_vector(self, data_path: str, retriever_config: dict = None) -> dict:
        """
        Load vector info from a JSON file, decoding the vector store from bytes
        if it was saved in byte form.
        """
        vector_info = None
        if retriever_config is not None:
            from .. import create_retriever

            retriever = create_retriever(retriever_config)
        else:
            if self.retriever is None:
                logging.warning(
                    "The retriever is not initialized, will initialize it now."
                )
                self.inintial_retriever_predictor(self.config)
            retriever = self.retriever

        with open(data_path, "r") as fin:
            data = fin.readline()
            vector_info = json.loads(data)
            if (
                "flag_too_short_text" not in vector_info
                or "flag_save_bytes_vector" not in vector_info
                or "vector" not in vector_info
            ):
                logging.error("Invalid vector info.")
                return {"error": "Invalid vector info when loading vector!"}
            if vector_info["flag_save_bytes_vector"]:
                vector_info["vector"] = retriever.decode_vector_store_from_bytes(
                    vector_info["vector"]
                )
                vector_info["flag_save_bytes_vector"] = False
        return vector_info
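
    # Round-trip sketch (illustrative; "output/vector.json" is a hypothetical path):
    #
    #     pipeline.save_vector(vector_info, "output/vector.json")
    #     vector_info = pipeline.load_vector("output/vector.json")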

    def format_key(self, key_list: Union[str, List[str]]) -> List[str]:
        """
        Formats the key list.

        Args:
            key_list (str|list[str]): A string or a list of strings representing the keys.

        Returns:
            list[str]: A list of formatted keys.
        """
        if key_list == "":
            return []

        if isinstance(key_list, list):
            key_list = [key.replace("\xa0", " ") for key in key_list]
            return key_list

        if isinstance(key_list, str):
            key_list = re.sub(r"[\t\n\r\f\v]", "", key_list)
            # Normalize fullwidth commas ("，") to ASCII commas before splitting.
            key_list = key_list.replace("，", ",").split(",")
            return key_list

        return []
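
    # For example (sketch): format_key("甲方，乙方") -> ["甲方", "乙方"]; for list
    # inputs, non-breaking spaces are replaced, e.g. ["名\xa0称"] -> ["名 称"].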

    @function_requires_deps("opencv-contrib-python")
    def mllm_pred(
        self,
        input: Union[str, np.ndarray],
        key_list: Union[str, List[str]],
        mllm_chat_bot_config=None,
    ) -> dict:
        """
        Generates MLLM results based on the provided key list and input image.

        Args:
            input (Union[str, np.ndarray]): Input image path, or numpy array of an image.
            key_list (Union[str, list[str]]): A single key or a list of keys to extract information.
            mllm_chat_bot_config (dict): The parameters for the MLLM chatbot, including api_type, api_key...; refer to the config file for more details.

        Returns:
            dict: A dictionary containing the chat results.
        """
        if not self.use_mllm_predict:
            logging.error("MLLM prediction is disabled.")
            return {"mllm_res": "Error:MLLM prediction is disabled!"}
        key_list = self.format_key(key_list)
        if len(key_list) == 0:
            # (zh) "The input key_list is invalid!"
            return {"mllm_res": "Error:输入的key_list无效!"}

        if isinstance(input, list):
            logging.error("Input is a list, but it's not supported here.")
            return {"mllm_res": "Error:Input is a list, but it's not supported here!"}
        if isinstance(input, str) and input.endswith(".pdf"):
            logging.error("MLLM prediction does not support PDF currently!")
            return {"mllm_res": "Error:MLLM prediction does not support PDF currently!"}
        if self.mllm_chat_bot is None:
            logging.warning(
                "The MLLM chat bot is not initialized, will initialize it now."
            )
            self.inintial_mllm_predictor(self.config)
        if mllm_chat_bot_config is not None:
            from .. import create_chat_bot

            mllm_chat_bot = create_chat_bot(mllm_chat_bot_config)
        else:
            mllm_chat_bot = self.mllm_chat_bot

        # `img_reader` yields a single image here (list inputs were rejected
        # above), so we return from inside the loop after the first image.
        for image_array in self.img_reader([input]):
            image_string = cv2.imencode(".jpg", image_array)[1].tobytes()
            image_base64 = base64.b64encode(image_string).decode("utf-8")
            result = {}
            for key in key_list:
                # Prompt (zh), roughly: "Answer with content that appears
                # verbatim in the image; it may be a word, phrase, or sentence.
                # Answer as fully as possible, keeping format, units, symbols,
                # and punctuation identical to the text in the image."
                prompt = (
                    str(key)
                    + "\n请用图片中完整出现的内容回答,可以是单词、短语或句子,针对问题回答尽可能详细和完整,并保持格式、单位、符号和标点都与图片中的文字内容完全一致。"
                )
                mllm_chat_bot_result = mllm_chat_bot.generate_chat_results(
                    prompt=prompt, image=image_base64
                )["content"]
                if mllm_chat_bot_result is None:
                    # (zh) "LLM call failed"
                    return {"mllm_res": "大模型调用失败"}
                result[key] = mllm_chat_bot_result
            return {"mllm_res": result}

    def generate_and_merge_chat_results(
        self,
        chat_bot: BaseChat,
        prompt: str,
        key_list: list,
        final_results: dict,
        failed_results: list,
    ) -> None:
        """
        Generate and merge chat results into the final results dictionary.

        Args:
            chat_bot (BaseChat): The chat bot used to generate results.
            prompt (str): The input prompt for the chat bot.
            key_list (list): A list of keys to track which results to merge.
            final_results (dict): The dictionary to store the final merged results.
            failed_results (list): A list of failure responses that must not be merged.

        Returns:
            None
        """
        llm_result = chat_bot.generate_chat_results(prompt)
        llm_result_content = llm_result["content"]
        llm_result_reasoning_content = llm_result["reasoning_content"]
        if llm_result_reasoning_content is not None:
            if "reasoning_content" not in final_results:
                final_results["reasoning_content"] = [llm_result_reasoning_content]
            else:
                final_results["reasoning_content"].append(llm_result_reasoning_content)
        if llm_result_content is None:
            # NOTE: ERROR_MASSAGE (sic) is the attribute name defined on the chat bot.
            logging.error(
                "chat bot error: \n [prompt:]\n %s\n [result:] %s\n"
                % (prompt, chat_bot.ERROR_MASSAGE)
            )
            return
        llm_result_content = chat_bot.fix_llm_result_format(llm_result_content)

        # Keys answered successfully are removed from key_list so that later
        # prompts only ask about the keys that are still missing.
        for key, value in llm_result_content.items():
            if value not in failed_results and key in key_list:
                key_list.remove(key)
                final_results[key] = value
        return

    def get_related_normal_text(
        self,
        retriever_config: dict,
        use_vector_retrieval: bool,
        vector_info: dict,
        key_list: List[str],
        all_normal_text_list: list,
        min_characters: int,
    ) -> str:
        """
        Retrieve related normal text based on vector retrieval or the full normal text list.

        Args:
            retriever_config (dict): Configuration for the retriever.
            use_vector_retrieval (bool): Whether to use vector retrieval.
            vector_info (dict): Dictionary containing vector information.
            key_list (list[str]): List of keys used to generate the retrieval queries.
            all_normal_text_list (list): List of normal text dicts.
            min_characters (int): The minimum number of characters required for text processing.

        Returns:
            str: Related normal text.
        """
        if use_vector_retrieval and vector_info is not None:

            if retriever_config is not None:
                from .. import create_retriever

                retriever = create_retriever(retriever_config)
            else:
                if self.retriever is None:
                    logging.warning(
                        "The retriever is not initialized, will initialize it now."
                    )
                    self.inintial_retriever_predictor(self.config)
                retriever = self.retriever

            question_key_list = [f"{key}" for key in key_list]
            vector = vector_info["vector"]
            if not vector_info["flag_too_short_text"]:
                assert (
                    vector_info["model_name"] == retriever.model_name
                ), f"The vector model name ({vector_info['model_name']}) does not match the retriever model name ({retriever.model_name}). Please check your retriever config."
                if vector_info["flag_save_bytes_vector"]:
                    vector = retriever.decode_vector_store_from_bytes(vector)
                related_text = retriever.similarity_retrieval(
                    question_key_list, vector, topk=50, min_characters=min_characters
                )
            else:
                if len(vector) > 0:
                    related_text = "".join(vector)
                else:
                    related_text = ""
        else:
            all_items = []
            for normal_text_dict in all_normal_text_list:
                for text_type, text in normal_text_dict.items():
                    all_items += [f"{text_type}:{text}\n"]
            related_text = "".join(all_items)
            if len(related_text) > min_characters:
                logging.warning(
                    "The input text content is too long; the large language model may truncate it."
                )
        return related_text

    def ensemble_ocr_llm_mllm(
        self,
        chat_bot: BaseChat,
        key_list: List[str],
        ocr_llm_predict_dict: dict,
        mllm_predict_dict: dict,
    ) -> dict:
        """
        Ensemble the OCR+LLM and MLLM predictions for the given key list.

        Args:
            chat_bot (BaseChat): The chat bot used to arbitrate between predictions.
            key_list (list[str]): List of keys to retrieve predictions.
            ocr_llm_predict_dict (dict): Dictionary containing OCR+LLM predictions.
            mllm_predict_dict (dict): Dictionary containing MLLM predictions.

        Returns:
            dict: A dictionary with the final predictions.
        """
        final_predict_dict = {}
        for key in key_list:
            predict = ""
            ocr_llm_predict = ""
            mllm_predict = ""
            if key in ocr_llm_predict_dict:
                ocr_llm_predict = ocr_llm_predict_dict[key]
            if key in mllm_predict_dict:
                mllm_predict = mllm_predict_dict[key]
            if ocr_llm_predict != "" and mllm_predict != "":
                # Both branches produced an answer: ask the LLM to pick one.
                # The ensemble prompt presumably labels the two candidates;
                # a reply containing "B" selects the MLLM prediction.
                prompt = self.ensemble_pe.generate_prompt(
                    key, ocr_llm_predict, mllm_predict
                )
                llm_result = chat_bot.generate_chat_results(prompt)
                llm_result_content = llm_result["content"]
                llm_result_reasoning_content = llm_result["reasoning_content"]
                if llm_result_reasoning_content is not None:
                    if "reasoning_content" not in final_predict_dict:
                        final_predict_dict["reasoning_content"] = [
                            llm_result_reasoning_content
                        ]
                    else:
                        final_predict_dict["reasoning_content"].append(
                            llm_result_reasoning_content
                        )
                if llm_result_content is not None:
                    llm_result_content = chat_bot.fix_llm_result_format(
                        llm_result_content
                    )
                    if key in llm_result_content:
                        tmp = llm_result_content[key]
                        if "B" in tmp:
                            predict = mllm_predict
                        else:
                            predict = ocr_llm_predict
                    else:
                        predict = ocr_llm_predict
            elif key in ocr_llm_predict_dict:
                predict = ocr_llm_predict_dict[key]
            elif key in mllm_predict_dict:
                predict = mllm_predict_dict[key]

            if predict != "":
                final_predict_dict[key] = predict
        return final_predict_dict

    def chat(
        self,
        key_list: Union[str, List[str]],
        visual_info: dict,
        use_vector_retrieval: bool = True,
        vector_info: dict = None,
        min_characters: int = 3500,
        text_task_description: str = None,
        text_output_format: str = None,
        text_rules_str: str = None,
        text_few_shot_demo_text_content: str = None,
        text_few_shot_demo_key_value_list: str = None,
        table_task_description: str = None,
        table_output_format: str = None,
        table_rules_str: str = None,
        table_few_shot_demo_text_content: str = None,
        table_few_shot_demo_key_value_list: str = None,
        mllm_predict_info: dict = None,
        mllm_integration_strategy: str = "integration",
        chat_bot_config: dict = None,
        retriever_config: dict = None,
    ) -> dict:
        """
        Generates chat results based on the provided key list and visual information.

        Args:
            key_list (Union[str, list[str]]): A single key or a list of keys to extract information.
            visual_info (dict): The visual information result.
            use_vector_retrieval (bool): Whether to use vector retrieval.
            vector_info (dict): The vector information for retrieval.
            min_characters (int): The minimum number of characters required for text processing, defaults to 3500.
            text_task_description (str): The description of the text task.
            text_output_format (str): The output format for text results.
            text_rules_str (str): The rules for generating text results.
            text_few_shot_demo_text_content (str): The text content for few-shot demos.
            text_few_shot_demo_key_value_list (str): The key-value list for few-shot demos.
            table_task_description (str): The description of the table task.
            table_output_format (str): The output format for table results.
            table_rules_str (str): The rules for generating table results.
            table_few_shot_demo_text_content (str): The text content for table few-shot demos.
            table_few_shot_demo_key_value_list (str): The key-value list for table few-shot demos.
            mllm_predict_info (dict): The dictionary of MLLM predictions.
            mllm_integration_strategy (str): The integration strategy of MLLM and LLM, defaults to "integration"; options are "integration", "llm_only" and "mllm_only".
            chat_bot_config (dict): The parameters for the LLM chatbot, including api_type, api_key...; refer to the config file for more details.
            retriever_config (dict): The parameters for the LLM retriever, including api_type, api_key...; refer to the config file for more details.

        Returns:
            dict: A dictionary containing the chat results.
        """
        key_list = self.format_key(key_list)
        key_list_ori = key_list.copy()
        if len(key_list) == 0:
            # (zh) "The input key_list is invalid!"
            return {"chat_res": "Error:输入的key_list无效!"}

        if not isinstance(visual_info, list):
            visual_info_list = [visual_info]
        else:
            visual_info_list = visual_info

        if self.chat_bot is None:
            logging.warning(
                "The LLM chat bot is not initialized, will initialize it now."
            )
            self.inintial_chat_predictor(self.config)

        if chat_bot_config is not None:
            from .. import create_chat_bot

            chat_bot = create_chat_bot(chat_bot_config)
        else:
            chat_bot = self.chat_bot

        all_visual_info = self.merge_visual_info_list(visual_info_list)
        (
            all_normal_text_list,
            all_table_text_list,
            all_table_html_list,
            all_table_nei_text_list,
        ) = all_visual_info

        final_results = {}
        # Responses treated as failures and never merged into final results,
        # (zh): "LLM call failed", "unknown", "key information not found".
        failed_results = ["大模型调用失败", "未知", "未找到关键信息", "None", ""]

        if len(key_list) > 0:
            related_text = self.get_related_normal_text(
                retriever_config,
                use_vector_retrieval,
                vector_info,
                key_list,
                all_normal_text_list,
                min_characters,
            )

            if len(related_text) > 0:
                prompt = self.text_pe.generate_prompt(
                    related_text,
                    key_list,
                    task_description=text_task_description,
                    output_format=text_output_format,
                    rules_str=text_rules_str,
                    few_shot_demo_text_content=text_few_shot_demo_text_content,
                    few_shot_demo_key_value_list=text_few_shot_demo_key_value_list,
                )
                self.generate_and_merge_chat_results(
                    chat_bot, prompt, key_list, final_results, failed_results
                )

        if len(key_list) > 0:
            for table_html, table_text, table_nei_text in zip(
                all_table_html_list, all_table_text_list, all_table_nei_text_list
            ):
                if len(table_html) <= min_characters - self.table_structure_len_max:
                    if len(key_list) > 0:
                        table_info = table_html
                        if len(table_nei_text) > 0:
                            # (zh) "表格周围文字" = "text around the table"
                            table_info = table_info + "\n 表格周围文字:" + table_nei_text
                        prompt = self.table_pe.generate_prompt(
                            table_info,
                            key_list,
                            task_description=table_task_description,
                            output_format=table_output_format,
                            rules_str=table_rules_str,
                            few_shot_demo_text_content=table_few_shot_demo_text_content,
                            few_shot_demo_key_value_list=table_few_shot_demo_key_value_list,
                        )
                        self.generate_and_merge_chat_results(
                            chat_bot,
                            prompt,
                            key_list,
                            final_results,
                            failed_results,
                        )

        if (
            self.use_mllm_predict
            and mllm_integration_strategy != "llm_only"
            and mllm_predict_info is not None
        ):
            if mllm_integration_strategy == "integration":
                final_predict_dict = self.ensemble_ocr_llm_mllm(
                    chat_bot, key_list_ori, final_results, mllm_predict_info
                )
            elif mllm_integration_strategy == "mllm_only":
                final_predict_dict = mllm_predict_info
            else:
                return {
                    "chat_res": f"Error:Unsupported mllm_integration_strategy {mllm_integration_strategy}, only 'integration', 'llm_only' and 'mllm_only' are supported!"
                }
        else:
            final_predict_dict = final_results
        return {"chat_res": final_predict_dict}

    def predict(self, *args, **kwargs) -> None:
        logging.error(
            "The PP-ChatOCRv4-doc pipeline does not support calling `predict()` directly! "
            "Please invoke `visual_predict`, `build_vector`, and `chat` in sequence to obtain the result."
        )
        return
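

# End-to-end usage sketch (illustrative only; the `config` dict, file path, and
# keys below are hypothetical placeholders, not defined by this module):
#
#     pipeline = PP_ChatOCRv4_Pipeline(config=config)
#     visual_info_list = []
#     for res in pipeline.visual_predict("contract.png"):
#         visual_info_list.append(res["visual_info"])
#     vector_info = pipeline.build_vector(visual_info_list)
#     mllm_res = pipeline.mllm_pred("contract.png", ["甲方", "乙方"])["mllm_res"]
#     chat_res = pipeline.chat(
#         ["甲方", "乙方"],
#         visual_info_list,
#         vector_info=vector_info,
#         mllm_predict_info=mllm_res if isinstance(mllm_res, dict) else None,
#     )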