pp_structurev3.py

# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
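
# Serving layer for the PP-StructureV3 layout-parsing pipeline: builds a
# FastAPI application that exposes the pipeline through a single inference
# operation.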

from typing import Any, Dict, List

from .....utils.deps import function_requires_deps, is_dep_available
from ...infra import utils as serving_utils
from ...infra.config import AppConfig
from ...infra.models import AIStudioResultResponse
from ...schemas.pp_structurev3 import INFER_ENDPOINT, InferRequest, InferResult
from .._app import create_app, primary_operation
from ._common import common
from ._common import ocr as ocr_common

if is_dep_available("fastapi"):
    from fastapi import FastAPI


@function_requires_deps("fastapi")
def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
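    # Create the FastAPI app and its shared context; app_aiohttp_session=True
    # attaches an aiohttp client session for asynchronous HTTP requests.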
    app, ctx = create_app(
        pipeline=pipeline, app_config=app_config, app_aiohttp_session=True
    )
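    # Apply the common OCR-serving context setup; the handler below reads
    # ctx.extra for file storage and image output settings.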
    ocr_common.update_app_context(ctx)
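    # Register the primary inference operation at INFER_ENDPOINT.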
    @primary_operation(
        app,
        INFER_ENDPOINT,
        "infer",
    )
    async def _infer(
        request: InferRequest,
    ) -> AIStudioResultResponse[InferResult]:
        pipeline = ctx.pipeline

        log_id = serving_utils.generate_log_id()
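        # Fetch and decode the input images from the request; data_info
        # describes the received input.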
        images, data_info = await ocr_common.get_images(request, ctx)
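        # Run the pipeline asynchronously, forwarding the per-request tuning
        # parameters (camelCase request fields map to snake_case kwargs).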
        result = await pipeline.infer(
            images,
            use_doc_orientation_classify=request.useDocOrientationClassify,
            use_doc_unwarping=request.useDocUnwarping,
            use_textline_orientation=request.useTextlineOrientation,
            use_seal_recognition=request.useSealRecognition,
            use_table_recognition=request.useTableRecognition,
            use_formula_recognition=request.useFormulaRecognition,
            use_chart_recognition=request.useChartRecognition,
            use_region_detection=request.useRegionDetection,
            layout_threshold=request.layoutThreshold,
            layout_nms=request.layoutNms,
            layout_unclip_ratio=request.layoutUnclipRatio,
            layout_merge_bboxes_mode=request.layoutMergeBboxesMode,
            text_det_limit_side_len=request.textDetLimitSideLen,
            text_det_limit_type=request.textDetLimitType,
            text_det_thresh=request.textDetThresh,
            text_det_box_thresh=request.textDetBoxThresh,
            text_det_unclip_ratio=request.textDetUnclipRatio,
            text_rec_score_thresh=request.textRecScoreThresh,
            seal_det_limit_side_len=request.sealDetLimitSideLen,
            seal_det_limit_type=request.sealDetLimitType,
            seal_det_thresh=request.sealDetThresh,
            seal_det_box_thresh=request.sealDetBoxThresh,
            seal_det_unclip_ratio=request.sealDetUnclipRatio,
            seal_rec_score_thresh=request.sealRecScoreThresh,
            use_wired_table_cells_trans_to_html=request.useWiredTableCellsTransToHtml,
            use_wireless_table_cells_trans_to_html=request.useWirelessTableCellsTransToHtml,
            use_table_orientation_classify=request.useTableOrientationClassify,
            use_ocr_results_with_table_cells=request.useOcrResultsWithTableCells,
            use_e2e_wired_table_rec_model=request.useE2eWiredTableRecModel,
            use_e2e_wireless_table_rec_model=request.useE2eWirelessTableRecModel,
        )
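        # Convert each page's pipeline result into the serialized response
        # structure.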
        layout_parsing_results: List[Dict[str, Any]] = []
        for i, (img, item) in enumerate(zip(images, result)):
            pruned_res = common.prune_result(item.json["res"])
            md_data = item.markdown
            md_text = md_data["markdown_texts"]
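            # Serialize the images referenced by the Markdown output, honoring
            # the configured file storage, URL-vs-inline option, and maximum
            # output image size.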
            md_imgs = await serving_utils.call_async(
                common.postprocess_images,
                md_data["markdown_images"],
                log_id,
                filename_template=f"markdown_{i}/{{key}}",
                file_storage=ctx.extra["file_storage"],
                return_urls=ctx.extra["return_img_urls"],
                max_img_size=ctx.extra["max_output_img_size"],
            )
            md_flags = md_data["page_continuation_flags"]
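            # Visualization images (including the input image) are returned
            # only when visualization is enabled in the app config.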
            if ctx.config.visualize:
                imgs = {
                    "input_img": img,
                    **item.img,
                }
                imgs = await serving_utils.call_async(
                    common.postprocess_images,
                    imgs,
                    log_id,
                    filename_template=f"{{key}}_{i}.jpg",
                    file_storage=ctx.extra["file_storage"],
                    return_urls=ctx.extra["return_img_urls"],
                    max_img_size=ctx.extra["max_output_img_size"],
                )
            else:
                imgs = {}
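            # Assemble the per-page entry; the input image is reported
            # separately from the other output images.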
            layout_parsing_results.append(
                dict(
                    prunedResult=pruned_res,
                    markdown=dict(
                        text=md_text,
                        images=md_imgs,
                        isStart=md_flags[0],
                        isEnd=md_flags[1],
                    ),
                    outputImages=(
                        {k: v for k, v in imgs.items() if k != "input_img"}
                        if imgs
                        else None
                    ),
                    inputImage=imgs.get("input_img"),
                )
            )
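        # Return all page results together with the log ID and input data info.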
        return AIStudioResultResponse[InferResult](
            logId=log_id,
            result=InferResult(
                layoutParsingResults=layout_parsing_results,
                dataInfo=data_info,
            ),
        )

    return app