# seal_recognition.py
# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
  14. from typing import Any, Dict, List
  15. from fastapi import FastAPI
  16. from ...infra import utils as serving_utils
  17. from ...infra.config import AppConfig
  18. from ...infra.models import ResultResponse
  19. from ...schemas.seal_recognition import INFER_ENDPOINT, InferRequest, InferResult
  20. from .._app import create_app, primary_operation
  21. from ._common import common
  22. from ._common import ocr as ocr_common
  23. def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> FastAPI:
  24. app, ctx = create_app(
  25. pipeline=pipeline, app_config=app_config, app_aiohttp_session=True
  26. )
  27. ocr_common.update_app_context(ctx)
  28. @primary_operation(
  29. app,
  30. INFER_ENDPOINT,
  31. "infer",
  32. )
  33. async def _infer(request: InferRequest) -> ResultResponse[InferResult]:
  34. pipeline = ctx.pipeline
  35. log_id = serving_utils.generate_log_id()
  36. images, data_info = await ocr_common.get_images(request, ctx)
  37. result = await pipeline.infer(
  38. images,
  39. use_doc_orientation_classify=request.useDocOrientationClassify,
  40. use_doc_unwarping=request.useDocUnwarping,
  41. use_layout_detection=request.useLayoutDetection,
  42. layout_threshold=request.layoutThreshold,
  43. layout_nms=request.layoutNms,
  44. layout_unclip_ratio=request.layoutUnclipRatio,
  45. layout_merge_bboxes_mode=request.layoutMergeBboxesMode,
  46. seal_det_limit_side_len=request.sealDetLimitSideLen,
  47. seal_det_limit_type=request.sealDetLimitType,
  48. seal_det_thresh=request.sealDetThresh,
  49. seal_det_box_thresh=request.sealDetBoxThresh,
  50. seal_det_unclip_ratio=request.sealDetUnclipRatio,
  51. seal_rec_score_thresh=request.sealRecScoreThresh,
  52. )
  53. seal_rec_results: List[Dict[str, Any]] = []
  54. for i, (img, item) in enumerate(zip(images, result)):
  55. pruned_res = common.prune_result(item.json["res"])
  56. if ctx.config.visualize:
  57. output_imgs = item.img
  58. imgs = {
  59. "input_img": img,
  60. "seal_rec_img": output_imgs["seal_res_region1"],
  61. }
  62. if "layout_det_res" in output_imgs:
  63. imgs["layout_det_img"] = output_imgs["layout_det_res"]
  64. if "preprocessed_img" in output_imgs:
  65. imgs["doc_preprocessing_img"] = output_imgs["preprocessed_img"]
  66. imgs = await serving_utils.call_async(
  67. common.postprocess_images,
  68. imgs,
  69. log_id,
  70. filename_template=f"{{key}}_{i}.jpg",
  71. file_storage=ctx.extra["file_storage"],
  72. return_urls=ctx.extra["return_img_urls"],
  73. max_img_size=ctx.extra["max_output_img_size"],
  74. )
  75. else:
  76. imgs = {}
  77. seal_rec_results.append(
  78. dict(
  79. prunedResult=pruned_res,
  80. sealRecImage=imgs.get("seal_rec_img"),
  81. layoutDetImage=imgs.get("layout_det_img"),
  82. docPreprocessingImage=imgs.get("doc_preprocessing_img"),
  83. inputImage=imgs.get("input_img"),
  84. )
  85. )
  86. return ResultResponse[InferResult](
  87. logId=log_id,
  88. result=InferResult(
  89. sealRecResults=seal_rec_results,
  90. dataInfo=data_info,
  91. ),
  92. )
  93. return app