instance_segmentation.py

# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import List

import numpy as np
import pycocotools.mask as mask_util
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel, Field
from typing_extensions import Annotated, TypeAlias

from .....utils import logging
from ...single_model_pipeline import InstanceSegmentation
from .. import utils as serving_utils
from ..app import AppConfig, create_app
from ..models import Response, ResultResponse
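

# Pydantic request/response schemas for the /instance-segmentation endpoint.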
class InferRequest(BaseModel):
    image: str


BoundingBox: TypeAlias = Annotated[List[float], Field(min_length=4, max_length=4)]


class Mask(BaseModel):
    rleResult: str
    size: Annotated[List[int], Field(min_length=2, max_length=2)]


class Instance(BaseModel):
    bbox: BoundingBox
    categoryId: int
    score: float
    mask: Mask


class InferResult(BaseModel):
    instances: List[Instance]
    image: str
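

# Encode a binary instance mask as a COCO-style run-length encoding (RLE),
# returning only the `counts` string; the mask size is carried in `Mask.size`.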
def _rle(mask: np.ndarray) -> str:
    rle_res = mask_util.encode(np.asarray(mask[..., None], order="F", dtype="uint8"))[0]
    return rle_res["counts"].decode("utf-8")


def create_pipeline_app(
    pipeline: InstanceSegmentation, app_config: AppConfig
) -> FastAPI:
    app, ctx = create_app(
        pipeline=pipeline,
        app_config=app_config,
        app_aiohttp_session=True,
    )

    @app.post(
        "/instance-segmentation",
        operation_id="infer",
        responses={422: {"model": Response}},
    )
    async def _infer(request: InferRequest) -> ResultResponse[InferResult]:
        pipeline = ctx.pipeline
        aiohttp_session = ctx.aiohttp_session

        try:
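            # Fetch the raw image bytes referenced by `request.image` and
            # decode them into an array for inference.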
            file_bytes = await serving_utils.get_raw_bytes(
                request.image, aiohttp_session
            )
            image = serving_utils.image_bytes_to_array(file_bytes)

            result = (await pipeline.infer(image))[0]
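
            # Package each detected object and its RLE-encoded mask into the
            # response schema.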
            instances: List[Instance] = []
            for obj, mask in zip(result["boxes"], result["masks"]):
                rle_res = _rle(mask)
                mask = Mask(rleResult=rle_res, size=mask.shape)
                instances.append(
                    Instance(
                        bbox=obj["coordinate"],
                        categoryId=obj["cls_id"],
                        score=obj["score"],
                        mask=mask,
                    )
                )
            output_image_base64 = serving_utils.image_to_base64(result.img)

            return ResultResponse(
                logId=serving_utils.generate_log_id(),
                errorCode=0,
                errorMsg="Success",
                result=InferResult(instances=instances, image=output_image_base64),
            )

        except Exception as e:
            logging.exception(e)
            raise HTTPException(status_code=500, detail="Internal server error")

    return app
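

# Illustrative request body for POST /instance-segmentation (sketch only; whether
# `image` may be a URL, a local path, or a base64-encoded string depends on what
# `serving_utils.get_raw_bytes` accepts, and the URL below is hypothetical):
#
# {
#     "image": "https://example.com/sample.jpg"
# }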