semantic_segmentation.py

# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import List

from fastapi import FastAPI, HTTPException
from pydantic import BaseModel, Field
from typing_extensions import Annotated

from .....utils import logging
from ...single_model_pipeline import SemanticSegmentation
from .. import utils as serving_utils
from ..app import AppConfig, create_app
from ..models import Response, ResultResponse


class InferRequest(BaseModel):
    image: str


class InferResult(BaseModel):
    labelMap: List[int]
    size: Annotated[List[int], Field(min_length=2, max_length=2)]
    image: str


def create_pipeline_app(
    pipeline: SemanticSegmentation, app_config: AppConfig
) -> FastAPI:
    app, ctx = create_app(
        pipeline=pipeline, app_config=app_config, app_aiohttp_session=True
    )

    @app.post(
        "/semantic-segmentation",
        operation_id="infer",
        responses={422: {"model": Response}},
    )
    async def _infer(request: InferRequest) -> ResultResponse[InferResult]:
        pipeline = ctx.pipeline
        aiohttp_session = ctx.aiohttp_session

        try:
            # Resolve the request image into raw bytes and decode it into an
            # array the pipeline can consume.
            file_bytes = await serving_utils.get_raw_bytes(
                request.image, aiohttp_session
            )
            image = serving_utils.image_bytes_to_array(file_bytes)

            result = (await pipeline.infer(image))[0]

            # Flatten the H x W prediction into a row-major label map and
            # record its size as [height, width].
            pred = result["pred"][0].tolist()
            size = [len(pred), len(pred[0])]
            label_map = [item for sublist in pred for item in sublist]
            # Base64-encode the pipeline's rendered result image (as RGB).
            output_image_base64 = serving_utils.base64_encode(
                serving_utils.image_to_bytes(result.img.convert("RGB"))
            )

            return ResultResponse(
                logId=serving_utils.generate_log_id(),
                errorCode=0,
                errorMsg="Success",
                result=InferResult(
                    labelMap=label_map, size=size, image=output_image_base64
                ),
            )

        except Exception as e:
            # Log unexpected failures and surface a generic 500 to the client.
            logging.exception(e)
            raise HTTPException(status_code=500, detail="Internal server error")

    return app
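

# ---------------------------------------------------------------------------
# Usage sketch: a minimal client call against this endpoint. The host, port,
# and image path are placeholders, and it assumes `image` may carry a
# Base64-encoded file (as suggested by the serving_utils.get_raw_bytes call
# above); adapt these to your deployment.
#
#     import base64
#     import requests
#
#     with open("example.jpg", "rb") as f:
#         payload = {"image": base64.b64encode(f.read()).decode("ascii")}
#
#     resp = requests.post(
#         "http://localhost:8080/semantic-segmentation", json=payload
#     )
#     resp.raise_for_status()
#     result = resp.json()["result"]
#
#     # `labelMap` is a row-major flattened map of per-pixel class ids and
#     # `size` is [height, width], so the 2-D map can be rebuilt like this:
#     height, width = result["size"]
#     rows = [
#         result["labelMap"][r * width : (r + 1) * width] for r in range(height)
#     ]
# ---------------------------------------------------------------------------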