text_model.cc

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ultra_infer/text/text_model.h"

#include "ultra_infer/text/common/option.h"
#include "ultra_infer/text/common/result.h"
#include "ultra_infer/text/postprocessor/postprocessor.h"
#include "ultra_infer/text/preprocessor/preprocessor.h"

namespace ultra_infer {
namespace text {

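// Runs the full single-sample pipeline: encode `raw_text` into input tensors,
// run the inference runtime, then decode the output tensors into `result`.
// Logs via FDERROR and returns false if any stage fails; `option` is accepted
// but not consulted in this implementation.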
bool TextModel::Predict(const std::string &raw_text, Result *result,
                        const PredictionOption &option) {
  // Preprocess
  std::vector<FDTensor> input_tensor;
  std::vector<FDTensor> output_tensor;
  if (!preprocessor_->Encode(raw_text, &input_tensor)) {
    FDERROR << "Failed to preprocess input data while using model:"
            << ModelName() << "." << std::endl;
    return false;
  }
  // Inference Runtime
  if (!Infer(input_tensor, &output_tensor)) {
    FDERROR << "Failed to run inference while using model:" << ModelName()
            << "." << std::endl;
    return false;
  }
  // Postprocess (note the negation: the error path must fire only when
  // Decode fails, not when it succeeds)
  if (!postprocessor_->Decode(output_tensor, result)) {
    FDERROR << "Failed to postprocess while using model:" << ModelName()
            << "." << std::endl;
    return false;
  }
  return true;
}
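
// Batch variant of Predict: encodes every string in `raw_text_array` in one
// pass and decodes the batched outputs into `results`. Returns false if any
// stage fails.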
bool TextModel::PredictBatch(const std::vector<std::string> &raw_text_array,
                             Result *results, const PredictionOption &option) {
  // Preprocess
  std::vector<FDTensor> input_tensor;
  std::vector<FDTensor> output_tensor;
  if (!preprocessor_->EncodeBatch(raw_text_array, &input_tensor)) {
    FDERROR << "Failed to preprocess input data while using model:"
            << ModelName() << "." << std::endl;
    return false;
  }
  // Inference Runtime
  if (!Infer(input_tensor, &output_tensor)) {
    FDERROR << "Failed to run inference while using model:" << ModelName()
            << "." << std::endl;
    return false;
  }
  // Postprocess (same fix as Predict: negate so failure triggers the error)
  if (!postprocessor_->DecodeBatch(output_tensor, results)) {
    FDERROR << "Failed to postprocess while using model:" << ModelName()
            << "." << std::endl;
    return false;
  }
  return true;
}

} // namespace text
} // namespace ultra_infer
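
// A minimal usage sketch (an assumption, not part of this file):
// `MyTextModel` stands in for a concrete TextModel subclass that wires up
// `preprocessor_` and `postprocessor_`, and its constructor arguments are
// hypothetical.
//
//   ultra_infer::text::Result result;
//   ultra_infer::text::PredictionOption option;
//   MyTextModel model("model.pdmodel", "model.pdiparams"); // hypothetical ctor
//   if (model.Predict("An example input sentence.", &result, option)) {
//     // Inspect `result` on success; on failure the error was already logged.
//   }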