option.h

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "ultra_infer/core/fd_type.h"

#include <iostream>
#include <map>
#include <string>
#include <vector>

namespace ultra_infer {
/*! @brief Option object to configure the TensorRT backend
 */
struct TrtBackendOption {
  /// `max_batch_size`; deprecated since TensorRT 8.x
  size_t max_batch_size = 32;
  /// `max_workspace_size` for TensorRT, in bytes (default 1 GiB)
  size_t max_workspace_size = 1 << 30;
  /// Enable logging while converting an ONNX model to a TensorRT engine
  bool enable_log_info = false;
  /// Enable half-precision inference; on devices that do not support half
  /// precision, it falls back to float32 mode
  bool enable_fp16 = false;
  /** \brief Set the shape range of an input tensor for a model with dynamic
   * input shapes while using the TensorRT backend
   *
   * \param[in] tensor_name The name of the dynamic-shape input tensor
   * \param[in] min The minimal shape for the input tensor
   * \param[in] opt The optimized shape for the input tensor; set it to the
   * most common shape. If left as the default value, it is kept the same as
   * `min`
   * \param[in] max The maximum shape for the input tensor. If left as the
   * default value, it is kept the same as `min`
   */
  void SetShape(const std::string &tensor_name, const std::vector<int32_t> &min,
                const std::vector<int32_t> &opt = std::vector<int32_t>(),
                const std::vector<int32_t> &max = std::vector<int32_t>()) {
    min_shape[tensor_name].clear();
    max_shape[tensor_name].clear();
    opt_shape[tensor_name].clear();
    min_shape[tensor_name].assign(min.begin(), min.end());
    if (opt.size() == 0) {
      opt_shape[tensor_name].assign(min.begin(), min.end());
    } else {
      opt_shape[tensor_name].assign(opt.begin(), opt.end());
    }
    if (max.size() == 0) {
      max_shape[tensor_name].assign(min.begin(), min.end());
    } else {
      max_shape[tensor_name].assign(max.begin(), max.end());
    }
  }
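
  // Illustrative usage sketch (not part of the original header; the tensor
  // name "x" and the shapes are hypothetical): configure a dynamic batch
  // dimension ranging from 1 to 8, with 4 as the most common batch size.
  //
  //   TrtBackendOption option;
  //   option.SetShape("x", {1, 3, 224, 224}, {4, 3, 224, 224},
  //                   {8, 3, 224, 224});
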
  /** \brief Set input data for an input tensor of the model while using the
   * TensorRT backend
   *
   * \param[in] tensor_name The name of the dynamic-shape input tensor
   * \param[in] min_data The input data for the minimal shape of the input
   * tensor
   * \param[in] opt_data The input data for the optimized shape of the input
   * tensor. If left as the default value, it is kept the same as `min_data`
   * \param[in] max_data The input data for the maximum shape of the input
   * tensor. If left as the default value, it is kept the same as `min_data`
   */
  void SetInputData(const std::string &tensor_name,
                    const std::vector<float> min_data,
                    const std::vector<float> opt_data = std::vector<float>(),
                    const std::vector<float> max_data = std::vector<float>()) {
    max_input_data[tensor_name].clear();
    min_input_data[tensor_name].clear();
    opt_input_data[tensor_name].clear();
    min_input_data[tensor_name].assign(min_data.begin(), min_data.end());
    if (opt_data.empty()) {
      opt_input_data[tensor_name].assign(min_data.begin(), min_data.end());
    } else {
      opt_input_data[tensor_name].assign(opt_data.begin(), opt_data.end());
    }
    if (max_data.empty()) {
      max_input_data[tensor_name].assign(min_data.begin(), min_data.end());
    } else {
      max_input_data[tensor_name].assign(max_data.begin(), max_data.end());
    }
  }
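
  // Illustrative usage sketch (not part of the original header; the tensor
  // name "x" and the values are hypothetical): provide input data whose sizes
  // match the min/opt/max shapes registered via SetShape above.
  //
  //   std::vector<float> min_data(1 * 3 * 224 * 224, 0.f);
  //   std::vector<float> opt_data(4 * 3 * 224 * 224, 0.f);
  //   std::vector<float> max_data(8 * 3 * 224 * 224, 0.f);
  //   option.SetInputData("x", min_data, opt_data, max_data);
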
  /// Set the cache file path while using the TensorRT backend.
  /// Loading a Paddle/ONNX model and initializing TensorRT can take a long
  /// time, so through this interface the built TensorRT engine is saved to
  /// `serialize_file` and loaded directly the next time the code runs.
  std::string serialize_file = "";

  std::map<std::string, std::vector<float>> max_input_data;
  std::map<std::string, std::vector<float>> min_input_data;
  std::map<std::string, std::vector<float>> opt_input_data;

  // The parameters below may be removed in the next version; please do not
  // access or use them directly.
  std::map<std::string, std::vector<int32_t>> max_shape;
  std::map<std::string, std::vector<int32_t>> min_shape;
  std::map<std::string, std::vector<int32_t>> opt_shape;
  bool enable_pinned_memory = false;
  void *external_stream_ = nullptr;
  int gpu_id = 0;
  std::string model_file = "";  // Path of the model file
  std::string params_file = ""; // Path of the parameters file, can be empty
  // Format of the input model
  ModelFormat model_format = ModelFormat::AUTOREC;
};
} // namespace ultra_infer
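
For reference, a minimal end-to-end sketch of configuring this struct might look like the following. Everything here is illustrative: the include path, the input name "x", and the shapes are assumptions rather than taken from the header; only the TrtBackendOption members declared above are exercised.

// Minimal usage sketch; the include path is an assumption, not confirmed by
// this header.
#include "ultra_infer/runtime/backends/tensorrt/option.h" // assumed path

int main() {
  ultra_infer::TrtBackendOption option;
  option.enable_fp16 = true;           // falls back to FP32 where unsupported
  option.max_workspace_size = 1 << 30; // 1 GiB of TensorRT workspace
  option.serialize_file = "model.trt"; // cache the built engine on disk
  // Dynamic batch from 1 to 8, with 4 as the most common size ("x" is a
  // hypothetical input name).
  option.SetShape("x", {1, 3, 224, 224}, {4, 3, 224, 224}, {8, 3, 224, 224});
  return 0;
}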