// runtime_option.cc
  1. // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. #include "ultra_infer/runtime/runtime.h"
  15. #include "ultra_infer/utils/unique_ptr.h"
  16. #include "ultra_infer/utils/utils.h"
  17. namespace ultra_infer {
// Configure the model to load from files on disk.
// `model_path`/`params_path` are stored verbatim in model_file/params_file;
// `format` records the model serialization format. Clears the in-memory flag
// so backends read the strings as paths, not buffers.
void RuntimeOption::SetModelPath(const std::string &model_path,
                                 const std::string &params_path,
                                 const ModelFormat &format) {
  model_file = model_path;
  params_file = params_path;
  model_format = format;
  model_from_memory_ = false;
}

// Configure the model to load from in-memory buffers.
// The raw buffer contents are stored in the same model_file/params_file
// members used for paths; model_from_memory_ tells backends to treat them
// as serialized model data instead of file names.
void RuntimeOption::SetModelBuffer(const std::string &model_buffer,
                                   const std::string &params_buffer,
                                   const ModelFormat &format) {
  model_file = model_buffer;
  params_file = params_buffer;
  model_format = format;
  model_from_memory_ = true;
}
// Select GPU `gpu_id` as the inference device.
// When the library was built without GPU/OpenCL support this degrades to CPU
// and emits a warning instead of failing.
void RuntimeOption::UseGpu(int gpu_id) {
#if defined(WITH_GPU) || defined(WITH_OPENCL)
  device = Device::GPU;
  device_id = gpu_id;
#if defined(WITH_OPENCL) && defined(ENABLE_LITE_BACKEND)
  // The Paddle Lite backend keeps its own copy of the target device, so the
  // OpenCL build mirrors the choice into paddle_lite_option.
  paddle_lite_option.device = device;
#endif
#else
  FDWARNING << "The UltraInfer didn't compile with GPU, will force to use CPU."
            << std::endl;
  device = Device::CPU;
#endif
}
// Select CPU as the inference device.
void RuntimeOption::UseCpu() { device = Device::CPU; }

// Select Rockchip NPU (RKNPU2) as the inference device, recording which SoC
// (`rknpu2_name`) and which NPU core mask (`rknpu2_core`) to use.
void RuntimeOption::UseRKNPU2(ultra_infer::rknpu2::CpuName rknpu2_name,
                              ultra_infer::rknpu2::CoreMask rknpu2_core) {
  rknpu2_option.cpu_name = rknpu2_name;
  rknpu2_option.core_mask = rknpu2_core;
  device = Device::RKNPU;
}

// Select Horizon Sunrise NPU as the inference device.
void RuntimeOption::UseHorizon() { device = Device::SUNRISENPU; }

// Select TIM-VX as the inference device; the choice is mirrored into
// paddle_lite_option because TIM-VX runs through the Paddle Lite backend.
void RuntimeOption::UseTimVX() {
  device = Device::TIMVX;
  paddle_lite_option.device = device;
}
// Select KunlunXin XPU as the inference device and forward the XPU tuning
// knobs (L3 workspace size, autotune settings, precision, etc.) to every
// backend that was compiled in (Paddle Lite and/or Paddle Inference).
// Falls back to CPU with a warning when built without WITH_KUNLUNXIN.
void RuntimeOption::UseKunlunXin(int kunlunxin_id, int l3_workspace_size,
                                 bool locked, bool autotune,
                                 const std::string &autotune_file,
                                 const std::string &precision,
                                 bool adaptive_seqlen, bool enable_multi_stream,
                                 int64_t gm_default_size) {
#ifdef WITH_KUNLUNXIN
  device = Device::KUNLUNXIN;
#ifdef ENABLE_LITE_BACKEND
  paddle_lite_option.device = device;
  paddle_lite_option.device_id = kunlunxin_id;
  paddle_lite_option.kunlunxin_l3_workspace_size = l3_workspace_size;
  paddle_lite_option.kunlunxin_locked = locked;
  paddle_lite_option.kunlunxin_autotune = autotune;
  paddle_lite_option.kunlunxin_autotune_file = autotune_file;
  paddle_lite_option.kunlunxin_precision = precision;
  paddle_lite_option.kunlunxin_adaptive_seqlen = adaptive_seqlen;
  paddle_lite_option.kunlunxin_enable_multi_stream = enable_multi_stream;
  paddle_lite_option.kunlunxin_gm_default_size = gm_default_size;
#endif
#ifdef ENABLE_PADDLE_BACKEND
  paddle_infer_option.device = device;
  paddle_infer_option.xpu_option.kunlunxin_device_id = kunlunxin_id;
  paddle_infer_option.xpu_option.kunlunxin_l3_workspace_size =
      l3_workspace_size;
  paddle_infer_option.xpu_option.kunlunxin_locked = locked;
  paddle_infer_option.xpu_option.kunlunxin_autotune = autotune;
  paddle_infer_option.xpu_option.kunlunxin_autotune_file = autotune_file;
  paddle_infer_option.xpu_option.kunlunxin_precision = precision;
  paddle_infer_option.xpu_option.kunlunxin_adaptive_seqlen = adaptive_seqlen;
  paddle_infer_option.xpu_option.kunlunxin_enable_multi_stream =
      enable_multi_stream;
  // NOTE(review): gm_default_size is NOT forwarded to Paddle Inference (the
  // assignment below is commented out upstream) — confirm whether this
  // asymmetry with the Lite backend is intentional.
  // paddle_infer_option.xpu_option.kunlunxin_gm_default_size = gm_default_size;
  // use paddle_infer_option.xpu_option.SetXpuConfig() for more options.
#endif
#else
  FDWARNING
      << "The UltraInfer didn't compile with KUNLUNXIN, will force to use CPU."
      << std::endl;
  device = Device::CPU;
#endif
}
// Select Graphcore IPU as the inference device and store the basic IPU
// execution parameters on the Paddle Inference backend option.
// Falls back to CPU with a warning when built without WITH_IPU.
void RuntimeOption::UseIpu(int device_num, int micro_batch_size,
                           bool enable_pipelining, int batches_per_step) {
#ifdef WITH_IPU
  device = Device::IPU;
  paddle_infer_option.ipu_option.ipu_device_num = device_num;
  paddle_infer_option.ipu_option.ipu_micro_batch_size = micro_batch_size;
  paddle_infer_option.ipu_option.ipu_enable_pipelining = enable_pipelining;
  paddle_infer_option.ipu_option.ipu_batches_per_step = batches_per_step;
  // use paddle_infer_option.ipu_option.SetIpuConfig() for more options.
#else
  FDWARNING << "The UltraInfer didn't compile with IPU, will force to use CPU."
            << std::endl;
  device = Device::CPU;
#endif
}
// Select Huawei Ascend as the inference device; mirrored into
// paddle_lite_option, which keeps its own copy of the target device.
void RuntimeOption::UseAscend() {
  device = Device::ASCEND;
  paddle_lite_option.device = device;
}

// Select DirectML as the inference device.
void RuntimeOption::UseDirectML() { device = Device::DIRECTML; }

// Select Sophgo TPU as the inference device. Unlike the other Use*() device
// setters this also switches the backend, since Sophgo only runs through its
// dedicated backend.
void RuntimeOption::UseSophgo() {
  device = Device::SOPHGOTPUD;
  UseSophgoBackend();
}

// Attach an externally created stream (stored as an opaque pointer) for the
// runtime to execute on instead of creating its own.
void RuntimeOption::SetExternalStream(void *external_stream) {
  external_stream_ = external_stream;
}
// Set the CPU thread count used during inference and propagate it to every
// backend option that has its own thread setting (Paddle Lite, ONNX Runtime,
// OpenVINO, Paddle Inference). Asserts on non-positive values.
void RuntimeOption::SetCpuThreadNum(int thread_num) {
  FDASSERT(thread_num > 0, "The thread_num must be greater than 0.");
  cpu_thread_num = thread_num;
  paddle_lite_option.cpu_threads = thread_num;
  ort_option.intra_op_num_threads = thread_num;
  openvino_option.cpu_thread_num = thread_num;
  paddle_infer_option.cpu_thread_num = thread_num;
}
  136. void RuntimeOption::SetOrtGraphOptLevel(int level) {
  137. FDWARNING << "`RuntimeOption::SetOrtGraphOptLevel` will be removed in "
  138. "v1.2.0, please modify its member variables directly, e.g "
  139. "`runtime_option.ort_option.graph_optimization_level = 99`."
  140. << std::endl;
  141. std::vector<int> supported_level{-1, 0, 1, 2};
  142. auto valid_level = std::find(supported_level.begin(), supported_level.end(),
  143. level) != supported_level.end();
  144. FDASSERT(valid_level, "The level must be -1, 0, 1, 2.");
  145. ort_option.graph_optimization_level = level;
  146. }
// ---------------------------------------------------------------------------
// Backend selectors. Each one switches `backend` to the requested engine when
// the corresponding compile-time flag is set, and hard-fails via FDASSERT
// when the library was built without that backend.
// ---------------------------------------------------------------------------

// Use the Paddle Inference backend.
void RuntimeOption::UsePaddleBackend() {
#ifdef ENABLE_PADDLE_BACKEND
  backend = Backend::PDINFER;
#else
  FDASSERT(false, "The UltraInfer didn't compile with Paddle Inference.");
#endif
}

// Use the ONNX Runtime backend.
void RuntimeOption::UseOrtBackend() {
#ifdef ENABLE_ORT_BACKEND
  backend = Backend::ORT;
#else
  FDASSERT(false, "The UltraInfer didn't compile with OrtBackend.");
#endif
}

// Use the Sophgo TPU backend.
void RuntimeOption::UseSophgoBackend() {
#ifdef ENABLE_SOPHGO_BACKEND
  backend = Backend::SOPHGOTPU;
#else
  FDASSERT(false, "The UltraInfer didn't compile with SophgoBackend.");
#endif
}

// Use the Poros (Torch) backend.
void RuntimeOption::UsePorosBackend() {
#ifdef ENABLE_POROS_BACKEND
  backend = Backend::POROS;
#else
  FDASSERT(false, "The UltraInfer didn't compile with PorosBackend.");
#endif
}

// Use the TensorRT backend.
void RuntimeOption::UseTrtBackend() {
#ifdef ENABLE_TRT_BACKEND
  backend = Backend::TRT;
#else
  FDASSERT(false, "The UltraInfer didn't compile with TrtBackend.");
#endif
}

// Use the OpenVINO backend.
void RuntimeOption::UseOpenVINOBackend() {
#ifdef ENABLE_OPENVINO_BACKEND
  backend = Backend::OPENVINO;
#else
  FDASSERT(false, "The UltraInfer didn't compile with OpenVINO.");
#endif
}

// Use the Paddle Lite backend.
void RuntimeOption::UseLiteBackend() {
#ifdef ENABLE_LITE_BACKEND
  backend = Backend::LITE;
#else
  FDASSERT(false, "The UltraInfer didn't compile with Paddle Lite.");
#endif
}

// Use the Horizon NPU backend.
void RuntimeOption::UseHorizonNPUBackend() {
#ifdef ENABLE_HORIZON_BACKEND
  backend = Backend::HORIZONNPU;
#else
  FDASSERT(false, "The UltraInfer didn't compile with horizon");
#endif
}
// Deprecated: toggle MKLDNN for the Paddle Inference backend.
// Prefer assigning option.paddle_infer_option.enable_mkldnn directly.
void RuntimeOption::SetPaddleMKLDNN(bool pd_mkldnn) {
  FDWARNING << "`RuntimeOption::SetPaddleMKLDNN` will be removed in v1.2.0, "
               "please modify its member variable directly, e.g "
               "`option.paddle_infer_option.enable_mkldnn = true`"
            << std::endl;
  paddle_infer_option.enable_mkldnn = pd_mkldnn;
}
// Deprecated: remove a named optimization pass from the Paddle Inference
// backend. Prefer option.paddle_infer_option.DeletePass().
void RuntimeOption::DeletePaddleBackendPass(const std::string &pass_name) {
  FDWARNING
      << "`RuntimeOption::DeletePaddleBackendPass` will be removed in v1.2.0, "
         "please use `option.paddle_infer_option.DeletePass` instead."
      << std::endl;
  paddle_infer_option.DeletePass(pass_name);
}
// Deprecated: enable Paddle Inference backend log output.
// Prefer assigning option.paddle_infer_option.enable_log_info directly.
void RuntimeOption::EnablePaddleLogInfo() {
  FDWARNING << "`RuntimeOption::EnablePaddleLogInfo` will be removed in "
               "v1.2.0, please modify its member variable directly, e.g "
               "`option.paddle_infer_option.enable_log_info = true`"
            << std::endl;
  paddle_infer_option.enable_log_info = true;
}

// Deprecated: disable Paddle Inference backend log output.
// Prefer assigning option.paddle_infer_option.enable_log_info directly.
void RuntimeOption::DisablePaddleLogInfo() {
  FDWARNING << "`RuntimeOption::DisablePaddleLogInfo` will be removed in "
               "v1.2.0, please modify its member variable directly, e.g "
               "`option.paddle_infer_option.enable_log_info = false`"
            << std::endl;
  paddle_infer_option.enable_log_info = false;
}
// Deprecated: run TensorRT through the Paddle Inference backend.
// Forces backend to PDINFER and turns on its TensorRT sub-engine; asserts
// when built without the Paddle Inference backend.
void RuntimeOption::EnablePaddleToTrt() {
#ifdef ENABLE_PADDLE_BACKEND
  FDWARNING << "`RuntimeOption::EnablePaddleToTrt` will be removed in v1.2.0, "
               "please modify its member variable directly, e.g "
               "`option.paddle_infer_option.enable_trt = true`"
            << std::endl;
  FDINFO << "While using TrtBackend with EnablePaddleToTrt, UltraInfer will "
            "change to use Paddle Inference Backend."
         << std::endl;
  backend = Backend::PDINFER;
  paddle_infer_option.enable_trt = true;
#else
  FDASSERT(false, "While using TrtBackend with EnablePaddleToTrt, require the "
                  "UltraInfer is compiled with Paddle Inference Backend, "
                  "please rebuild your UltraInfer.");
#endif
}
// Deprecated: set the MKLDNN shape-cache capacity on the Paddle Inference
// backend. Prefer assigning option.paddle_infer_option.mkldnn_cache_size.
void RuntimeOption::SetPaddleMKLDNNCacheSize(int size) {
  FDWARNING << "`RuntimeOption::SetPaddleMKLDNNCacheSize` will be removed in "
               "v1.2.0, please modify its member variable directly, e.g "
               "`option.paddle_infer_option.mkldnn_cache_size = size`."
            << std::endl;
  paddle_infer_option.mkldnn_cache_size = size;
}
  259. void RuntimeOption::SetOpenVINODevice(const std::string &name) {
  260. FDWARNING << "`RuntimeOption::SetOpenVINODevice` will be removed in v1.2.0, "
  261. "please use `RuntimeOption.openvino_option.SetDeivce(const "
  262. "std::string&)` instead."
  263. << std::endl;
  264. openvino_option.SetDevice(name);
  265. }
// Deprecated: enable float16 precision on the Paddle Lite backend.
// Prefer assigning runtime_option.paddle_lite_option.enable_fp16 directly.
void RuntimeOption::EnableLiteFP16() {
  FDWARNING << "`RuntimeOption::EnableLiteFP16` will be removed in v1.2.0, "
               "please modify its member variables directly, e.g "
               "`runtime_option.paddle_lite_option.enable_fp16 = true`"
            << std::endl;
  paddle_lite_option.enable_fp16 = true;
}
  273. void RuntimeOption::DisableLiteFP16() {
  274. FDWARNING << "`RuntimeOption::EnableLiteFP16` will be removed in v1.2.0, "
  275. "please modify its member variables directly, e.g "
  276. "`runtime_option.paddle_lite_option.enable_fp16 = false`"
  277. << std::endl;
  278. paddle_lite_option.enable_fp16 = false;
  279. }
// Deprecated no-op: int8 execution is chosen automatically from the model
// (quantized models run int8, others run float), so this only warns.
void RuntimeOption::EnableLiteInt8() {
  FDWARNING << "RuntimeOption::EnableLiteInt8 is a useless api, this calling "
               "will not bring any effects, and will be removed in v1.2.0. if "
               "you load a quantized model, it will automatically run with "
               "int8 mode; otherwise it will run with float mode."
            << std::endl;
}

// Deprecated no-op: see EnableLiteInt8 — mode selection is automatic.
void RuntimeOption::DisableLiteInt8() {
  FDWARNING << "RuntimeOption::DisableLiteInt8 is a useless api, this calling "
               "will not bring any effects, and will be removed in v1.2.0. if "
               "you load a quantized model, it will automatically run with "
               "int8 mode; otherwise it will run with float mode."
            << std::endl;
}
// Deprecated: set the Paddle Lite CPU power mode.
// Prefer assigning runtime_option.paddle_lite_option.power_mode directly.
// NOTE(review): the example in the hint assigns the integer 3 to power_mode,
// which is a LitePowerMode enum here — confirm the suggested usage compiles
// for callers.
void RuntimeOption::SetLitePowerMode(LitePowerMode mode) {
  FDWARNING << "`RuntimeOption::SetLitePowerMode` will be removed in v1.2.0, "
               "please modify its member variable directly, e.g "
               "`runtime_option.paddle_lite_option.power_mode = 3;`"
            << std::endl;
  paddle_lite_option.power_mode = mode;
}
// Deprecated: set the directory where Paddle Lite stores/loads its optimized
// model. Prefer assigning paddle_lite_option.optimized_model_dir directly.
void RuntimeOption::SetLiteOptimizedModelDir(
    const std::string &optimized_model_dir) {
  FDWARNING
      << "`RuntimeOption::SetLiteOptimizedModelDir` will be removed in v1.2.0, "
         "please modify its member variable directly, e.g "
         "`runtime_option.paddle_lite_option.optimized_model_dir = \"...\"`"
      << std::endl;
  paddle_lite_option.optimized_model_dir = optimized_model_dir;
}

// Deprecated: set the NNAdapter subgraph-partition config file path for the
// Paddle Lite backend. Prefer assigning the member directly.
void RuntimeOption::SetLiteSubgraphPartitionPath(
    const std::string &nnadapter_subgraph_partition_config_path) {
  FDWARNING << "`RuntimeOption::SetLiteSubgraphPartitionPath` will be removed "
               "in v1.2.0, please modify its member variable directly, e.g "
               "`runtime_option.paddle_lite_option.nnadapter_subgraph_"
               "partition_config_path = \"...\";` "
            << std::endl;
  paddle_lite_option.nnadapter_subgraph_partition_config_path =
      nnadapter_subgraph_partition_config_path;
}
// Deprecated: supply the NNAdapter subgraph-partition config as an in-memory
// buffer (instead of a file path). Prefer assigning the member directly.
void RuntimeOption::SetLiteSubgraphPartitionConfigBuffer(
    const std::string &nnadapter_subgraph_partition_config_buffer) {
  FDWARNING
      << "`RuntimeOption::SetLiteSubgraphPartitionConfigBuffer` will be "
         "removed in v1.2.0, please modify its member variable directly, e.g "
         "`runtime_option.paddle_lite_option.nnadapter_subgraph_partition_"
         "config_buffer = ...`"
      << std::endl;
  paddle_lite_option.nnadapter_subgraph_partition_config_buffer =
      nnadapter_subgraph_partition_config_buffer;
}

// Deprecated: set the NNAdapter context-properties string for the Paddle Lite
// backend. Prefer assigning the member directly.
void RuntimeOption::SetLiteContextProperties(
    const std::string &nnadapter_context_properties) {
  FDWARNING << "`RuntimeOption::SetLiteContextProperties` will be removed in "
               "v1.2.0, please modify its member variable directly, e.g "
               "`runtime_option.paddle_lite_option.nnadapter_context_"
               "properties = ...`"
            << std::endl;
  paddle_lite_option.nnadapter_context_properties =
      nnadapter_context_properties;
}
// Deprecated: set the NNAdapter model-cache directory for the Paddle Lite
// backend. Prefer assigning the member directly.
void RuntimeOption::SetLiteModelCacheDir(
    const std::string &nnadapter_model_cache_dir) {
  FDWARNING
      << "`RuntimeOption::SetLiteModelCacheDir` will be removed in v1.2.0, "
         "please modify its member variable directly, e.g "
         "`runtime_option.paddle_lite_option.nnadapter_model_cache_dir = ...`"
      << std::endl;
  paddle_lite_option.nnadapter_model_cache_dir = nnadapter_model_cache_dir;
}
  350. void RuntimeOption::SetLiteDynamicShapeInfo(
  351. const std::map<std::string, std::vector<std::vector<int64_t>>>
  352. &nnadapter_dynamic_shape_info) {
  353. FDWARNING << "`RuntimeOption::SetLiteDynamicShapeInfo` will be removed in "
  354. "v1.2.0, please modify its member variable directly, e.g "
  355. "`runtime_option.paddle_lite_option.paddle_lite_option."
  356. "nnadapter_dynamic_shape_info = ...`"
  357. << std::endl;
  358. paddle_lite_option.nnadapter_dynamic_shape_info =
  359. nnadapter_dynamic_shape_info;
  360. }
  361. void RuntimeOption::SetLiteMixedPrecisionQuantizationConfigPath(
  362. const std::string &nnadapter_mixed_precision_quantization_config_path) {
  363. FDWARNING
  364. << "`RuntimeOption::SetLiteMixedPrecisionQuantizationConfigPath` will be "
  365. "removed in v1.2.0, please modify its member variable directly, e.g "
  366. "`runtime_option.paddle_lite_option.paddle_lite_option.nnadapter_"
  367. "mixed_precision_quantization_config_path = ...`"
  368. << std::endl;
  369. paddle_lite_option.nnadapter_mixed_precision_quantization_config_path =
  370. nnadapter_mixed_precision_quantization_config_path;
  371. }
// Deprecated: declare the min/opt/max shapes of a TensorRT input.
// Prefer RuntimeOption.trt_option.SetShape().
void RuntimeOption::SetTrtInputShape(const std::string &input_name,
                                     const std::vector<int32_t> &min_shape,
                                     const std::vector<int32_t> &opt_shape,
                                     const std::vector<int32_t> &max_shape) {
  FDWARNING << "`RuntimeOption::SetTrtInputShape` will be removed in v1.2.0, "
               "please use `RuntimeOption.trt_option.SetShape()` instead."
            << std::endl;
  trt_option.SetShape(input_name, min_shape, opt_shape, max_shape);
}

// Deprecated: supply calibration-style data for a TensorRT input at its
// min/opt/max shapes. Prefer RuntimeOption.trt_option.SetInputData().
void RuntimeOption::SetTrtInputData(const std::string &input_name,
                                    const std::vector<float> &min_shape_data,
                                    const std::vector<float> &opt_shape_data,
                                    const std::vector<float> &max_shape_data) {
  FDWARNING << "`RuntimeOption::SetTrtInputData` will be removed in v1.2.0, "
               "please use `RuntimeOption.trt_option.SetInputData()` instead."
            << std::endl;
  trt_option.SetInputData(input_name, min_shape_data, opt_shape_data,
                          max_shape_data);
}
// Deprecated: set the TensorRT builder workspace limit (bytes).
// Prefer assigning RuntimeOption.trt_option.max_workspace_size directly.
void RuntimeOption::SetTrtMaxWorkspaceSize(size_t max_workspace_size) {
  FDWARNING << "`RuntimeOption::SetTrtMaxWorkspaceSize` will be removed in "
               "v1.2.0, please modify its member variable directly, e.g "
               "`RuntimeOption.trt_option.max_workspace_size = "
            << max_workspace_size << "`." << std::endl;
  trt_option.max_workspace_size = max_workspace_size;
}

// Deprecated: set the TensorRT maximum batch size.
// Prefer assigning RuntimeOption.trt_option.max_batch_size directly.
void RuntimeOption::SetTrtMaxBatchSize(size_t max_batch_size) {
  FDWARNING << "`RuntimeOption::SetTrtMaxBatchSize` will be removed in v1.2.0, "
               "please modify its member variable directly, e.g "
               "`RuntimeOption.trt_option.max_batch_size = "
            << max_batch_size << "`." << std::endl;
  trt_option.max_batch_size = max_batch_size;
}
// Deprecated: enable FP16 precision for the TensorRT backend.
// Prefer assigning runtime_option.trt_option.enable_fp16 directly.
void RuntimeOption::EnableTrtFP16() {
  FDWARNING << "`RuntimeOption::EnableTrtFP16` will be removed in v1.2.0, "
               "please modify its member variable directly, e.g "
               "`runtime_option.trt_option.enable_fp16 = true;`"
            << std::endl;
  trt_option.enable_fp16 = true;
}

// Deprecated: disable FP16 precision for the TensorRT backend.
// Prefer assigning runtime_option.trt_option.enable_fp16 directly.
void RuntimeOption::DisableTrtFP16() {
  FDWARNING << "`RuntimeOption::DisableTrtFP16` will be removed in v1.2.0, "
               "please modify its member variable directly, e.g "
               "`runtime_option.trt_option.enable_fp16 = false;`"
            << std::endl;
  trt_option.enable_fp16 = false;
}
// Toggle the enable_pinned_memory flag consumed by the backends.
void RuntimeOption::EnablePinnedMemory() { enable_pinned_memory = true; }
void RuntimeOption::DisablePinnedMemory() { enable_pinned_memory = false; }
// Deprecated: set the file used to serialize/deserialize the TensorRT engine.
// Prefer assigning runtime_option.trt_option.serialize_file directly.
void RuntimeOption::SetTrtCacheFile(const std::string &cache_file_path) {
  FDWARNING << "`RuntimeOption::SetTrtCacheFile` will be removed in v1.2.0, "
               "please modify its member variable directly, e.g "
               "`runtime_option.trt_option.serialize_file = \""
            << cache_file_path << "\"." << std::endl;
  trt_option.serialize_file = cache_file_path;
}
// Deprecated: set the number of OpenVINO execution streams.
// Prefer assigning runtime_option.openvino_option.num_streams directly.
void RuntimeOption::SetOpenVINOStreams(int num_streams) {
  FDWARNING << "`RuntimeOption::SetOpenVINOStreams` will be removed in v1.2.0, "
               "please modify its member variable directly, e.g "
               "`runtime_option.openvino_option.num_streams = "
            << num_streams << "`." << std::endl;
  openvino_option.num_streams = num_streams;
}
// Deprecated: enable TensorRT shape collection in the Paddle Inference
// backend. Prefer assigning paddle_infer_option.collect_trt_shape directly.
void RuntimeOption::EnablePaddleTrtCollectShape() {
  FDWARNING << "`RuntimeOption::EnablePaddleTrtCollectShape` will be removed "
               "in v1.2.0, please modify its member variable directly, e.g "
               "runtime_option.paddle_infer_option.collect_trt_shape = true`."
            << std::endl;
  paddle_infer_option.collect_trt_shape = true;
}

// Deprecated: disable TensorRT shape collection in the Paddle Inference
// backend. Prefer assigning paddle_infer_option.collect_trt_shape directly.
void RuntimeOption::DisablePaddleTrtCollectShape() {
  FDWARNING << "`RuntimeOption::DisablePaddleTrtCollectShape` will be removed "
               "in v1.2.0, please modify its member variable directly, e.g "
               "runtime_option.paddle_infer_option.collect_trt_shape = false`."
            << std::endl;
  paddle_infer_option.collect_trt_shape = false;
}
  449. void RuntimeOption::DisablePaddleTrtOPs(const std::vector<std::string> &ops) {
  450. FDWARNING << "`RuntimeOption::DisablePaddleTrtOps` will be removed in "
  451. "v.1.20, please use "
  452. "`runtime_option.paddle_infer_option.DisableTrtOps` instead."
  453. << std::endl;
  454. paddle_infer_option.DisableTrtOps(ops);
  455. }
// Use the TVM backend; asserts when built without ENABLE_TVM_BACKEND.
void RuntimeOption::UseTVMBackend() {
#ifdef ENABLE_TVM_BACKEND
  backend = Backend::TVM;
#else
  FDASSERT(false, "The UltraInfer didn't compile with TVMBackend.");
#endif
}
  463. } // namespace ultra_infer