// runtime_option.cc
  1. // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. #include "ultra_infer/runtime/runtime.h"
  15. #include "ultra_infer/utils/unique_ptr.h"
  16. #include "ultra_infer/utils/utils.h"
  17. namespace ultra_infer {
  18. void RuntimeOption::SetModelPath(const std::string &model_path,
  19. const std::string &params_path,
  20. const ModelFormat &format) {
  21. model_file = model_path;
  22. params_file = params_path;
  23. model_format = format;
  24. model_from_memory_ = false;
  25. }
  26. void RuntimeOption::SetModelBuffer(const std::string &model_buffer,
  27. const std::string &params_buffer,
  28. const ModelFormat &format) {
  29. model_file = model_buffer;
  30. params_file = params_buffer;
  31. model_format = format;
  32. model_from_memory_ = true;
  33. }
// Select GPU `gpu_id` as the inference device. Effective only when UltraInfer
// was compiled with GPU or OpenCL support; otherwise warns and falls back to
// CPU.
void RuntimeOption::UseGpu(int gpu_id) {
#if defined(WITH_GPU) || defined(WITH_OPENCL)
  device = Device::GPU;
  device_id = gpu_id;
#if defined(WITH_OPENCL) && defined(ENABLE_LITE_BACKEND)
  // Keep the Paddle Lite backend's device selection in sync.
  paddle_lite_option.device = device;
#endif
#else
  FDWARNING << "The UltraInfer didn't compile with GPU, will force to use CPU."
            << std::endl;
  device = Device::CPU;
#endif
}
  47. void RuntimeOption::UseCpu() { device = Device::CPU; }
  48. void RuntimeOption::UseRKNPU2(ultra_infer::rknpu2::CpuName rknpu2_name,
  49. ultra_infer::rknpu2::CoreMask rknpu2_core) {
  50. rknpu2_option.cpu_name = rknpu2_name;
  51. rknpu2_option.core_mask = rknpu2_core;
  52. device = Device::RKNPU;
  53. }
// Run inference on a Horizon Sunrise NPU.
void RuntimeOption::UseHorizon() { device = Device::SUNRISENPU; }
// Run inference on a TIM-VX device; the Paddle Lite backend mirrors the
// device choice.
void RuntimeOption::UseTimVX() {
  device = Device::TIMVX;
  paddle_lite_option.device = device;
}
// Run inference on a KunlunXin XPU. `kunlunxin_id` selects the card; the
// remaining arguments tune L3 workspace usage, autotuning, precision,
// adaptive sequence length, multi-stream execution and the global-memory
// default size. Falls back to CPU (with a warning) when UltraInfer was built
// without KunlunXin support.
void RuntimeOption::UseKunlunXin(int kunlunxin_id, int l3_workspace_size,
                                 bool locked, bool autotune,
                                 const std::string &autotune_file,
                                 const std::string &precision,
                                 bool adaptive_seqlen, bool enable_multi_stream,
                                 int64_t gm_default_size) {
#ifdef WITH_KUNLUNXIN
  device = Device::KUNLUNXIN;
#ifdef ENABLE_LITE_BACKEND
  // Mirror the settings into the Paddle Lite backend options.
  paddle_lite_option.device = device;
  paddle_lite_option.device_id = kunlunxin_id;
  paddle_lite_option.kunlunxin_l3_workspace_size = l3_workspace_size;
  paddle_lite_option.kunlunxin_locked = locked;
  paddle_lite_option.kunlunxin_autotune = autotune;
  paddle_lite_option.kunlunxin_autotune_file = autotune_file;
  paddle_lite_option.kunlunxin_precision = precision;
  paddle_lite_option.kunlunxin_adaptive_seqlen = adaptive_seqlen;
  paddle_lite_option.kunlunxin_enable_multi_stream = enable_multi_stream;
  paddle_lite_option.kunlunxin_gm_default_size = gm_default_size;
#endif
#ifdef ENABLE_PADDLE_BACKEND
  // Mirror the same settings into the Paddle Inference XPU options.
  paddle_infer_option.device = device;
  paddle_infer_option.xpu_option.kunlunxin_device_id = kunlunxin_id;
  paddle_infer_option.xpu_option.kunlunxin_l3_workspace_size =
      l3_workspace_size;
  paddle_infer_option.xpu_option.kunlunxin_locked = locked;
  paddle_infer_option.xpu_option.kunlunxin_autotune = autotune;
  paddle_infer_option.xpu_option.kunlunxin_autotune_file = autotune_file;
  paddle_infer_option.xpu_option.kunlunxin_precision = precision;
  paddle_infer_option.xpu_option.kunlunxin_adaptive_seqlen = adaptive_seqlen;
  paddle_infer_option.xpu_option.kunlunxin_enable_multi_stream =
      enable_multi_stream;
  // NOTE: gm_default_size is intentionally not forwarded here.
  // paddle_infer_option.xpu_option.kunlunxin_gm_default_size = gm_default_size;
  // use paddle_infer_option.xpu_option.SetXpuConfig() for more options.
#endif
#else
  FDWARNING
      << "The UltraInfer didn't compile with KUNLUNXIN, will force to use CPU."
      << std::endl;
  device = Device::CPU;
#endif
}
// Run inference on Graphcore IPUs. Effective only when UltraInfer was built
// with WITH_IPU; otherwise warns and falls back to CPU.
void RuntimeOption::UseIpu(int device_num, int micro_batch_size,
                           bool enable_pipelining, int batches_per_step) {
#ifdef WITH_IPU
  device = Device::IPU;
  paddle_infer_option.ipu_option.ipu_device_num = device_num;
  paddle_infer_option.ipu_option.ipu_micro_batch_size = micro_batch_size;
  paddle_infer_option.ipu_option.ipu_enable_pipelining = enable_pipelining;
  paddle_infer_option.ipu_option.ipu_batches_per_step = batches_per_step;
  // use paddle_infer_option.ipu_option.SetIpuConfig() for more options.
#else
  FDWARNING << "The UltraInfer didn't compile with IPU, will force to use CPU."
            << std::endl;
  device = Device::CPU;
#endif
}
// Run inference on a Huawei Ascend NPU with id `npu_id`; the Paddle Lite
// backend mirrors the device choice.
void RuntimeOption::UseAscend(int npu_id) {
  device = Device::ASCEND;
  paddle_lite_option.device = device;
  device_id = npu_id;
}
// Run inference through DirectML.
void RuntimeOption::UseDirectML() { device = Device::DIRECTML; }
// Run inference on a Sophgo TPU; also selects the Sophgo backend, which
// asserts when UltraInfer was compiled without it.
void RuntimeOption::UseSophgo() {
  device = Device::SOPHGOTPUD;
  UseSophgoBackend();
}
// Hand the runtime an externally created stream (stored as an opaque pointer).
void RuntimeOption::SetExternalStream(void *external_stream) {
  external_stream_ = external_stream;
}
  129. void RuntimeOption::SetCpuThreadNum(int thread_num) {
  130. FDASSERT(thread_num > 0, "The thread_num must be greater than 0.");
  131. cpu_thread_num = thread_num;
  132. paddle_lite_option.cpu_threads = thread_num;
  133. ort_option.intra_op_num_threads = thread_num;
  134. openvino_option.cpu_thread_num = thread_num;
  135. paddle_infer_option.cpu_thread_num = thread_num;
  136. }
  137. void RuntimeOption::SetOrtGraphOptLevel(int level) {
  138. FDWARNING << "`RuntimeOption::SetOrtGraphOptLevel` will be removed in "
  139. "v1.2.0, please modify its member variables directly, e.g "
  140. "`runtime_option.ort_option.graph_optimization_level = 99`."
  141. << std::endl;
  142. std::vector<int> supported_level{-1, 0, 1, 2};
  143. auto valid_level = std::find(supported_level.begin(), supported_level.end(),
  144. level) != supported_level.end();
  145. FDASSERT(valid_level, "The level must be -1, 0, 1, 2.");
  146. ort_option.graph_optimization_level = level;
  147. }
// use paddle inference backend
// Each UseXxxBackend() below selects that backend when it was compiled in,
// and otherwise aborts with FDASSERT(false, ...).
void RuntimeOption::UsePaddleBackend() {
#ifdef ENABLE_PADDLE_BACKEND
  backend = Backend::PDINFER;
#else
  FDASSERT(false, "The UltraInfer didn't compile with Paddle Inference.");
#endif
}
// use onnxruntime backend
void RuntimeOption::UseOrtBackend() {
#ifdef ENABLE_ORT_BACKEND
  backend = Backend::ORT;
#else
  FDASSERT(false, "The UltraInfer didn't compile with OrtBackend.");
#endif
}
// use sophgoruntime backend
void RuntimeOption::UseSophgoBackend() {
#ifdef ENABLE_SOPHGO_BACKEND
  backend = Backend::SOPHGOTPU;
#else
  FDASSERT(false, "The UltraInfer didn't compile with SophgoBackend.");
#endif
}
// use poros backend
void RuntimeOption::UsePorosBackend() {
#ifdef ENABLE_POROS_BACKEND
  backend = Backend::POROS;
#else
  FDASSERT(false, "The UltraInfer didn't compile with PorosBackend.");
#endif
}
// use TensorRT backend
void RuntimeOption::UseTrtBackend() {
#ifdef ENABLE_TRT_BACKEND
  backend = Backend::TRT;
#else
  FDASSERT(false, "The UltraInfer didn't compile with TrtBackend.");
#endif
}
// use OpenVINO backend
void RuntimeOption::UseOpenVINOBackend() {
#ifdef ENABLE_OPENVINO_BACKEND
  backend = Backend::OPENVINO;
#else
  FDASSERT(false, "The UltraInfer didn't compile with OpenVINO.");
#endif
}
// use Paddle Lite backend
void RuntimeOption::UseLiteBackend() {
#ifdef ENABLE_LITE_BACKEND
  backend = Backend::LITE;
#else
  FDASSERT(false, "The UltraInfer didn't compile with Paddle Lite.");
#endif
}
// use Horizon NPU backend
void RuntimeOption::UseHorizonNPUBackend() {
#ifdef ENABLE_HORIZON_BACKEND
  backend = Backend::HORIZONNPU;
#else
  FDASSERT(false, "The UltraInfer didn't compile with horizon");
#endif
}
  208. void RuntimeOption::UseOMBackend() {
  209. #ifdef ENABLE_OM_BACKEND
  210. backend = Backend::OMONNPU;
  211. #else
  212. FDASSERT(false, "The FastDeploy didn't compile with npu om");
  213. #endif
  214. }
// Deprecated: toggle MKLDNN for the Paddle Inference backend; forwards to
// paddle_infer_option.enable_mkldnn.
void RuntimeOption::SetPaddleMKLDNN(bool pd_mkldnn) {
  FDWARNING << "`RuntimeOption::SetPaddleMKLDNN` will be removed in v1.2.0, "
               "please modify its member variable directly, e.g "
               "`option.paddle_infer_option.enable_mkldnn = true`"
            << std::endl;
  paddle_infer_option.enable_mkldnn = pd_mkldnn;
}
// Deprecated: remove a pass from the Paddle Inference backend; forwards to
// paddle_infer_option.DeletePass().
void RuntimeOption::DeletePaddleBackendPass(const std::string &pass_name) {
  FDWARNING
      << "`RuntimeOption::DeletePaddleBackendPass` will be removed in v1.2.0, "
         "please use `option.paddle_infer_option.DeletePass` instead."
      << std::endl;
  paddle_infer_option.DeletePass(pass_name);
}
// Deprecated: enable Paddle Inference log output.
void RuntimeOption::EnablePaddleLogInfo() {
  FDWARNING << "`RuntimeOption::EnablePaddleLogInfo` will be removed in "
               "v1.2.0, please modify its member variable directly, e.g "
               "`option.paddle_infer_option.enable_log_info = true`"
            << std::endl;
  paddle_infer_option.enable_log_info = true;
}
// Deprecated: disable Paddle Inference log output.
void RuntimeOption::DisablePaddleLogInfo() {
  FDWARNING << "`RuntimeOption::DisablePaddleLogInfo` will be removed in "
               "v1.2.0, please modify its member variable directly, e.g "
               "`option.paddle_infer_option.enable_log_info = false`"
            << std::endl;
  paddle_infer_option.enable_log_info = false;
}
// Deprecated: run TensorRT through the Paddle Inference backend. Note this
// forces the backend to PDINFER; aborts when UltraInfer was compiled without
// Paddle Inference.
void RuntimeOption::EnablePaddleToTrt() {
#ifdef ENABLE_PADDLE_BACKEND
  FDWARNING << "`RuntimeOption::EnablePaddleToTrt` will be removed in v1.2.0, "
               "please modify its member variable directly, e.g "
               "`option.paddle_infer_option.enable_trt = true`"
            << std::endl;
  FDINFO << "While using TrtBackend with EnablePaddleToTrt, UltraInfer will "
            "change to use Paddle Inference Backend."
         << std::endl;
  backend = Backend::PDINFER;
  paddle_infer_option.enable_trt = true;
#else
  FDASSERT(false, "While using TrtBackend with EnablePaddleToTrt, require the "
                  "UltraInfer is compiled with Paddle Inference Backend, "
                  "please rebuild your UltraInfer.");
#endif
}
// Deprecated: set the MKLDNN shape-cache size for the Paddle Inference
// backend; forwards to paddle_infer_option.mkldnn_cache_size.
void RuntimeOption::SetPaddleMKLDNNCacheSize(int size) {
  FDWARNING << "`RuntimeOption::SetPaddleMKLDNNCacheSize` will be removed in "
               "v1.2.0, please modify its member variable directly, e.g "
               "`option.paddle_infer_option.mkldnn_cache_size = size`."
            << std::endl;
  paddle_infer_option.mkldnn_cache_size = size;
}
  267. void RuntimeOption::SetOpenVINODevice(const std::string &name) {
  268. FDWARNING << "`RuntimeOption::SetOpenVINODevice` will be removed in v1.2.0, "
  269. "please use `RuntimeOption.openvino_option.SetDeivce(const "
  270. "std::string&)` instead."
  271. << std::endl;
  272. openvino_option.SetDevice(name);
  273. }
// Deprecated: enable FP16 inference in the Paddle Lite backend; forwards to
// paddle_lite_option.enable_fp16.
void RuntimeOption::EnableLiteFP16() {
  FDWARNING << "`RuntimeOption::EnableLiteFP16` will be removed in v1.2.0, "
               "please modify its member variables directly, e.g "
               "`runtime_option.paddle_lite_option.enable_fp16 = true`"
            << std::endl;
  paddle_lite_option.enable_fp16 = true;
}
  281. void RuntimeOption::DisableLiteFP16() {
  282. FDWARNING << "`RuntimeOption::EnableLiteFP16` will be removed in v1.2.0, "
  283. "please modify its member variables directly, e.g "
  284. "`runtime_option.paddle_lite_option.enable_fp16 = false`"
  285. << std::endl;
  286. paddle_lite_option.enable_fp16 = false;
  287. }
// Deprecated no-op: int8 execution is chosen automatically from the model.
void RuntimeOption::EnableLiteInt8() {
  FDWARNING << "RuntimeOption::EnableLiteInt8 is a useless api, this calling "
               "will not bring any effects, and will be removed in v1.2.0. if "
               "you load a quantized model, it will automatically run with "
               "int8 mode; otherwise it will run with float mode."
            << std::endl;
}
// Deprecated no-op: see EnableLiteInt8 above — the mode is automatic.
void RuntimeOption::DisableLiteInt8() {
  FDWARNING << "RuntimeOption::DisableLiteInt8 is a useless api, this calling "
               "will not bring any effects, and will be removed in v1.2.0. if "
               "you load a quantized model, it will automatically run with "
               "int8 mode; otherwise it will run with float mode."
            << std::endl;
}
// Deprecated: set the Paddle Lite CPU power mode; forwards to
// paddle_lite_option.power_mode.
void RuntimeOption::SetLitePowerMode(LitePowerMode mode) {
  FDWARNING << "`RuntimeOption::SetLitePowerMode` will be removed in v1.2.0, "
               "please modify its member variable directly, e.g "
               "`runtime_option.paddle_lite_option.power_mode = 3;`"
            << std::endl;
  paddle_lite_option.power_mode = mode;
}
// Deprecated: set the directory where Paddle Lite stores/loads its optimized
// model; forwards to paddle_lite_option.optimized_model_dir.
void RuntimeOption::SetLiteOptimizedModelDir(
    const std::string &optimized_model_dir) {
  FDWARNING
      << "`RuntimeOption::SetLiteOptimizedModelDir` will be removed in v1.2.0, "
         "please modify its member variable directly, e.g "
         "`runtime_option.paddle_lite_option.optimized_model_dir = \"...\"`"
      << std::endl;
  paddle_lite_option.optimized_model_dir = optimized_model_dir;
}
// Deprecated: set the NNAdapter subgraph-partition config file path; forwards
// to paddle_lite_option.nnadapter_subgraph_partition_config_path.
void RuntimeOption::SetLiteSubgraphPartitionPath(
    const std::string &nnadapter_subgraph_partition_config_path) {
  FDWARNING << "`RuntimeOption::SetLiteSubgraphPartitionPath` will be removed "
               "in v1.2.0, please modify its member variable directly, e.g "
               "`runtime_option.paddle_lite_option.nnadapter_subgraph_"
               "partition_config_path = \"...\";` "
            << std::endl;
  paddle_lite_option.nnadapter_subgraph_partition_config_path =
      nnadapter_subgraph_partition_config_path;
}
// Deprecated: set the NNAdapter subgraph-partition config as an in-memory
// buffer; forwards to
// paddle_lite_option.nnadapter_subgraph_partition_config_buffer.
void RuntimeOption::SetLiteSubgraphPartitionConfigBuffer(
    const std::string &nnadapter_subgraph_partition_config_buffer) {
  FDWARNING
      << "`RuntimeOption::SetLiteSubgraphPartitionConfigBuffer` will be "
         "removed in v1.2.0, please modify its member variable directly, e.g "
         "`runtime_option.paddle_lite_option.nnadapter_subgraph_partition_"
         "config_buffer = ...`"
      << std::endl;
  paddle_lite_option.nnadapter_subgraph_partition_config_buffer =
      nnadapter_subgraph_partition_config_buffer;
}
// Deprecated: set NNAdapter context properties; forwards to
// paddle_lite_option.nnadapter_context_properties.
void RuntimeOption::SetLiteContextProperties(
    const std::string &nnadapter_context_properties) {
  FDWARNING << "`RuntimeOption::SetLiteContextProperties` will be removed in "
               "v1.2.0, please modify its member variable directly, e.g "
               "`runtime_option.paddle_lite_option.nnadapter_context_"
               "properties = ...`"
            << std::endl;
  paddle_lite_option.nnadapter_context_properties =
      nnadapter_context_properties;
}
// Deprecated: set the NNAdapter model cache directory; forwards to
// paddle_lite_option.nnadapter_model_cache_dir.
void RuntimeOption::SetLiteModelCacheDir(
    const std::string &nnadapter_model_cache_dir) {
  FDWARNING
      << "`RuntimeOption::SetLiteModelCacheDir` will be removed in v1.2.0, "
         "please modify its member variable directly, e.g "
         "`runtime_option.paddle_lite_option.nnadapter_model_cache_dir = ...`"
      << std::endl;
  paddle_lite_option.nnadapter_model_cache_dir = nnadapter_model_cache_dir;
}
  358. void RuntimeOption::SetLiteDynamicShapeInfo(
  359. const std::map<std::string, std::vector<std::vector<int64_t>>>
  360. &nnadapter_dynamic_shape_info) {
  361. FDWARNING << "`RuntimeOption::SetLiteDynamicShapeInfo` will be removed in "
  362. "v1.2.0, please modify its member variable directly, e.g "
  363. "`runtime_option.paddle_lite_option.paddle_lite_option."
  364. "nnadapter_dynamic_shape_info = ...`"
  365. << std::endl;
  366. paddle_lite_option.nnadapter_dynamic_shape_info =
  367. nnadapter_dynamic_shape_info;
  368. }
  369. void RuntimeOption::SetLiteMixedPrecisionQuantizationConfigPath(
  370. const std::string &nnadapter_mixed_precision_quantization_config_path) {
  371. FDWARNING
  372. << "`RuntimeOption::SetLiteMixedPrecisionQuantizationConfigPath` will be "
  373. "removed in v1.2.0, please modify its member variable directly, e.g "
  374. "`runtime_option.paddle_lite_option.paddle_lite_option.nnadapter_"
  375. "mixed_precision_quantization_config_path = ...`"
  376. << std::endl;
  377. paddle_lite_option.nnadapter_mixed_precision_quantization_config_path =
  378. nnadapter_mixed_precision_quantization_config_path;
  379. }
// Deprecated: set min/opt/max shapes for a TensorRT input; forwards to
// trt_option.SetShape().
void RuntimeOption::SetTrtInputShape(const std::string &input_name,
                                     const std::vector<int32_t> &min_shape,
                                     const std::vector<int32_t> &opt_shape,
                                     const std::vector<int32_t> &max_shape) {
  FDWARNING << "`RuntimeOption::SetTrtInputShape` will be removed in v1.2.0, "
               "please use `RuntimeOption.trt_option.SetShape()` instead."
            << std::endl;
  trt_option.SetShape(input_name, min_shape, opt_shape, max_shape);
}
// Deprecated: set min/opt/max calibration data for a TensorRT input; forwards
// to trt_option.SetInputData().
void RuntimeOption::SetTrtInputData(const std::string &input_name,
                                    const std::vector<float> &min_shape_data,
                                    const std::vector<float> &opt_shape_data,
                                    const std::vector<float> &max_shape_data) {
  FDWARNING << "`RuntimeOption::SetTrtInputData` will be removed in v1.2.0, "
               "please use `RuntimeOption.trt_option.SetInputData()` instead."
            << std::endl;
  trt_option.SetInputData(input_name, min_shape_data, opt_shape_data,
                          max_shape_data);
}
// Deprecated: set the TensorRT max workspace size (bytes); forwards to
// trt_option.max_workspace_size.
void RuntimeOption::SetTrtMaxWorkspaceSize(size_t max_workspace_size) {
  FDWARNING << "`RuntimeOption::SetTrtMaxWorkspaceSize` will be removed in "
               "v1.2.0, please modify its member variable directly, e.g "
               "`RuntimeOption.trt_option.max_workspace_size = "
            << max_workspace_size << "`." << std::endl;
  trt_option.max_workspace_size = max_workspace_size;
}
// Deprecated: set the TensorRT max batch size; forwards to
// trt_option.max_batch_size.
void RuntimeOption::SetTrtMaxBatchSize(size_t max_batch_size) {
  FDWARNING << "`RuntimeOption::SetTrtMaxBatchSize` will be removed in v1.2.0, "
               "please modify its member variable directly, e.g "
               "`RuntimeOption.trt_option.max_batch_size = "
            << max_batch_size << "`." << std::endl;
  trt_option.max_batch_size = max_batch_size;
}
// Deprecated: enable FP16 inference in the TensorRT backend; forwards to
// trt_option.enable_fp16.
void RuntimeOption::EnableTrtFP16() {
  FDWARNING << "`RuntimeOption::EnableTrtFP16` will be removed in v1.2.0, "
               "please modify its member variable directly, e.g "
               "`runtime_option.trt_option.enable_fp16 = true;`"
            << std::endl;
  trt_option.enable_fp16 = true;
}
// Deprecated: disable FP16 inference in the TensorRT backend; forwards to
// trt_option.enable_fp16.
void RuntimeOption::DisableTrtFP16() {
  FDWARNING << "`RuntimeOption::DisableTrtFP16` will be removed in v1.2.0, "
               "please modify its member variable directly, e.g "
               "`runtime_option.trt_option.enable_fp16 = false;`"
            << std::endl;
  trt_option.enable_fp16 = false;
}
// Toggle use of pinned (page-locked) host memory for the runtime.
void RuntimeOption::EnablePinnedMemory() { enable_pinned_memory = true; }
void RuntimeOption::DisablePinnedMemory() { enable_pinned_memory = false; }
// Deprecated: set the file used to serialize/deserialize the TensorRT engine;
// forwards to trt_option.serialize_file.
void RuntimeOption::SetTrtCacheFile(const std::string &cache_file_path) {
  FDWARNING << "`RuntimeOption::SetTrtCacheFile` will be removed in v1.2.0, "
               "please modify its member variable directly, e.g "
               "`runtime_option.trt_option.serialize_file = \""
            << cache_file_path << "\"." << std::endl;
  trt_option.serialize_file = cache_file_path;
}
// Deprecated: set the number of OpenVINO execution streams; forwards to
// openvino_option.num_streams.
void RuntimeOption::SetOpenVINOStreams(int num_streams) {
  FDWARNING << "`RuntimeOption::SetOpenVINOStreams` will be removed in v1.2.0, "
               "please modify its member variable directly, e.g "
               "`runtime_option.openvino_option.num_streams = "
            << num_streams << "`." << std::endl;
  openvino_option.num_streams = num_streams;
}
// Deprecated: enable TensorRT shape collection in the Paddle Inference
// backend; forwards to paddle_infer_option.collect_trt_shape.
void RuntimeOption::EnablePaddleTrtCollectShape() {
  FDWARNING << "`RuntimeOption::EnablePaddleTrtCollectShape` will be removed "
               "in v1.2.0, please modify its member variable directly, e.g "
               "runtime_option.paddle_infer_option.collect_trt_shape = true`."
            << std::endl;
  paddle_infer_option.collect_trt_shape = true;
}
// Deprecated: disable TensorRT shape collection in the Paddle Inference
// backend; forwards to paddle_infer_option.collect_trt_shape.
void RuntimeOption::DisablePaddleTrtCollectShape() {
  FDWARNING << "`RuntimeOption::DisablePaddleTrtCollectShape` will be removed "
               "in v1.2.0, please modify its member variable directly, e.g "
               "runtime_option.paddle_infer_option.collect_trt_shape = false`."
            << std::endl;
  paddle_infer_option.collect_trt_shape = false;
}
  457. void RuntimeOption::DisablePaddleTrtOPs(const std::vector<std::string> &ops) {
  458. FDWARNING << "`RuntimeOption::DisablePaddleTrtOps` will be removed in "
  459. "v.1.20, please use "
  460. "`runtime_option.paddle_infer_option.DisableTrtOps` instead."
  461. << std::endl;
  462. paddle_infer_option.DisableTrtOps(ops);
  463. }
// Select the TVM backend; aborts when UltraInfer was compiled without it.
void RuntimeOption::UseTVMBackend() {
#ifdef ENABLE_TVM_BACKEND
  backend = Backend::TVM;
#else
  FDASSERT(false, "The UltraInfer didn't compile with TVMBackend.");
#endif
}
  471. } // namespace ultra_infer