option.h

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "ultra_infer/core/fd_type.h"

// https://github.com/PaddlePaddle/Paddle-Lite/issues/8290
#if (defined(WITH_LITE_STATIC) && defined(WITH_STATIC_LIB))
// Whether to output some warning messages when using the
// UltraInfer static library, default OFF. These messages
// are reserved for debugging only.
#if defined(WITH_STATIC_WARNING)
#warning You are using the UltraInfer static library. We will automatically add some registration codes for ops, kernels and passes for Paddle Lite. // NOLINT
#endif
#if !defined(WITH_STATIC_LIB_AT_COMPILING)
#include "paddle_use_kernels.h" // NOLINT
#include "paddle_use_ops.h"     // NOLINT
#include "paddle_use_passes.h"  // NOLINT
#endif
#endif
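
// Build-configuration note (illustrative): the macros tested above are
// expected to come from the build system. A hedged sketch of how they
// might be defined with CMake (assumed usage, not verified against the
// project's build scripts):
//   add_definitions(-DWITH_LITE_STATIC -DWITH_STATIC_LIB)
//   add_definitions(-DWITH_STATIC_WARNING) // opt-in debug warnings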

#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <vector>

namespace ultra_infer {
/*! Paddle Lite power mode for mobile device. */
enum LitePowerMode {
  LITE_POWER_HIGH = 0,      ///< Use Lite Backend with high power mode
  LITE_POWER_LOW = 1,       ///< Use Lite Backend with low power mode
  LITE_POWER_FULL = 2,      ///< Use Lite Backend with full power mode
  LITE_POWER_NO_BIND = 3,   ///< Use Lite Backend with no bind power mode
  LITE_POWER_RAND_HIGH = 4, ///< Use Lite Backend with rand high power mode
  LITE_POWER_RAND_LOW = 5   ///< Use Lite Backend with rand low power mode
};
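
// Usage sketch (illustrative only): LiteBackendOption below stores the
// power mode as a plain int, so either the enum value or its integer
// code can be assigned:
//   option.power_mode = LITE_POWER_NO_BIND; // enum value, stored as 3
//   option.power_mode = 3;                  // equivalent integer code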

/*! @brief Option object to configure Paddle Lite backend
 */
struct LiteBackendOption {
  /// Paddle Lite power mode for mobile device,
  /// default 3 (LITE_POWER_NO_BIND), see LitePowerMode
  int power_mode = 3;
  /// Number of threads used while running on CPU
  int cpu_threads = 1;
  /// Enable half precision (FP16) inference
  bool enable_fp16 = false;
  /// Inference device; Paddle Lite supports CPU/KUNLUNXIN/TIMVX/ASCEND
  Device device = Device::CPU;
  /// Index of the inference device
  int device_id = 0;
  // TODO(qiuyanjun): add opencl binary path and cache settings.
  /// Directory used to cache OpenCL kernels
  std::string opencl_cache_dir = "/data/local/tmp/";
  /// File holding tuned OpenCL kernel parameters
  std::string opencl_tuned_file = "/data/local/tmp/opencl_tuned_kernels.bin";
  /// Size of the KunlunXin L3 cache workspace, in bytes
  int kunlunxin_l3_workspace_size = 0xfffc00;
  /// Whether to lock the allocated KunlunXin L3 cache
  bool kunlunxin_locked = false;
  /// Whether to enable autotune on KunlunXin
  bool kunlunxin_autotune = true;
  /// File to store/load KunlunXin autotune results
  std::string kunlunxin_autotune_file = "";
  /// Compute precision on KunlunXin, default "int16"
  std::string kunlunxin_precision = "int16";
  /// Whether to enable adaptive sequence length on KunlunXin
  bool kunlunxin_adaptive_seqlen = false;
  /// Whether to enable multi-stream execution on KunlunXin
  bool kunlunxin_enable_multi_stream = false;
  /// Default global memory size for KunlunXin, in bytes
  int64_t kunlunxin_gm_default_size = 0;
  /// Directory to save the optimized model when using CxxConfig
  std::string optimized_model_dir = "";
  /// Path of the NNAdapter subgraph partition config file
  std::string nnadapter_subgraph_partition_config_path = "";
  /// Content of the NNAdapter subgraph partition config
  std::string nnadapter_subgraph_partition_config_buffer = "";
  /// NNAdapter context properties
  std::string nnadapter_context_properties = "";
  /// Directory used to cache models compiled by NNAdapter
  std::string nnadapter_model_cache_dir = "";
  /// Path of the NNAdapter mixed precision quantization config file
  std::string nnadapter_mixed_precision_quantization_config_path = "";
  /// Dynamic shape info per input: input name -> list of candidate shapes
  std::map<std::string, std::vector<std::vector<int64_t>>>
      nnadapter_dynamic_shape_info = {{"", {{0}}}};
  /// Names of the NNAdapter devices to use
  std::vector<std::string> nnadapter_device_names = {};
};

} // namespace ultra_infer
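
// ---------------------------------------------------------------------------
// Minimal usage sketch (illustration only, not part of this header).
// It only sets fields declared above; the input name "x" and the shapes
// are hypothetical placeholders, and how the option is handed to a
// runtime is left out because it depends on the surrounding API.
//
//   ultra_infer::LiteBackendOption option;
//   option.cpu_threads = 4;                            // 4 CPU threads
//   option.enable_fp16 = true;                         // if FP16 is supported
//   option.power_mode = ultra_infer::LITE_POWER_HIGH;  // stored as int 0
//   // Candidate input shapes, keyed by input tensor name:
//   option.nnadapter_dynamic_shape_info = {
//       {"x", {{1, 3, 224, 224}, {4, 3, 224, 224}}}};
// ---------------------------------------------------------------------------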