@@ -70,6 +70,8 @@ class Model {
* @param model_dir: the directory which contains model.yml
* @param use_gpu: use gpu or not when inferring
* @param use_trt: use TensorRT or not when inferring
+ * @param use_mkl: use mkl or not when inferring
+ * @param mkl_thread_num: the number of mkl threads used when inferring
* @param gpu_id: the id of the gpu used when inferring on gpu
* @param key: the encryption key when using an encrypted model
* @param use_ir_optim: use ir optimization or not when inferring
@@ -78,28 +80,27 @@ class Model {
bool use_gpu = false,
bool use_trt = false,
bool use_mkl = true,
+ int mkl_thread_num = 4,
int gpu_id = 0,
std::string key = "",
- int mkl_thread_num = 4,
bool use_ir_optim = true) {
create_predictor(
model_dir,
use_gpu,
use_trt,
use_mkl,
+ mkl_thread_num,
gpu_id,
key,
- mkl_thread_num,
use_ir_optim);
}
-
void create_predictor(const std::string& model_dir,
bool use_gpu = false,
bool use_trt = false,
bool use_mkl = true,
+ int mkl_thread_num = 4,
int gpu_id = 0,
std::string key = "",
- int mkl_thread_num = 4,
bool use_ir_optim = true);
/*
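
Below is a minimal, hypothetical caller showing the effect of this change: `mkl_thread_num` now sits between `use_mkl` and `gpu_id`, so any code passing these arguments positionally must be updated to the new order. Only `Model::create_predictor`'s signature is taken from the hunks above; the instantiation and argument values are illustrative assumptions.

```cpp
// Assumes the header declaring class Model (the file patched above) is
// included; the path and values below are placeholders.
int main() {
  Model model;  // hypothetical instantiation; construction is not part of this diff

  model.create_predictor(
      "/path/to/model_dir",  // model_dir: directory containing model.yml
      false,                 // use_gpu
      false,                 // use_trt
      true,                  // use_mkl
      4,                     // mkl_thread_num: now passed before gpu_id
      0,                     // gpu_id
      "",                    // key: empty, i.e. the model is not encrypted
      true);                 // use_ir_optim
  return 0;
}
```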