@@ -85,7 +85,7 @@ void Model::create_predictor(const std::string& model_dir,
 #endif
   // enable Memory Optim
   config.EnableMemoryOptim();
-  if (use_trt) {
+  if (use_trt && use_gpu) {
     config.EnableTensorRtEngine(
         1 << 20 /* workspace_size*/,
         32 /* max_batch_size*/,
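
The guard above matters because Paddle's TensorRT integration is a GPU subgraph engine: requesting `use_trt` on a CPU-only run previously produced an unusable configuration. Below is a minimal standalone sketch of that guard, assuming the same `paddle::AnalysisConfig` API generation used in this file; the `configure` helper and its argument values are illustrative, not part of the project.

```cpp
#include "paddle_inference_api.h"

// Hypothetical helper; mirrors the guard added in the hunk above.
void configure(paddle::AnalysisConfig* config, bool use_gpu, bool use_trt) {
  if (use_gpu) {
    config->EnableUseGpu(100 /* initial GPU memory (MB) */, 0 /* gpu_id */);
  } else {
    config->DisableGpu();
  }
  config->EnableMemoryOptim();
  // TensorRT only runs on the GPU, so a bare use_trt without use_gpu
  // is now ignored instead of producing an invalid configuration.
  if (use_trt && use_gpu) {
    config->EnableTensorRtEngine(1 << 20 /* workspace_size */,
                                 32 /* max_batch_size */);
  }
}
```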
@@ -283,7 +283,7 @@ bool Model::predict(const cv::Mat& im, DetResult* result) {
   im_tensor->Reshape({1, 3, h, w});
   im_tensor->copy_from_cpu(inputs_.im_data_.data());

-  if (name == "YOLOv3") {
+  if (name == "YOLOv3" || name == "PPYOLO") {
     auto im_size_tensor = predictor_->GetInputTensor("im_size");
     im_size_tensor->Reshape({1, 2});
     im_size_tensor->copy_from_cpu(inputs_.ori_im_size_.data());
@@ -442,7 +442,7 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
                       inputs_data.begin() + i * 3 * h * w);
   }
   im_tensor->copy_from_cpu(inputs_data.data());
-  if (name == "YOLOv3") {
+  if (name == "YOLOv3" || name == "PPYOLO") {
     auto im_size_tensor = predictor_->GetInputTensor("im_size");
     im_size_tensor->Reshape({batch_size, 2});
     std::vector<int> inputs_data_size(batch_size * 2);
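
PPYOLO reuses YOLOv3's exported input signature, which is why both the single-image and the batch prediction paths above now feed the extra `im_size` tensor for it as well. The sketch below shows that input in isolation, assuming the same zero-copy tensor API (`GetInputTensor` / `Reshape` / `copy_from_cpu`) used throughout this file; the `feed_im_size` helper name and its parameters are illustrative.

```cpp
#include <vector>
#include "paddle_inference_api.h"

// Hypothetical helper: feeds the original image size that YOLOv3/PPYOLO
// exports expect as a second input named "im_size", shaped [batch, 2]
// and holding (height, width) pairs.
void feed_im_size(paddle::PaddlePredictor* predictor, int ori_h, int ori_w) {
  std::vector<int> im_size = {ori_h, ori_w};
  auto im_size_tensor = predictor->GetInputTensor("im_size");
  im_size_tensor->Reshape({1, 2});
  im_size_tensor->copy_from_cpu(im_size.data());
}
```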