// model_infer.cpp
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
  14. #include <gflags/gflags.h>
  15. #include <string>
  16. #include <vector>
  17. #include "model_deploy/common/include/paddle_deploy.h"
  18. PaddleDeploy::Model* model;
  19. extern "C" __declspec(dllexport) void InitModel(const char* model_type, const char* model_filename, const char* params_filename, const char* cfg_file)
  20. {
  21. bool use_gpu = false;
  22. int gpu_id = 0;
  23. // create model
  24. model = PaddleDeploy::CreateModel(model_type); //FLAGS_model_type
  25. // model init
  26. model->Init(cfg_file);
  27. // inference engine init
  28. PaddleDeploy::PaddleEngineConfig engine_config;
  29. engine_config.model_filename = model_filename;
  30. engine_config.params_filename = params_filename;
  31. engine_config.use_gpu = use_gpu;
  32. engine_config.gpu_id = gpu_id;
  33. bool init = model->PaddleEngineInit(engine_config);
  34. if (init)
  35. {
  36. std::cout << "init model success" << std::endl;
  37. }
  38. }
/*
 * img: input image buffer for prediction.
 *
 * nWidth: width of img.
 *
 * nHeight: height of img.
 *
 * nChannel: channel count of img.
 *
 * output: result of predict, 6 floats per box: category_id, score, coordinate.
 *
 * nBoxesNum: number of boxes.
 *
 * LabelList: space-separated label list of the result.
 */
  54. extern "C" __declspec(dllexport) void ModelPredict(const unsigned char* img, int nWidth, int nHeight,int nChannel, float* output, int* nBoxesNum, char* LabelList)
  55. {
  56. // prepare data
  57. std::vector<cv::Mat> imgs;
  58. int nType = 0;
  59. if (nChannel==1)
  60. {
  61. nType = CV_8UC1;
  62. }
  63. else if (nChannel == 2)
  64. {
  65. nType = CV_8UC2;
  66. }
  67. else if (nChannel == 3)
  68. {
  69. nType = CV_8UC3;
  70. }
  71. else if (nChannel == 4)
  72. {
  73. nType = CV_8UC4;
  74. }
  75. cv::Mat input = cv::Mat::zeros(cv::Size(nWidth, nHeight), nType);
  76. memcpy(input.data, img, nHeight * nWidth * nChannel * sizeof(uchar));
  77. //cv::imwrite("./1.png", input);
  78. imgs.push_back(std::move(input));
  79. // predict
  80. std::vector<PaddleDeploy::Result> results;
  81. bool pre = model->Predict(imgs, &results, 1);
  82. if (pre)
  83. {
  84. std::cout << "model predict success" << std::endl;
  85. }
  86. nBoxesNum[0] = results.size();
  87. std::string label ="";
  88. for (int num = 0; num < results.size(); num++)
  89. {
  90. //std::cout << "res: " << results[num] << std::endl;
  91. for (int i = 0; i < results[num].det_result->boxes.size(); i++)
  92. {
  93. //std::cout << "category: " << results[num].det_result->boxes[i].category << std::endl;
  94. label = label + results[num].det_result->boxes[i].category+ " ";
  95. // labelindex
  96. output[num * 6 + 0] = results[num].det_result->boxes[i].category_id;
  97. // score
  98. output[num * 6 + 1] = results[num].det_result->boxes[i].score;
  99. //// box
  100. output[num * 6 + 2] = results[num].det_result->boxes[i].coordinate[0];
  101. output[num * 6 + 3] = results[num].det_result->boxes[i].coordinate[1];
  102. output[num * 6 + 4] = results[num].det_result->boxes[i].coordinate[2];
  103. output[num * 6 + 5] = results[num].det_result->boxes[i].coordinate[3];
  104. }
  105. }
  106. memcpy(LabelList, label.c_str(), strlen(label.c_str()));
  107. }
  108. extern "C" __declspec(dllexport) void DestructModel()
  109. {
  110. delete model;
  111. std::cout << "destruct model success" << std::endl;
  112. }
//DEFINE_string(model_filename, "", "Path of det inference model");
//DEFINE_string(params_filename, "", "Path of det inference params");
//DEFINE_string(cfg_file, "", "Path of yaml file");
//DEFINE_string(model_type, "", "model type");
//DEFINE_string(image, "", "Path of test image file");
//DEFINE_bool(use_gpu, false, "Infering with GPU or CPU");
//DEFINE_int32(gpu_id, 0, "GPU card id");
//
//int main(int argc, char** argv) {
//  // Parsing command-line
//  google::ParseCommandLineFlags(&argc, &argv, true);
//
//  // create model
//  PaddleDeploy::Model* model = PaddleDeploy::CreateModel(FLAGS_model_type);
//
//  // model init
//  model->Init(FLAGS_cfg_file);
//
//  // inference engine init
//  PaddleDeploy::PaddleEngineConfig engine_config;
//  engine_config.model_filename = FLAGS_model_filename;
//  engine_config.params_filename = FLAGS_params_filename;
//  engine_config.use_gpu = FLAGS_use_gpu;
//  engine_config.gpu_id = FLAGS_gpu_id;
//  model->PaddleEngineInit(engine_config);
//
//  // prepare data
//  std::vector<cv::Mat> imgs;
//  imgs.push_back(std::move(cv::imread(FLAGS_image)));
//
//  // predict
//  std::vector<PaddleDeploy::Result> results;
//  model->Predict(imgs, &results, 1);
//
//  std::cout << results[0] << std::endl;
//  delete model;
//  return 0;
//}