lapjv.cc
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The code is based on:
// https://github.com/gatagat/lap/blob/master/lap/lapjv.cpp
// The copyright of gatagat/lap is as follows:
// MIT License
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdexcept>
#include "ultra_infer/vision/tracking/pptracking/lapjv.h"
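
// NOTE: the helper macros used below (NEW, FREE, LARGE, TRUE, FALSE,
// SWAP_INDICES) are assumed to be defined in lapjv.h, as in the upstream
// gatagat/lap implementation this file is derived from.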

namespace ultra_infer {
namespace vision {
namespace tracking {

/** Column-reduction and reduction transfer for a dense cost matrix.
 */
int _ccrrt_dense(const int n, float *cost[], int *free_rows, int *x, int *y,
                 float *v) {
  int n_free_rows;
  bool *unique;
  for (int i = 0; i < n; i++) {
    x[i] = -1;
    v[i] = LARGE;
    y[i] = 0;
  }
  for (int i = 0; i < n; i++) {
    for (int j = 0; j < n; j++) {
      const float c = cost[i][j];
      if (c < v[j]) {
        v[j] = c;
        y[j] = i;
      }
    }
  }
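  // Column reduction: v[j] now holds the minimum cost in column j and y[j]
  // the index of the row that attains it.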
  NEW(unique, bool, n);
  memset(unique, TRUE, n);
  {
    int j = n;
    do {
      j--;
      const int i = y[j];
      if (x[i] < 0) {
        x[i] = j;
      } else {
        unique[i] = FALSE;
        y[j] = -1;
      }
    } while (j > 0);
  }
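  // Collect rows left unassigned; for rows whose assigned column minimum is
  // unique, transfer reduction to v[j] using the second-smallest reduced cost
  // in that row.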
  n_free_rows = 0;
  for (int i = 0; i < n; i++) {
    if (x[i] < 0) {
      free_rows[n_free_rows++] = i;
    } else if (unique[i]) {
      const int j = x[i];
      float min = LARGE;
      for (int j2 = 0; j2 < n; j2++) {
        if (j2 == static_cast<int>(j)) {
          continue;
        }
        const float c = cost[i][j2] - v[j2];
        if (c < min) {
          min = c;
        }
      }
      v[j] -= min;
    }
  }
  FREE(unique);
  return n_free_rows;
}

/** Augmenting row reduction for a dense cost matrix.
 */
int _carr_dense(const int n, float *cost[], const int n_free_rows,
                int *free_rows, int *x, int *y, float *v) {
  int current = 0;
  int new_free_rows = 0;
  int rr_cnt = 0;
  while (current < n_free_rows) {
    int i0;
    int j1, j2;
    float v1, v2, v1_new;
    bool v1_lowers;
    rr_cnt++;
    const int free_i = free_rows[current++];
    j1 = 0;
    v1 = cost[free_i][0] - v[0];
    j2 = -1;
    v2 = LARGE;
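    // Find the smallest (v1, at column j1) and second-smallest (v2, at
    // column j2) reduced costs in row free_i.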
    for (int j = 1; j < n; j++) {
      const float c = cost[free_i][j] - v[j];
      if (c < v2) {
        if (c >= v1) {
          v2 = c;
          j2 = j;
        } else {
          v2 = v1;
          v1 = c;
          j2 = j1;
          j1 = j;
        }
      }
    }
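    // If v[j1] can be lowered by the gap (v2 - v1), lower it and re-process
    // the displaced row right away; otherwise switch to the second-best
    // column. The rr_cnt guard bounds how often this shortcut is taken so the
    // reduction cannot cycle indefinitely.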
    i0 = y[j1];
    v1_new = v[j1] - (v2 - v1);
    v1_lowers = v1_new < v[j1];
    if (rr_cnt < current * n) {
      if (v1_lowers) {
        v[j1] = v1_new;
      } else if (i0 >= 0 && j2 >= 0) {
        j1 = j2;
        i0 = y[j2];
      }
      if (i0 >= 0) {
        if (v1_lowers) {
          free_rows[--current] = i0;
        } else {
          free_rows[new_free_rows++] = i0;
        }
      }
    } else {
      if (i0 >= 0) {
        free_rows[new_free_rows++] = i0;
      }
    }
    x[free_i] = j1;
    y[j1] = free_i;
  }
  return new_free_rows;
}

/** Find columns with minimum d[j] and put them on the SCAN list.
 */
int _find_dense(const int n, int lo, float *d, int *cols, int *y) {
  int hi = lo + 1;
  float mind = d[cols[lo]];
  for (int k = hi; k < n; k++) {
    int j = cols[k];
    if (d[j] <= mind) {
      if (d[j] < mind) {
        hi = lo;
        mind = d[j];
      }
      cols[k] = cols[hi];
      cols[hi++] = j;
    }
  }
  return hi;
}

// Scan all columns in TODO starting from arbitrary column in SCAN
// and try to decrease d of the TODO columns using the SCAN column.
int _scan_dense(const int n, float *cost[], int *plo, int *phi, float *d,
                int *cols, int *pred, int *y, float *v) {
  int lo = *plo;
  int hi = *phi;
  float h, cred_ij;
  while (lo != hi) {
    int j = cols[lo++];
    const int i = y[j];
    const float mind = d[j];
    h = cost[i][j] - v[j] - mind;
    // For all columns in TODO
    for (int k = hi; k < n; k++) {
      j = cols[k];
      cred_ij = cost[i][j] - v[j] - h;
      if (cred_ij < d[j]) {
        d[j] = cred_ij;
        pred[j] = i;
        if (cred_ij == mind) {
          if (y[j] < 0) {
            return j;
          }
          cols[k] = cols[hi];
          cols[hi++] = j;
        }
      }
    }
  }
  *plo = lo;
  *phi = hi;
  return -1;
}

/** Single iteration of modified Dijkstra shortest path algorithm as explained
 * in the JV paper.
 *
 * This is a dense matrix version.
 *
 * \return The closest free column index.
 */
int find_path_dense(const int n, float *cost[], const int start_i, int *y,
                    float *v, int *pred) {
  int lo = 0, hi = 0;
  int final_j = -1;
  int n_ready = 0;
  int *cols;
  float *d;
  NEW(cols, int, n);
  NEW(d, float, n);
  for (int i = 0; i < n; i++) {
    cols[i] = i;
    pred[i] = start_i;
    d[i] = cost[start_i][i] - v[i];
  }
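  // d[j] is the length of the shortest alternating path from start_i to
  // column j found so far. cols is kept ordered as: already finalized (READY)
  // columns, then the SCAN list [lo, hi), then untouched (TODO) columns.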
  while (final_j == -1) {
    // No columns left on the SCAN list.
    if (lo == hi) {
      n_ready = lo;
      hi = _find_dense(n, lo, d, cols, y);
      for (int k = lo; k < hi; k++) {
        const int j = cols[k];
        if (y[j] < 0) {
          final_j = j;
        }
      }
    }
    if (final_j == -1) {
      final_j = _scan_dense(n, cost, &lo, &hi, d, cols, pred, y, v);
    }
  }
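  // Price update for the columns scanned during the search: shift v[j] by
  // (d[j] - mind), the standard dual update of the JV shortest-path phase.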
  {
    const float mind = d[cols[lo]];
    for (int k = 0; k < n_ready; k++) {
      const int j = cols[k];
      v[j] += d[j] - mind;
    }
  }
  FREE(cols);
  FREE(d);
  return final_j;
}

/** Augment for a dense cost matrix.
 */
int _ca_dense(const int n, float *cost[], const int n_free_rows, int *free_rows,
              int *x, int *y, float *v) {
  int *pred;
  NEW(pred, int, n);
  for (int *pfree_i = free_rows; pfree_i < free_rows + n_free_rows; pfree_i++) {
    int i = -1, j;
    int k = 0;
    j = find_path_dense(n, cost, *pfree_i, y, v, pred);
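    // Augment along the alternating path: walk back through pred[] from the
    // free column found above, flipping assignments until row *pfree_i is
    // reached.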
    while (i != *pfree_i) {
      i = pred[j];
      y[j] = i;
      SWAP_INDICES(j, x[i]);
      k++;
    }
  }
  FREE(pred);
  return 0;
}

/** Solve a dense linear assignment problem (LAP) with the Jonker-Volgenant
 * algorithm. On return, x[i] is the column assigned to row i and y[j] is the
 * row assigned to column j (-1 if unassigned).
 */
int lapjv_internal(const cv::Mat &cost, const bool extend_cost,
                   const float cost_limit, int *x, int *y) {
  int n_rows = cost.rows;
  int n_cols = cost.cols;
  int n;
  if (n_rows == n_cols) {
    n = n_rows;
  } else if (!extend_cost) {
    throw std::invalid_argument(
        "Square cost array expected. If cost is intentionally non-square, pass "
        "extend_cost=True.");
  }
  // Get extend cost
  if (extend_cost || cost_limit < LARGE) {
    n = n_rows + n_cols;
  }
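  // Non-square or cost-limited problems are embedded in an
  // (n_rows + n_cols) x (n_rows + n_cols) matrix: real costs fill the
  // top-left block, pairing a real row or column with a dummy costs
  // expand_value, and dummy-dummy pairs cost 0.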
  cv::Mat cost_expand(n, n, CV_32F);
  float expand_value;
  if (cost_limit < LARGE) {
    expand_value = cost_limit / 2;
  } else {
    double max_v;
    minMaxLoc(cost, nullptr, &max_v);
    expand_value = static_cast<float>(max_v) + 1.;
  }
  for (int i = 0; i < n; ++i) {
    for (int j = 0; j < n; ++j) {
      cost_expand.at<float>(i, j) = expand_value;
      if (i >= n_rows && j >= n_cols) {
        cost_expand.at<float>(i, j) = 0;
      } else if (i < n_rows && j < n_cols) {
        cost_expand.at<float>(i, j) = cost.at<float>(i, j);
      }
    }
  }
  // Convert Mat to pointer array
  float **cost_ptr;
  NEW(cost_ptr, float *, n);
  for (int i = 0; i < n; ++i) {
    NEW(cost_ptr[i], float, n);
  }
  for (int i = 0; i < n; ++i) {
    for (int j = 0; j < n; ++j) {
      cost_ptr[i][j] = cost_expand.at<float>(i, j);
    }
  }
  int ret;
  int *free_rows;
  float *v;
  int *x_c;
  int *y_c;
  NEW(free_rows, int, n);
  NEW(v, float, n);
  NEW(x_c, int, n);
  NEW(y_c, int, n);
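  // Run the three JV phases on the expanded matrix: column reduction with
  // reduction transfer, up to two rounds of augmenting row reduction, then
  // shortest augmenting paths for the rows that are still free.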
  ret = _ccrrt_dense(n, cost_ptr, free_rows, x_c, y_c, v);
  int i = 0;
  while (ret > 0 && i < 2) {
    ret = _carr_dense(n, cost_ptr, ret, free_rows, x_c, y_c, v);
    i++;
  }
  if (ret > 0) {
    ret = _ca_dense(n, cost_ptr, ret, free_rows, x_c, y_c, v);
  }
  FREE(v);
  FREE(free_rows);
  for (int i = 0; i < n; ++i) {
    FREE(cost_ptr[i]);
  }
  FREE(cost_ptr);
  if (ret != 0) {
    FREE(x_c);
    FREE(y_c);
    if (ret == -1) {
      throw std::runtime_error("Out of memory.");
    }
    throw std::runtime_error("Unknown error (lapjv_internal)");
  }
  // Map the solution of the expanded problem back to the original problem;
  // assignments to dummy rows/columns are reported as -1.
  for (int i = 0; i < n; ++i) {
    if (i < n_rows) {
      x[i] = x_c[i];
      if (x[i] >= n_cols) {
        x[i] = -1;
      }
    }
    if (i < n_cols) {
      y[i] = y_c[i];
      if (y[i] >= n_rows) {
        y[i] = -1;
      }
    }
  }
  FREE(x_c);
  FREE(y_c);
  return ret;
}

} // namespace tracking
} // namespace vision
} // namespace ultra_infer