lapjv.cpp

// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The code is based on:
// https://github.com/gatagat/lap/blob/master/lap/lapjv.cpp
// The copyright of gatagat/lap is as follows:
// MIT License
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <stdexcept>

#include "include/lapjv.h"
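
// Helper macros used below (NEW, FREE, SWAP_INDICES, TRUE, FALSE) and the
// LARGE cost sentinel are not defined in this file; they are assumed to be
// provided by include/lapjv.h, following the headers in gatagat/lap.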

namespace PaddleDetection {
/** Column-reduction and reduction transfer for a dense cost matrix.
 */
int _ccrrt_dense(
    const int n, float *cost[], int *free_rows, int *x, int *y, float *v) {
  int n_free_rows;
  bool *unique;
  for (int i = 0; i < n; i++) {
    x[i] = -1;
    v[i] = LARGE;
    y[i] = 0;
  }
  // Column reduction: v[j] becomes the minimum cost in column j and y[j]
  // the row attaining it.
  for (int i = 0; i < n; i++) {
    for (int j = 0; j < n; j++) {
      const float c = cost[i][j];
      if (c < v[j]) {
        v[j] = c;
        y[j] = i;
      }
    }
  }
  NEW(unique, bool, n);
  memset(unique, TRUE, n);
  // Assign each column to its minimizing row; rows claimed by more than one
  // column keep a single assignment and are marked as not unique.
  {
    int j = n;
    do {
      j--;
      const int i = y[j];
      if (x[i] < 0) {
        x[i] = j;
      } else {
        unique[i] = FALSE;
        y[j] = -1;
      }
    } while (j > 0);
  }
  n_free_rows = 0;
  for (int i = 0; i < n; i++) {
    if (x[i] < 0) {
      // Row i received no column; later stages will assign it.
      free_rows[n_free_rows++] = i;
    } else if (unique[i]) {
      // Reduction transfer: lower v at the assigned column by the smallest
      // reduced cost among the other columns of row i.
      const int j = x[i];
      float min = LARGE;
      for (int j2 = 0; j2 < n; j2++) {
        if (j2 == static_cast<int>(j)) {
          continue;
        }
        const float c = cost[i][j2] - v[j2];
        if (c < min) {
          min = c;
        }
      }
      v[j] -= min;
    }
  }
  FREE(unique);
  return n_free_rows;
}

/** Augmenting row reduction for a dense cost matrix.
 */
int _carr_dense(const int n,
                float *cost[],
                const int n_free_rows,
                int *free_rows,
                int *x,
                int *y,
                float *v) {
  int current = 0;
  int new_free_rows = 0;
  int rr_cnt = 0;
  while (current < n_free_rows) {
    int i0;
    int j1, j2;
    float v1, v2, v1_new;
    bool v1_lowers;
    rr_cnt++;
    const int free_i = free_rows[current++];
    j1 = 0;
    v1 = cost[free_i][0] - v[0];
    j2 = -1;
    v2 = LARGE;
    // Find the two smallest reduced costs in row free_i.
    for (int j = 1; j < n; j++) {
      const float c = cost[free_i][j] - v[j];
      if (c < v2) {
        if (c >= v1) {
          v2 = c;
          j2 = j;
        } else {
          v2 = v1;
          v1 = c;
          j2 = j1;
          j1 = j;
        }
      }
    }
    i0 = y[j1];
    v1_new = v[j1] - (v2 - v1);
    v1_lowers = v1_new < v[j1];
    if (rr_cnt < current * n) {
      if (v1_lowers) {
        v[j1] = v1_new;
      } else if (i0 >= 0 && j2 >= 0) {
        j1 = j2;
        i0 = y[j2];
      }
      if (i0 >= 0) {
        if (v1_lowers) {
          free_rows[--current] = i0;
        } else {
          free_rows[new_free_rows++] = i0;
        }
      }
    } else {
      if (i0 >= 0) {
        free_rows[new_free_rows++] = i0;
      }
    }
    x[free_i] = j1;
    y[j1] = free_i;
  }
  return new_free_rows;
}
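
// The shortest-path routines below keep the column indices in `cols`
// partitioned into three contiguous blocks (following the JV paper):
//   cols[0 .. lo)   columns already scanned (their d[j] is final),
//   cols[lo .. hi)  the SCAN list: columns with the current minimum d[j],
//   cols[hi .. n)   the TODO list: columns not yet reached.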
/** Find columns with minimum d[j] and put them on the SCAN list.
 */
int _find_dense(const int n, int lo, float *d, int *cols, int *y) {
  int hi = lo + 1;
  float mind = d[cols[lo]];
  for (int k = hi; k < n; k++) {
    int j = cols[k];
    if (d[j] <= mind) {
      if (d[j] < mind) {
        hi = lo;
        mind = d[j];
      }
      cols[k] = cols[hi];
      cols[hi++] = j;
    }
  }
  return hi;
}

// Scan all columns in TODO starting from arbitrary column in SCAN
// and try to decrease d of the TODO columns using the SCAN column.
int _scan_dense(const int n,
                float *cost[],
                int *plo,
                int *phi,
                float *d,
                int *cols,
                int *pred,
                int *y,
                float *v) {
  int lo = *plo;
  int hi = *phi;
  float h, cred_ij;
  while (lo != hi) {
    int j = cols[lo++];
    const int i = y[j];
    const float mind = d[j];
    h = cost[i][j] - v[j] - mind;
    // For all columns in TODO
    for (int k = hi; k < n; k++) {
      j = cols[k];
      cred_ij = cost[i][j] - v[j] - h;
      if (cred_ij < d[j]) {
        d[j] = cred_ij;
        pred[j] = i;
        if (cred_ij == mind) {
          if (y[j] < 0) {
            return j;
          }
          cols[k] = cols[hi];
          cols[hi++] = j;
        }
      }
    }
  }
  *plo = lo;
  *phi = hi;
  return -1;
}

/** Single iteration of modified Dijkstra shortest path algorithm as explained
 * in the JV paper.
 *
 * This is a dense matrix version.
 *
 * \return The closest free column index.
 */
int find_path_dense(const int n,
                    float *cost[],
                    const int start_i,
                    int *y,
                    float *v,
                    int *pred) {
  int lo = 0, hi = 0;
  int final_j = -1;
  int n_ready = 0;
  int *cols;
  float *d;
  NEW(cols, int, n);
  NEW(d, float, n);
  for (int i = 0; i < n; i++) {
    cols[i] = i;
    pred[i] = start_i;
    d[i] = cost[start_i][i] - v[i];
  }
  while (final_j == -1) {
    // No columns left on the SCAN list.
    if (lo == hi) {
      n_ready = lo;
      hi = _find_dense(n, lo, d, cols, y);
      for (int k = lo; k < hi; k++) {
        const int j = cols[k];
        if (y[j] < 0) {
          final_j = j;
        }
      }
    }
    if (final_j == -1) {
      final_j = _scan_dense(n, cost, &lo, &hi, d, cols, pred, y, v);
    }
  }
  {
    const float mind = d[cols[lo]];
    for (int k = 0; k < n_ready; k++) {
      const int j = cols[k];
      v[j] += d[j] - mind;
    }
  }
  FREE(cols);
  FREE(d);
  return final_j;
}

/** Augment for a dense cost matrix.
 */
int _ca_dense(const int n,
              float *cost[],
              const int n_free_rows,
              int *free_rows,
              int *x,
              int *y,
              float *v) {
  int *pred;
  NEW(pred, int, n);
  for (int *pfree_i = free_rows; pfree_i < free_rows + n_free_rows; pfree_i++) {
    int i = -1, j;
    int k = 0;
    j = find_path_dense(n, cost, *pfree_i, y, v, pred);
    while (i != *pfree_i) {
      i = pred[j];
      y[j] = i;
      SWAP_INDICES(j, x[i]);
      k++;
    }
  }
  FREE(pred);
  return 0;
}

/** Solve a dense linear assignment problem (LAP).
 */
int lapjv_internal(const cv::Mat &cost,
                   const bool extend_cost,
                   const float cost_limit,
                   int *x,
                   int *y) {
  int n_rows = cost.rows;
  int n_cols = cost.cols;
  int n;
  if (n_rows == n_cols) {
    n = n_rows;
  } else if (!extend_cost) {
    throw std::invalid_argument(
        "Square cost array expected. If cost is intentionally non-square, "
        "pass extend_cost=True.");
  }
  // Decide the size of the (possibly extended) square cost matrix.
  if (extend_cost || cost_limit < LARGE) {
    n = n_rows + n_cols;
  }
  cv::Mat cost_expand(n, n, CV_32F);
  float expand_value;
  if (cost_limit < LARGE) {
    expand_value = cost_limit / 2;
  } else {
    double max_v;
    minMaxLoc(cost, nullptr, &max_v);
    expand_value = static_cast<float>(max_v) + 1.;
  }
  // Fill the expanded matrix: original costs in the top-left block,
  // expand_value in the padding rows/columns, zeros in the bottom-right block.
  for (int i = 0; i < n; ++i) {
    for (int j = 0; j < n; ++j) {
      cost_expand.at<float>(i, j) = expand_value;
      if (i >= n_rows && j >= n_cols) {
        cost_expand.at<float>(i, j) = 0;
      } else if (i < n_rows && j < n_cols) {
        cost_expand.at<float>(i, j) = cost.at<float>(i, j);
      }
    }
  }
  // Convert Mat to pointer array.
  float **cost_ptr;
  NEW(cost_ptr, float *, n);
  for (int i = 0; i < n; ++i) {
    NEW(cost_ptr[i], float, n);
  }
  for (int i = 0; i < n; ++i) {
    for (int j = 0; j < n; ++j) {
      cost_ptr[i][j] = cost_expand.at<float>(i, j);
    }
  }
  int ret;
  int *free_rows;
  float *v;
  int *x_c;
  int *y_c;
  NEW(free_rows, int, n);
  NEW(v, float, n);
  NEW(x_c, int, n);
  NEW(y_c, int, n);
  // Column reduction + reduction transfer, then up to two rounds of
  // augmenting row reduction, then augmentation for any remaining free rows.
  ret = _ccrrt_dense(n, cost_ptr, free_rows, x_c, y_c, v);
  int i = 0;
  while (ret > 0 && i < 2) {
    ret = _carr_dense(n, cost_ptr, ret, free_rows, x_c, y_c, v);
    i++;
  }
  if (ret > 0) {
    ret = _ca_dense(n, cost_ptr, ret, free_rows, x_c, y_c, v);
  }
  FREE(v);
  FREE(free_rows);
  for (int i = 0; i < n; ++i) {
    FREE(cost_ptr[i]);
  }
  FREE(cost_ptr);
  if (ret != 0) {
    if (ret == -1) {
      throw "Out of memory.";
    }
    throw "Unknown error (lapjv_internal)";
  }
  // Map the solution of the expanded problem back to the original rows and
  // columns; assignments to padding entries are reported as -1.
  for (int i = 0; i < n; ++i) {
    if (i < n_rows) {
      x[i] = x_c[i];
      if (x[i] >= n_cols) {
        x[i] = -1;
      }
    }
    if (i < n_cols) {
      y[i] = y_c[i];
      if (y[i] >= n_rows) {
        y[i] = -1;
      }
    }
  }
  FREE(x_c);
  FREE(y_c);
  return ret;
}

}  // namespace PaddleDetection
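
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the original file). It assumes
// the declaration of lapjv_internal and the LARGE sentinel from
// include/lapjv.h, plus OpenCV's cv::Mat_ and std::vector:
//
//   cv::Mat cost = (cv::Mat_<float>(2, 3) << 1.0f, 2.0f, 3.0f,
//                                            2.0f, 0.5f, 4.0f);
//   std::vector<int> x(cost.rows), y(cost.cols);
//   // Non-square cost, so extend_cost=true; passing LARGE disables the
//   // cost limit.
//   PaddleDetection::lapjv_internal(cost, true, LARGE, x.data(), y.data());
//   // x[i] is the column assigned to row i, y[j] the row assigned to
//   // column j; -1 marks entries matched only to the padding block.
// ---------------------------------------------------------------------------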