# tracker_reid_motion.py

import numpy as np
from collections import deque
import os
import os.path as osp
import copy
import torch
import torch.nn.functional as F

from mot_online.kalman_filter import KalmanFilter
from mot_online.basetrack import BaseTrack, TrackState
from mot_online import matching
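
# This module implements a ByteTrack-style multi-object tracker that combines
# Kalman-filter motion prediction with appearance (ReID) embeddings.
# STrack holds per-object state (box, Kalman mean/covariance, smoothed feature);
# BYTETracker.update() runs the per-frame association cascade over high- and
# low-confidence detections.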


class STrack(BaseTrack):
    shared_kalman = KalmanFilter()

    def __init__(self, tlwh, score, temp_feat, buffer_size=30):
        # wait activate
        self._tlwh = np.asarray(tlwh, dtype=np.float64)
        self.kalman_filter = None
        self.mean, self.covariance = None, None
        self.is_activated = False

        self.score = score
        self.tracklet_len = 0

        # Create the feature buffer and smoothing factor before the first
        # update_features() call so the initial embedding is stored on this
        # instance rather than on the shared BaseTrack class attribute.
        self.smooth_feat = None
        self.features = deque([], maxlen=buffer_size)
        self.alpha = 0.9
        self.update_features(temp_feat)

    def update_features(self, feat):
        # L2-normalise the new embedding and blend it into an exponential
        # moving average (weight self.alpha) used for appearance matching.
        feat /= np.linalg.norm(feat)
        self.curr_feat = feat
        if self.smooth_feat is None:
            self.smooth_feat = feat
        else:
            self.smooth_feat = self.alpha * self.smooth_feat + (1 - self.alpha) * feat
        self.features.append(feat)
        self.smooth_feat /= np.linalg.norm(self.smooth_feat)

    def predict(self):
        mean_state = self.mean.copy()
        if self.state != TrackState.Tracked:
            mean_state[7] = 0
        self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)

    @staticmethod
    def multi_predict(stracks):
        if len(stracks) > 0:
            multi_mean = np.asarray([st.mean.copy() for st in stracks])
            multi_covariance = np.asarray([st.covariance for st in stracks])
            for i, st in enumerate(stracks):
                if st.state != TrackState.Tracked:
                    multi_mean[i][7] = 0
            multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(multi_mean, multi_covariance)
            for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
                stracks[i].mean = mean
                stracks[i].covariance = cov

    def activate(self, kalman_filter, frame_id):
        """Start a new tracklet"""
        self.kalman_filter = kalman_filter
        self.track_id = self.next_id()
        self.mean, self.covariance = self.kalman_filter.initiate(self.tlwh_to_xyah(self._tlwh))

        self.tracklet_len = 0
        self.state = TrackState.Tracked
        if frame_id == 1:
            self.is_activated = True
        # self.is_activated = True
        self.frame_id = frame_id
        self.start_frame = frame_id

    def re_activate(self, new_track, frame_id, new_id=False):
        self.mean, self.covariance = self.kalman_filter.update(
            self.mean, self.covariance, self.tlwh_to_xyah(new_track.tlwh)
        )
        self.update_features(new_track.curr_feat)
        self.tracklet_len = 0
        self.state = TrackState.Tracked
        self.is_activated = True
        self.frame_id = frame_id
        if new_id:
            self.track_id = self.next_id()

    def update(self, new_track, frame_id, update_feature=True):
        """
        Update a matched track
        :type new_track: STrack
        :type frame_id: int
        :type update_feature: bool
        :return:
        """
        self.frame_id = frame_id
        self.tracklet_len += 1

        new_tlwh = new_track.tlwh
        self.mean, self.covariance = self.kalman_filter.update(
            self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh))
        self.state = TrackState.Tracked
        self.is_activated = True

        self.score = new_track.score
        if update_feature:
            self.update_features(new_track.curr_feat)

    @property
    # @jit(nopython=True)
    def tlwh(self):
        """Get current position in bounding box format `(top left x, top left y,
        width, height)`.
        """
        if self.mean is None:
            return self._tlwh.copy()
        ret = self.mean[:4].copy()
        ret[2] *= ret[3]
        ret[:2] -= ret[2:] / 2
        return ret

    @property
    # @jit(nopython=True)
    def tlbr(self):
        """Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
        `(top left, bottom right)`.
        """
        ret = self.tlwh.copy()
        ret[2:] += ret[:2]
        return ret

    @staticmethod
    # @jit(nopython=True)
    def tlwh_to_xyah(tlwh):
        """Convert bounding box to format `(center x, center y, aspect ratio,
        height)`, where the aspect ratio is `width / height`.
        """
        ret = np.asarray(tlwh).copy()
        ret[:2] += ret[2:] / 2
        ret[2] /= ret[3]
        return ret

    def to_xyah(self):
        return self.tlwh_to_xyah(self.tlwh)

    @staticmethod
    # @jit(nopython=True)
    def tlbr_to_tlwh(tlbr):
        ret = np.asarray(tlbr).copy()
        ret[2:] -= ret[:2]
        return ret

    @staticmethod
    # @jit(nopython=True)
    def tlwh_to_tlbr(tlwh):
        ret = np.asarray(tlwh).copy()
        ret[2:] += ret[:2]
        return ret

    def __repr__(self):
        return 'OT_{}_({}-{})'.format(self.track_id, self.start_frame, self.end_frame)


class BYTETracker(object):
    def __init__(self, frame_rate=30):
        self.tracked_stracks = []  # type: list[STrack]
        self.lost_stracks = []  # type: list[STrack]
        self.removed_stracks = []  # type: list[STrack]

        self.frame_id = 0
        self.low_thresh = 0.2    # detections below this score are discarded
        self.track_thresh = 0.8  # detections above this score enter the first association
        self.det_thresh = self.track_thresh + 0.1  # minimum score to start a new track
        self.buffer_size = int(frame_rate / 30.0 * 30)
        self.max_time_lost = self.buffer_size  # frames a lost track is kept before removal
        self.kalman_filter = KalmanFilter()
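
    # Per-frame pipeline in update() below, following the step comments in the
    # method body:
    #   1.   Split detections into high-score (> track_thresh) and low-score
    #        (low_thresh..track_thresh) sets.
    #   2.   First association: tracked + lost tracks vs. high-score detections,
    #        using ReID embedding distance fused with Kalman motion gating.
    #   3.   Second association: remaining tracked tracks vs. remaining
    #        high-score detections, using IoU distance.
    #   3.5  Third association: still-unmatched tracks vs. low-score detections
    #        (the ByteTrack idea), using IoU distance.
    #   4.   Unmatched high-score detections above det_thresh start new tracks.
    #   5.   Lost tracks older than max_time_lost are removed.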

    # def update(self, output_results):
    def update(self, det_bboxes, det_labels, frame_id, track_feats):
        # self.frame_id += 1
        self.frame_id = frame_id + 1
        activated_starcks = []
        refind_stracks = []
        lost_stracks = []
        removed_stracks = []

        # scores = output_results[:, 4]
        # bboxes = output_results[:, :4]  # x1y1x2y2
        scores = det_bboxes[:, 4].cpu().numpy()
        bboxes = det_bboxes[:, :4].cpu().numpy()
        track_feature = F.normalize(track_feats).cpu().numpy()

        # High-score detections go into the first association.
        remain_inds = scores > self.track_thresh
        dets = bboxes[remain_inds]
        scores_keep = scores[remain_inds]
        id_feature = track_feature[remain_inds]

        # Low-score detections (between low_thresh and track_thresh) are kept
        # for the later low-score association step.
        inds_low = scores > self.low_thresh
        inds_high = scores < self.track_thresh
        inds_second = np.logical_and(inds_low, inds_high)
        dets_second = bboxes[inds_second]
        scores_second = scores[inds_second]
        id_feature_second = track_feature[inds_second]

        if len(dets) > 0:
            '''Detections'''
            detections = [STrack(STrack.tlbr_to_tlwh(tlbr), s, f) for
                          (tlbr, s, f) in zip(dets, scores_keep, id_feature)]
        else:
            detections = []

        ''' Add newly detected tracklets to tracked_stracks'''
        unconfirmed = []
        tracked_stracks = []  # type: list[STrack]
        for track in self.tracked_stracks:
            if not track.is_activated:
                unconfirmed.append(track)
            else:
                tracked_stracks.append(track)

        ''' Step 2: First association, with ReID embeddings and Kalman motion gating'''
        strack_pool = joint_stracks(tracked_stracks, self.lost_stracks)
        # Predict the current location with KF
        STrack.multi_predict(strack_pool)
        dists = matching.embedding_distance(strack_pool, detections)
        dists = matching.fuse_motion(self.kalman_filter, dists, strack_pool, detections)
        matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.6)
        # dists = matching.iou_distance(strack_pool, detections)
        # matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.8)

        for itracked, idet in matches:
            track = strack_pool[itracked]
            det = detections[idet]
            if track.state == TrackState.Tracked:
                track.update(detections[idet], self.frame_id)
                activated_starcks.append(track)
            else:
                track.re_activate(det, self.frame_id, new_id=False)
                refind_stracks.append(track)

        ''' Step 3: Second association, with IOU'''
        detections = [detections[i] for i in u_detection]
        r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
        dists = matching.iou_distance(r_tracked_stracks, detections)
        matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.5)

        for itracked, idet in matches:
            track = r_tracked_stracks[itracked]
            det = detections[idet]
            if track.state == TrackState.Tracked:
                track.update(det, self.frame_id)
                activated_starcks.append(track)
            else:
                track.re_activate(det, self.frame_id, new_id=False)
                refind_stracks.append(track)

        ''' Step 3.5: Third association, matching the remaining tracks to the low-score detections with IOU'''
        if len(dets_second) > 0:
            '''Detections'''
            detections_second = [STrack(STrack.tlbr_to_tlwh(tlbr), s, f) for
                                 (tlbr, s, f) in zip(dets_second, scores_second, id_feature_second)]
        else:
            detections_second = []
        second_tracked_stracks = [r_tracked_stracks[i] for i in u_track if r_tracked_stracks[i].state == TrackState.Tracked]
        dists = matching.iou_distance(second_tracked_stracks, detections_second)
        matches, u_track, u_detection_second = matching.linear_assignment(dists, thresh=0.5)

        for itracked, idet in matches:
            track = second_tracked_stracks[itracked]
            det = detections_second[idet]
            if track.state == TrackState.Tracked:
                track.update(det, self.frame_id)
                activated_starcks.append(track)
            else:
                track.re_activate(det, self.frame_id, new_id=False)
                refind_stracks.append(track)

        # Tracks still unmatched after all associations are marked lost.
        for it in u_track:
            # track = r_tracked_stracks[it]
            track = second_tracked_stracks[it]
            if not track.state == TrackState.Lost:
                track.mark_lost()
                lost_stracks.append(track)

        '''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
        detections = [detections[i] for i in u_detection]
        dists = matching.iou_distance(unconfirmed, detections)
        matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
        for itracked, idet in matches:
            unconfirmed[itracked].update(detections[idet], self.frame_id)
            activated_starcks.append(unconfirmed[itracked])
        for it in u_unconfirmed:
            track = unconfirmed[it]
            track.mark_removed()
            removed_stracks.append(track)
  257. """ Step 4: Init new stracks"""
  258. for inew in u_detection:
  259. track = detections[inew]
  260. if track.score < self.det_thresh:
  261. continue
  262. track.activate(self.kalman_filter, self.frame_id)
  263. activated_starcks.append(track)
  264. """ Step 5: Update state"""
  265. for track in self.lost_stracks:
  266. if self.frame_id - track.end_frame > self.max_time_lost:
  267. track.mark_removed()
  268. removed_stracks.append(track)

        # print('Remained match {} s'.format(t4 - t3))

        self.tracked_stracks = [t for t in self.tracked_stracks if t.state == TrackState.Tracked]
        self.tracked_stracks = joint_stracks(self.tracked_stracks, activated_starcks)
        self.tracked_stracks = joint_stracks(self.tracked_stracks, refind_stracks)
        self.lost_stracks = sub_stracks(self.lost_stracks, self.tracked_stracks)
        self.lost_stracks.extend(lost_stracks)
        self.lost_stracks = sub_stracks(self.lost_stracks, self.removed_stracks)
        self.removed_stracks.extend(removed_stracks)
        self.tracked_stracks, self.lost_stracks = remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks)

        # Collect the activated tracks as the per-frame output.
        output_stracks = [track for track in self.tracked_stracks if track.is_activated]
        # return output_stracks

        bboxes = []
        labels = []
        ids = []
        for track in output_stracks:
            if track.is_activated:
                track_bbox = track.tlbr
                bboxes.append([track_bbox[0], track_bbox[1], track_bbox[2], track_bbox[3], track.score])
                labels.append(0)
                ids.append(track.track_id)
        return torch.tensor(bboxes), torch.tensor(labels), torch.tensor(ids)
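

# Module-level helpers used by BYTETracker.update(): joint_stracks merges two
# track lists without duplicating track ids, sub_stracks removes the tracks of
# the second list from the first, and remove_duplicate_stracks resolves
# near-duplicate tracked/lost pairs by keeping the longer-lived track.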

def joint_stracks(tlista, tlistb):
    exists = {}
    res = []
    for t in tlista:
        exists[t.track_id] = 1
        res.append(t)
    for t in tlistb:
        tid = t.track_id
        if not exists.get(tid, 0):
            exists[tid] = 1
            res.append(t)
    return res


def sub_stracks(tlista, tlistb):
    stracks = {}
    for t in tlista:
        stracks[t.track_id] = t
    for t in tlistb:
        tid = t.track_id
        if stracks.get(tid, 0):
            del stracks[tid]
    return list(stracks.values())


def remove_duplicate_stracks(stracksa, stracksb):
    pdist = matching.iou_distance(stracksa, stracksb)
    pairs = np.where(pdist < 0.15)
    dupa, dupb = list(), list()
    for p, q in zip(*pairs):
        timep = stracksa[p].frame_id - stracksa[p].start_frame
        timeq = stracksb[q].frame_id - stracksb[q].start_frame
        if timep > timeq:
            dupb.append(q)
        else:
            dupa.append(p)
    resa = [t for i, t in enumerate(stracksa) if i not in dupa]
    resb = [t for i, t in enumerate(stracksb) if i not in dupb]
    return resa, resb


def remove_fp_stracks(stracksa, n_frame=10):
    # Note: this helper assumes each track keeps a per-frame score history in
    # t.score_list, which the STrack class above does not maintain; it is only
    # usable with tracks that record their scores.
    remain = []
    for t in stracksa:
        score_5 = t.score_list[-n_frame:]
        score_5 = np.array(score_5, dtype=np.float32)
        index = score_5 < 0.45
        num = np.sum(index)
        if num < n_frame:
            remain.append(t)
    return remain
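

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the tracker itself). It
# assumes det_bboxes is an (N, 5) tensor of [x1, y1, x2, y2, score],
# track_feats is an (N, D) ReID embedding tensor, and that the mot_online
# helpers behave as in the upstream ByteTrack/FairMOT code. The dummy inputs
# below stand in for a detector and a ReID head.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    tracker = BYTETracker(frame_rate=30)

    # Two dummy detections for frame 0; only the first is confident enough
    # (score >= det_thresh = 0.9) to start a new track.
    det_bboxes = torch.tensor([[100., 100., 150., 200., 0.95],
                               [300., 120., 360., 260., 0.85]])
    det_labels = torch.tensor([0, 0])
    track_feats = torch.rand(2, 128)  # assumed embedding dimension

    bboxes, labels, ids = tracker.update(det_bboxes, det_labels, frame_id=0, track_feats=track_feats)
    print(bboxes, labels, ids)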