# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import cv2
import glob
import paddle
import numpy as np

from paddlex.ppdet.core.workspace import create
from paddlex.ppdet.utils.checkpoint import load_weight, load_pretrain_weight
from paddlex.ppdet.modeling.mot.utils import Timer, load_det_results
from paddlex.ppdet.modeling.mot import visualization as mot_vis
from paddlex.ppdet.metrics import Metric, MOTMetric
import paddlex.ppdet.utils.stats as stats

from .callbacks import Callback, ComposeCallback
from .export_utils import _dump_infer_config

from paddlex.ppdet.utils.logger import setup_logger
logger = setup_logger(__name__)

__all__ = ['Tracker']


class Tracker(object):
    def __init__(self, cfg, mode='eval'):
        self.cfg = cfg
        assert mode.lower() in ['test', 'eval'], \
            "mode should be 'test' or 'eval'"
        self.mode = mode.lower()
        self.optimizer = None

        # build MOT data loader
        self.dataset = cfg['{}MOTDataset'.format(self.mode.capitalize())]

        # build model
        self.model = create(cfg.architecture)

        self.status = {}
        self.start_epoch = 0

        # initialize default callbacks
        self._init_callbacks()

        # initialize default metrics
        self._init_metrics()
        self._reset_metrics()

    def _init_callbacks(self):
        self._callbacks = []
        self._compose_callback = None

    def _init_metrics(self):
        if self.mode in ['test']:
            self._metrics = []
            return

        if self.cfg.metric == 'MOT':
            self._metrics = [MOTMetric(), ]
        else:
            logger.warning("Metric type {} is not supported".format(
                self.cfg.metric))
            self._metrics = []

    def _reset_metrics(self):
        for metric in self._metrics:
            metric.reset()

    def register_callbacks(self, callbacks):
        callbacks = [c for c in list(callbacks) if c is not None]
        for c in callbacks:
            assert isinstance(c, Callback), \
                "callbacks should be instances of subclass of Callback"
        self._callbacks.extend(callbacks)
        self._compose_callback = ComposeCallback(self._callbacks)

    def register_metrics(self, metrics):
        metrics = [m for m in list(metrics) if m is not None]
        for m in metrics:
            assert isinstance(m, Metric), \
                "metrics should be instances of subclass of Metric"
        self._metrics.extend(metrics)

    def load_weights_jde(self, weights):
        # JDE/FairMOT: a single weights file covers the whole model
        load_weight(self.model, weights, self.optimizer)

    def load_weights_sde(self, det_weights, reid_weights):
        # DeepSORT: the detector (if any) and the ReID model are loaded separately
        if self.model.detector:
            load_weight(self.model.detector, det_weights, self.optimizer)
        load_weight(self.model.reid, reid_weights, self.optimizer)

    def _eval_seq_jde(self,
                      dataloader,
                      save_dir=None,
                      show_image=False,
                      frame_rate=30):
        if save_dir:
            if not os.path.exists(save_dir): os.makedirs(save_dir)
        tracker = self.model.tracker
        # scale the lost-track buffer to the sequence frame rate
        tracker.max_time_lost = int(frame_rate / 30.0 * tracker.track_buffer)

        timer = Timer()
        results = []
        frame_id = 0
        self.status['mode'] = 'track'
        self.model.eval()
        for step_id, data in enumerate(dataloader):
            self.status['step_id'] = step_id
            if frame_id % 40 == 0:
                logger.info('Processing frame {} ({:.2f} fps)'.format(
                    frame_id, 1. / max(1e-5, timer.average_time)))

            # forward
            timer.tic()
            online_targets = self.model(data)

            online_tlwhs, online_ids = [], []
            for t in online_targets:
                tlwh = t.tlwh
                tid = t.track_id
                vertical = tlwh[2] / tlwh[3] > 1.6
                # keep boxes that are large enough and not much wider than
                # tall (pedestrian-shaped)
                if tlwh[2] * tlwh[3] > tracker.min_box_area and not vertical:
                    online_tlwhs.append(tlwh)
                    online_ids.append(tid)
            timer.toc()

            # save results
            results.append((frame_id + 1, online_tlwhs, online_ids))
            self.save_results(data, frame_id, online_ids, online_tlwhs,
                              timer.average_time, show_image, save_dir)
            frame_id += 1

        return results, frame_id, timer.average_time, timer.calls

    def _eval_seq_sde(self,
                      dataloader,
                      save_dir=None,
                      show_image=False,
                      frame_rate=30,
                      det_file=''):
        if save_dir:
            if not os.path.exists(save_dir): os.makedirs(save_dir)
        tracker = self.model.tracker
        use_detector = False if not self.model.detector else True

        timer = Timer()
        results = []
        frame_id = 0
        self.status['mode'] = 'track'
        self.model.eval()
        self.model.reid.eval()
        if not use_detector:
            # without a detector, read pre-computed detections from det_file
            dets_list = load_det_results(det_file, len(dataloader))
            logger.info('Finished loading detection results file {}.'.format(
                det_file))

        for step_id, data in enumerate(dataloader):
            self.status['step_id'] = step_id
            if frame_id % 40 == 0:
                logger.info('Processing frame {} ({:.2f} fps)'.format(
                    frame_id, 1. / max(1e-5, timer.average_time)))

            timer.tic()
            if not use_detector:
                timer.tic()
                dets = dets_list[frame_id]
                bbox_tlwh = paddle.to_tensor(dets['bbox'], dtype='float32')
                pred_scores = paddle.to_tensor(dets['score'], dtype='float32')
                if bbox_tlwh.shape[0] > 0:
                    # convert tlwh boxes to x1y1x2y2
                    pred_bboxes = paddle.concat(
                        (bbox_tlwh[:, 0:2],
                         bbox_tlwh[:, 2:4] + bbox_tlwh[:, 0:2]),
                        axis=1)
                else:
                    pred_bboxes = []
                    pred_scores = []
                data.update({
                    'pred_bboxes': pred_bboxes,
                    'pred_scores': pred_scores
                })

            # forward
            timer.tic()
            online_targets = self.model(data)

            online_tlwhs = []
            online_ids = []
            for track in online_targets:
                if not track.is_confirmed() or track.time_since_update > 1:
                    continue
                tlwh = track.to_tlwh()
                track_id = track.track_id
                online_tlwhs.append(tlwh)
                online_ids.append(track_id)
            timer.toc()

            # save results
            results.append((frame_id + 1, online_tlwhs, online_ids))
            self.save_results(data, frame_id, online_ids, online_tlwhs,
                              timer.average_time, show_image, save_dir)
            frame_id += 1

        return results, frame_id, timer.average_time, timer.calls

    def mot_evaluate(self,
                     data_root,
                     seqs,
                     output_dir,
                     data_type='mot',
                     model_type='JDE',
                     save_images=False,
                     save_videos=False,
                     show_image=False,
                     det_results_dir=''):
        if not os.path.exists(output_dir): os.makedirs(output_dir)
        result_root = os.path.join(output_dir, 'mot_results')
        if not os.path.exists(result_root): os.makedirs(result_root)
        assert data_type in ['mot', 'kitti'], \
            "data_type should be 'mot' or 'kitti'"
        assert model_type in ['JDE', 'DeepSORT', 'FairMOT'], \
            "model_type should be 'JDE', 'DeepSORT' or 'FairMOT'"

        # run tracking
        n_frame = 0
        timer_avgs, timer_calls = [], []
        for seq in seqs:
            save_dir = os.path.join(output_dir, 'mot_outputs',
                                    seq) if save_images or save_videos else None
            logger.info('start seq: {}'.format(seq))

            infer_dir = os.path.join(data_root, seq, 'img1')
            images = self.get_infer_images(infer_dir)
            self.dataset.set_images(images)

            dataloader = create('EvalMOTReader')(self.dataset, 0)
            result_filename = os.path.join(result_root, '{}.txt'.format(seq))

            # read the sequence frame rate from seqinfo.ini
            meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read()
            frame_rate = int(meta_info[meta_info.find('frameRate') + 10:
                                       meta_info.find('\nseqLength')])

            if model_type in ['JDE', 'FairMOT']:
                results, nf, ta, tc = self._eval_seq_jde(
                    dataloader,
                    save_dir=save_dir,
                    show_image=show_image,
                    frame_rate=frame_rate)
            elif model_type in ['DeepSORT']:
                results, nf, ta, tc = self._eval_seq_sde(
                    dataloader,
                    save_dir=save_dir,
                    show_image=show_image,
                    frame_rate=frame_rate,
                    det_file=os.path.join(det_results_dir,
                                          '{}.txt'.format(seq)))
            else:
                raise ValueError(model_type)

            self.write_mot_results(result_filename, results, data_type)
            n_frame += nf
            timer_avgs.append(ta)
            timer_calls.append(tc)

            if save_videos:
                output_video_path = os.path.join(save_dir, '..',
                                                 '{}_vis.mp4'.format(seq))
                cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg {}'.format(
                    save_dir, output_video_path)
                os.system(cmd_str)
                logger.info('Saved video in {}.'.format(output_video_path))

            logger.info('Evaluate seq: {}'.format(seq))
            # update metrics
            for metric in self._metrics:
                metric.update(data_root, seq, data_type, result_root,
                              result_filename)

        timer_avgs = np.asarray(timer_avgs)
        timer_calls = np.asarray(timer_calls)
        all_time = np.dot(timer_avgs, timer_calls)
        avg_time = all_time / np.sum(timer_calls)
        logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(
            all_time, 1.0 / avg_time))

        # accumulate metrics and log them out
        for metric in self._metrics:
            metric.accumulate()
            metric.log()
        # reset metric states, since metrics may be evaluated multiple times
        self._reset_metrics()

    def get_infer_images(self, infer_dir):
        assert infer_dir is None or os.path.isdir(infer_dir), \
            "{} is not a directory".format(infer_dir)
        images = set()
        assert os.path.isdir(infer_dir), \
            "infer_dir {} is not a directory".format(infer_dir)
        exts = ['jpg', 'jpeg', 'png', 'bmp']
        exts += [ext.upper() for ext in exts]
        for ext in exts:
            images.update(glob.glob('{}/*.{}'.format(infer_dir, ext)))
        images = list(images)
        images.sort()
        assert len(images) > 0, "no image found in {}".format(infer_dir)
        logger.info("Found {} inference images in total.".format(len(images)))
        return images

    def mot_predict(self,
                    video_file,
                    output_dir,
                    data_type='mot',
                    model_type='JDE',
                    save_images=False,
                    save_videos=True,
                    show_image=False,
                    det_results_dir=''):
        if not os.path.exists(output_dir): os.makedirs(output_dir)
        result_root = os.path.join(output_dir, 'mot_results')
        if not os.path.exists(result_root): os.makedirs(result_root)
        assert data_type in ['mot', 'kitti'], \
            "data_type should be 'mot' or 'kitti'"
        assert model_type in ['JDE', 'DeepSORT', 'FairMOT'], \
            "model_type should be 'JDE', 'DeepSORT' or 'FairMOT'"

        # run tracking
        seq = video_file.split('/')[-1].split('.')[0]
        save_dir = os.path.join(output_dir, 'mot_outputs',
                                seq) if save_images or save_videos else None
        logger.info('Starting tracking {}'.format(video_file))

        self.dataset.set_video(video_file)
        dataloader = create('TestMOTReader')(self.dataset, 0)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        frame_rate = self.dataset.frame_rate

        if model_type in ['JDE', 'FairMOT']:
            results, nf, ta, tc = self._eval_seq_jde(
                dataloader,
                save_dir=save_dir,
                show_image=show_image,
                frame_rate=frame_rate)
        elif model_type in ['DeepSORT']:
            results, nf, ta, tc = self._eval_seq_sde(
                dataloader,
                save_dir=save_dir,
                show_image=show_image,
                frame_rate=frame_rate,
                det_file=os.path.join(det_results_dir, '{}.txt'.format(seq)))
        else:
            raise ValueError(model_type)

        if save_videos:
            output_video_path = os.path.join(save_dir, '..',
                                             '{}_vis.mp4'.format(seq))
            cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg {}'.format(
                save_dir, output_video_path)
            os.system(cmd_str)
            logger.info('Saved video in {}.'.format(output_video_path))

    def write_mot_results(self, filename, results, data_type='mot'):
        if data_type in ['mot', 'mcmot', 'lab']:
            # MOTChallenge format: frame,id,x1,y1,w,h,score(=1),-1,-1,-1
            save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
        elif data_type == 'kitti':
            # KITTI tracking label format, with the object class fixed to 'pedestrian'
            save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n'
        else:
            raise ValueError(data_type)

        with open(filename, 'w') as f:
            for frame_id, tlwhs, track_ids in results:
                if data_type == 'kitti':
                    frame_id -= 1
                for tlwh, track_id in zip(tlwhs, track_ids):
                    if track_id < 0:
                        continue
                    x1, y1, w, h = tlwh
                    x2, y2 = x1 + w, y1 + h
                    line = save_format.format(
                        frame=frame_id,
                        id=track_id,
                        x1=x1,
                        y1=y1,
                        x2=x2,
                        y2=y2,
                        w=w,
                        h=h)
                    f.write(line)
        logger.info('MOT results saved in {}'.format(filename))

    def save_results(self, data, frame_id, online_ids, online_tlwhs,
                     average_time, show_image, save_dir):
        if show_image or save_dir is not None:
            assert 'ori_image' in data
            img0 = data['ori_image'].numpy()[0]
            online_im = mot_vis.plot_tracking(
                img0,
                online_tlwhs,
                online_ids,
                frame_id=frame_id,
                fps=1. / average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(
                os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                online_im)
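

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module): a minimal
# example of how a Tracker might be driven for MOT evaluation. The config
# path, weight path, dataset layout, and sequence names below are assumptions
# and would need to be adapted to the surrounding project.
#
#   from paddlex.ppdet.core.workspace import load_config
#
#   cfg = load_config('configs/mot/fairmot/fairmot_dla34_30e_1088x608.yml')  # hypothetical config
#   tracker = Tracker(cfg, mode='eval')
#   tracker.load_weights_jde('output/fairmot_dla34_30e/model_final')         # hypothetical weights
#   tracker.mot_evaluate(
#       data_root='dataset/mot/MOT16/images/train',                          # hypothetical data root
#       seqs=['MOT16-02', 'MOT16-04'],
#       output_dir='output',
#       data_type='mot',
#       model_type='FairMOT')
# ---------------------------------------------------------------------------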