# mot_evaluator.py

from collections import defaultdict
from loguru import logger
from tqdm import tqdm

import torch

from yolox.utils import (
    gather,
    is_main_process,
    postprocess,
    synchronize,
    time_synchronized,
    xyxy2xywh
)
from yolox.tracker.byte_tracker import BYTETracker
from yolox.sort_tracker.sort import Sort
from yolox.deepsort_tracker.deepsort import DeepSort
from yolox.motdt_tracker.motdt_tracker import OnlineTracker

import contextlib
import io
import os
import itertools
import json
import tempfile
import time


def write_results(filename, results):
    save_format = '{frame},{id},{x1},{y1},{w},{h},{s},-1,-1,-1\n'
    with open(filename, 'w') as f:
        for frame_id, tlwhs, track_ids, scores in results:
            for tlwh, track_id, score in zip(tlwhs, track_ids, scores):
                if track_id < 0:
                    continue
                x1, y1, w, h = tlwh
                line = save_format.format(frame=frame_id, id=track_id, x1=round(x1, 1), y1=round(y1, 1), w=round(w, 1), h=round(h, 1), s=round(score, 2))
                f.write(line)
    logger.info('save results to {}'.format(filename))

def write_results_no_score(filename, results):
    save_format = '{frame},{id},{x1},{y1},{w},{h},-1,-1,-1,-1\n'
    with open(filename, 'w') as f:
        for frame_id, tlwhs, track_ids in results:
            for tlwh, track_id in zip(tlwhs, track_ids):
                if track_id < 0:
                    continue
                x1, y1, w, h = tlwh
                line = save_format.format(frame=frame_id, id=track_id, x1=round(x1, 1), y1=round(y1, 1), w=round(w, 1), h=round(h, 1))
                f.write(line)
    logger.info('save results to {}'.format(filename))
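
# Both writers emit one MOTChallenge-style text file per sequence, one line per
# (frame, track) pair:
#     frame,id,bb_left,bb_top,bb_width,bb_height,conf,-1,-1,-1
# e.g. "1,2,1338.0,418.0,167.0,379.0,0.93,-1,-1,-1" (illustrative values).
# write_results_no_score writes -1 in the conf column as well; the trailing -1
# fields are the unused 3D world coordinates of the MOT format.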


class MOTEvaluator:
    """
    MOT evaluation class. Detection results are evaluated with the COCO API,
    and tracking results are written per sequence in MOTChallenge format.
    """

    def __init__(
        self, args, dataloader, img_size, confthre, nmsthre, num_classes):
        """
        Args:
            dataloader (Dataloader): evaluate dataloader.
            img_size (int): image size after preprocess. images are resized
                to squares whose shape is (img_size, img_size).
            confthre (float): confidence threshold ranging from 0 to 1, which
                is defined in the config file.
            nmsthre (float): IoU threshold of non-max suppression ranging from 0 to 1.
        """
        self.dataloader = dataloader
        self.img_size = img_size
        self.confthre = confthre
        self.nmsthre = nmsthre
        self.num_classes = num_classes
        self.args = args

    def evaluate(
        self,
        model,
        distributed=False,
        half=False,
        trt_file=None,
        decoder=None,
        test_size=None,
        result_folder=None
    ):
        """
        COCO average precision (AP) Evaluation. Iterate inference on the test dataset;
        the results are evaluated by the COCO API.

        NOTE: This function will change training mode to False, please save states if needed.

        Args:
            model : model to evaluate.

        Returns:
            ap50_95 (float) : COCO AP of IoU=50:95
            ap50 (float) : COCO AP of IoU=50
            summary (str): summary info of evaluation.
        """
        # TODO half to amp_test
        tensor_type = torch.cuda.HalfTensor if half else torch.cuda.FloatTensor
        model = model.eval()
        if half:
            model = model.half()
        ids = []
        data_list = []
        results = []
        video_names = defaultdict()
        progress_bar = tqdm if is_main_process() else iter

        inference_time = 0
        track_time = 0
        n_samples = len(self.dataloader) - 1

        if trt_file is not None:
            from torch2trt import TRTModule

            model_trt = TRTModule()
            model_trt.load_state_dict(torch.load(trt_file))

            x = torch.ones(1, 3, test_size[0], test_size[1]).cuda()
            model(x)
            model = model_trt

        tracker = BYTETracker(self.args)
        ori_thresh = self.args.track_thresh

        for cur_iter, (imgs, _, info_imgs, ids) in enumerate(
            progress_bar(self.dataloader)
        ):
            with torch.no_grad():
                # init tracker
                frame_id = info_imgs[2].item()
                video_id = info_imgs[3].item()
                img_file_name = info_imgs[4]
                video_name = img_file_name[0].split('/')[0]
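
                # Per-sequence overrides used for the MOT17/MOT20 test sets:
                # track_buffer is set to roughly the sequence frame rate
                # (14 / 25 / 30 fps), and track_thresh is tuned for individual
                # sequences; every other sequence falls back to the CLI value
                # saved in ori_thresh.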
                if video_name == 'MOT17-05-FRCNN' or video_name == 'MOT17-06-FRCNN':
                    self.args.track_buffer = 14
                elif video_name == 'MOT17-13-FRCNN' or video_name == 'MOT17-14-FRCNN':
                    self.args.track_buffer = 25
                else:
                    self.args.track_buffer = 30

                if video_name == 'MOT17-01-FRCNN':
                    self.args.track_thresh = 0.65
                elif video_name == 'MOT17-06-FRCNN':
                    self.args.track_thresh = 0.65
                elif video_name == 'MOT17-12-FRCNN':
                    self.args.track_thresh = 0.7
                elif video_name == 'MOT17-14-FRCNN':
                    self.args.track_thresh = 0.67
                elif video_name in ['MOT20-06', 'MOT20-08']:
                    self.args.track_thresh = 0.3
                else:
                    self.args.track_thresh = ori_thresh

                if video_name not in video_names:
                    video_names[video_id] = video_name

                if frame_id == 1:
                    tracker = BYTETracker(self.args)
                    if len(results) != 0:
                        result_filename = os.path.join(result_folder, '{}.txt'.format(video_names[video_id - 1]))
                        write_results(result_filename, results)
                        results = []

                imgs = imgs.type(tensor_type)

                # skip the last iters since the batch size might not be enough for batch inference
                is_time_record = cur_iter < len(self.dataloader) - 1
                if is_time_record:
                    start = time.time()

                outputs = model(imgs)
                if decoder is not None:
                    outputs = decoder(outputs, dtype=outputs.type())

                outputs = postprocess(outputs, self.num_classes, self.confthre, self.nmsthre)

                if is_time_record:
                    infer_end = time_synchronized()
                    inference_time += infer_end - start

            output_results = self.convert_to_coco_format(outputs, info_imgs, ids)
            data_list.extend(output_results)

            # run tracking
            if outputs[0] is not None:
                online_targets = tracker.update(outputs[0], info_imgs, self.img_size)
                online_tlwhs = []
                online_ids = []
                online_scores = []
                for t in online_targets:
                    tlwh = t.tlwh
                    tid = t.track_id
                    # despite the name, this flags boxes wider than 1.6x their height
                    vertical = tlwh[2] / tlwh[3] > 1.6
                    if tlwh[2] * tlwh[3] > self.args.min_box_area and not vertical:
                        online_tlwhs.append(tlwh)
                        online_ids.append(tid)
                        online_scores.append(t.score)
                # save results
                results.append((frame_id, online_tlwhs, online_ids, online_scores))

            if is_time_record:
                track_end = time_synchronized()
                track_time += track_end - infer_end

            if cur_iter == len(self.dataloader) - 1:
                result_filename = os.path.join(result_folder, '{}.txt'.format(video_names[video_id]))
                write_results(result_filename, results)

        statistics = torch.cuda.FloatTensor([inference_time, track_time, n_samples])
        if distributed:
            data_list = gather(data_list, dst=0)
            data_list = list(itertools.chain(*data_list))
            torch.distributed.reduce(statistics, dst=0)

        eval_results = self.evaluate_prediction(data_list, statistics)
        synchronize()
        return eval_results
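
    # NOTE: evaluate() (and the variants below) returns detection AP only; tracking
    # quality is judged from the per-sequence txt files written to result_folder,
    # which external MOT tooling (e.g. py-motmetrics or TrackEval) consumes separately.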

    def evaluate_sort(
        self,
        model,
        distributed=False,
        half=False,
        trt_file=None,
        decoder=None,
        test_size=None,
        result_folder=None
    ):
        """
        COCO average precision (AP) Evaluation. Iterate inference on the test dataset;
        the results are evaluated by the COCO API.

        NOTE: This function will change training mode to False, please save states if needed.

        Args:
            model : model to evaluate.

        Returns:
            ap50_95 (float) : COCO AP of IoU=50:95
            ap50 (float) : COCO AP of IoU=50
            summary (str): summary info of evaluation.
        """
        # TODO half to amp_test
        tensor_type = torch.cuda.HalfTensor if half else torch.cuda.FloatTensor
        model = model.eval()
        if half:
            model = model.half()
        ids = []
        data_list = []
        results = []
        video_names = defaultdict()
        progress_bar = tqdm if is_main_process() else iter

        inference_time = 0
        track_time = 0
        n_samples = len(self.dataloader) - 1

        if trt_file is not None:
            from torch2trt import TRTModule

            model_trt = TRTModule()
            model_trt.load_state_dict(torch.load(trt_file))

            x = torch.ones(1, 3, test_size[0], test_size[1]).cuda()
            model(x)
            model = model_trt

        tracker = Sort(self.args.track_thresh)

        for cur_iter, (imgs, _, info_imgs, ids) in enumerate(
            progress_bar(self.dataloader)
        ):
            with torch.no_grad():
                # init tracker
                frame_id = info_imgs[2].item()
                video_id = info_imgs[3].item()
                img_file_name = info_imgs[4]
                video_name = img_file_name[0].split('/')[0]

                if video_name not in video_names:
                    video_names[video_id] = video_name

                if frame_id == 1:
                    tracker = Sort(self.args.track_thresh)
                    if len(results) != 0:
                        result_filename = os.path.join(result_folder, '{}.txt'.format(video_names[video_id - 1]))
                        write_results_no_score(result_filename, results)
                        results = []

                imgs = imgs.type(tensor_type)

                # skip the last iters since the batch size might not be enough for batch inference
                is_time_record = cur_iter < len(self.dataloader) - 1
                if is_time_record:
                    start = time.time()

                outputs = model(imgs)
                if decoder is not None:
                    outputs = decoder(outputs, dtype=outputs.type())

                outputs = postprocess(outputs, self.num_classes, self.confthre, self.nmsthre)

                if is_time_record:
                    infer_end = time_synchronized()
                    inference_time += infer_end - start

            output_results = self.convert_to_coco_format(outputs, info_imgs, ids)
            data_list.extend(output_results)

            # run tracking
            online_targets = tracker.update(outputs[0], info_imgs, self.img_size)
            online_tlwhs = []
            online_ids = []
            for t in online_targets:
                # tracker outputs arrays of [x1, y1, x2, y2, track_id]; convert to tlwh
                tlwh = [t[0], t[1], t[2] - t[0], t[3] - t[1]]
                tid = t[4]
                vertical = tlwh[2] / tlwh[3] > 1.6
                if tlwh[2] * tlwh[3] > self.args.min_box_area and not vertical:
                    online_tlwhs.append(tlwh)
                    online_ids.append(tid)
            # save results
            results.append((frame_id, online_tlwhs, online_ids))

            if is_time_record:
                track_end = time_synchronized()
                track_time += track_end - infer_end

            if cur_iter == len(self.dataloader) - 1:
                result_filename = os.path.join(result_folder, '{}.txt'.format(video_names[video_id]))
                write_results_no_score(result_filename, results)

        statistics = torch.cuda.FloatTensor([inference_time, track_time, n_samples])
        if distributed:
            data_list = gather(data_list, dst=0)
            data_list = list(itertools.chain(*data_list))
            torch.distributed.reduce(statistics, dst=0)

        eval_results = self.evaluate_prediction(data_list, statistics)
        synchronize()
        return eval_results

    def evaluate_deepsort(
        self,
        model,
        distributed=False,
        half=False,
        trt_file=None,
        decoder=None,
        test_size=None,
        result_folder=None,
        model_folder=None
    ):
        """
        COCO average precision (AP) Evaluation. Iterate inference on the test dataset;
        the results are evaluated by the COCO API.

        NOTE: This function will change training mode to False, please save states if needed.

        Args:
            model : model to evaluate.

        Returns:
            ap50_95 (float) : COCO AP of IoU=50:95
            ap50 (float) : COCO AP of IoU=50
            summary (str): summary info of evaluation.
        """
        # TODO half to amp_test
        tensor_type = torch.cuda.HalfTensor if half else torch.cuda.FloatTensor
        model = model.eval()
        if half:
            model = model.half()
        ids = []
        data_list = []
        results = []
        video_names = defaultdict()
        progress_bar = tqdm if is_main_process() else iter

        inference_time = 0
        track_time = 0
        n_samples = len(self.dataloader) - 1

        if trt_file is not None:
            from torch2trt import TRTModule

            model_trt = TRTModule()
            model_trt.load_state_dict(torch.load(trt_file))

            x = torch.ones(1, 3, test_size[0], test_size[1]).cuda()
            model(x)
            model = model_trt

        tracker = DeepSort(model_folder, min_confidence=self.args.track_thresh)

        for cur_iter, (imgs, _, info_imgs, ids) in enumerate(
            progress_bar(self.dataloader)
        ):
            with torch.no_grad():
                # init tracker
                frame_id = info_imgs[2].item()
                video_id = info_imgs[3].item()
                img_file_name = info_imgs[4]
                video_name = img_file_name[0].split('/')[0]

                if video_name not in video_names:
                    video_names[video_id] = video_name

                if frame_id == 1:
                    tracker = DeepSort(model_folder, min_confidence=self.args.track_thresh)
                    if len(results) != 0:
                        result_filename = os.path.join(result_folder, '{}.txt'.format(video_names[video_id - 1]))
                        write_results_no_score(result_filename, results)
                        results = []

                imgs = imgs.type(tensor_type)

                # skip the last iters since the batch size might not be enough for batch inference
                is_time_record = cur_iter < len(self.dataloader) - 1
                if is_time_record:
                    start = time.time()

                outputs = model(imgs)
                if decoder is not None:
                    outputs = decoder(outputs, dtype=outputs.type())

                outputs = postprocess(outputs, self.num_classes, self.confthre, self.nmsthre)

                if is_time_record:
                    infer_end = time_synchronized()
                    inference_time += infer_end - start

            output_results = self.convert_to_coco_format(outputs, info_imgs, ids)
            data_list.extend(output_results)

            # run tracking
            online_targets = tracker.update(outputs[0], info_imgs, self.img_size, img_file_name[0])
            online_tlwhs = []
            online_ids = []
            for t in online_targets:
                # tracker outputs arrays of [x1, y1, x2, y2, track_id]; convert to tlwh
                tlwh = [t[0], t[1], t[2] - t[0], t[3] - t[1]]
                tid = t[4]
                vertical = tlwh[2] / tlwh[3] > 1.6
                if tlwh[2] * tlwh[3] > self.args.min_box_area and not vertical:
                    online_tlwhs.append(tlwh)
                    online_ids.append(tid)
            # save results
            results.append((frame_id, online_tlwhs, online_ids))

            if is_time_record:
                track_end = time_synchronized()
                track_time += track_end - infer_end

            if cur_iter == len(self.dataloader) - 1:
                result_filename = os.path.join(result_folder, '{}.txt'.format(video_names[video_id]))
                write_results_no_score(result_filename, results)

        statistics = torch.cuda.FloatTensor([inference_time, track_time, n_samples])
        if distributed:
            data_list = gather(data_list, dst=0)
            data_list = list(itertools.chain(*data_list))
            torch.distributed.reduce(statistics, dst=0)

        eval_results = self.evaluate_prediction(data_list, statistics)
        synchronize()
        return eval_results

    def evaluate_motdt(
        self,
        model,
        distributed=False,
        half=False,
        trt_file=None,
        decoder=None,
        test_size=None,
        result_folder=None,
        model_folder=None
    ):
        """
        COCO average precision (AP) Evaluation. Iterate inference on the test dataset;
        the results are evaluated by the COCO API.

        NOTE: This function will change training mode to False, please save states if needed.

        Args:
            model : model to evaluate.

        Returns:
            ap50_95 (float) : COCO AP of IoU=50:95
            ap50 (float) : COCO AP of IoU=50
            summary (str): summary info of evaluation.
        """
        # TODO half to amp_test
        tensor_type = torch.cuda.HalfTensor if half else torch.cuda.FloatTensor
        model = model.eval()
        if half:
            model = model.half()
        ids = []
        data_list = []
        results = []
        video_names = defaultdict()
        progress_bar = tqdm if is_main_process() else iter

        inference_time = 0
        track_time = 0
        n_samples = len(self.dataloader) - 1

        if trt_file is not None:
            from torch2trt import TRTModule

            model_trt = TRTModule()
            model_trt.load_state_dict(torch.load(trt_file))

            x = torch.ones(1, 3, test_size[0], test_size[1]).cuda()
            model(x)
            model = model_trt

        tracker = OnlineTracker(model_folder, min_cls_score=self.args.track_thresh)

        for cur_iter, (imgs, _, info_imgs, ids) in enumerate(
            progress_bar(self.dataloader)
        ):
            with torch.no_grad():
                # init tracker
                frame_id = info_imgs[2].item()
                video_id = info_imgs[3].item()
                img_file_name = info_imgs[4]
                video_name = img_file_name[0].split('/')[0]

                if video_name not in video_names:
                    video_names[video_id] = video_name

                if frame_id == 1:
                    tracker = OnlineTracker(model_folder, min_cls_score=self.args.track_thresh)
                    if len(results) != 0:
                        result_filename = os.path.join(result_folder, '{}.txt'.format(video_names[video_id - 1]))
                        write_results(result_filename, results)
                        results = []

                imgs = imgs.type(tensor_type)

                # skip the last iters since the batch size might not be enough for batch inference
                is_time_record = cur_iter < len(self.dataloader) - 1
                if is_time_record:
                    start = time.time()

                outputs = model(imgs)
                if decoder is not None:
                    outputs = decoder(outputs, dtype=outputs.type())

                outputs = postprocess(outputs, self.num_classes, self.confthre, self.nmsthre)

                if is_time_record:
                    infer_end = time_synchronized()
                    inference_time += infer_end - start

            output_results = self.convert_to_coco_format(outputs, info_imgs, ids)
            data_list.extend(output_results)

            # run tracking
            online_targets = tracker.update(outputs[0], info_imgs, self.img_size, img_file_name[0])
            online_tlwhs = []
            online_ids = []
            online_scores = []
            for t in online_targets:
                tlwh = t.tlwh
                tid = t.track_id
                vertical = tlwh[2] / tlwh[3] > 1.6
                if tlwh[2] * tlwh[3] > self.args.min_box_area and not vertical:
                    online_tlwhs.append(tlwh)
                    online_ids.append(tid)
                    online_scores.append(t.score)
            # save results
            results.append((frame_id, online_tlwhs, online_ids, online_scores))

            if is_time_record:
                track_end = time_synchronized()
                track_time += track_end - infer_end

            if cur_iter == len(self.dataloader) - 1:
                result_filename = os.path.join(result_folder, '{}.txt'.format(video_names[video_id]))
                write_results(result_filename, results)

        statistics = torch.cuda.FloatTensor([inference_time, track_time, n_samples])
        if distributed:
            data_list = gather(data_list, dst=0)
            data_list = list(itertools.chain(*data_list))
            torch.distributed.reduce(statistics, dst=0)

        eval_results = self.evaluate_prediction(data_list, statistics)
        synchronize()
        return eval_results
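
    # The four evaluate_* methods above share the same detection, timing and COCO
    # bookkeeping; they differ only in which tracker they instantiate (BYTETracker,
    # Sort, DeepSort, OnlineTracker) and in whether the result writer keeps scores.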

    def convert_to_coco_format(self, outputs, info_imgs, ids):
        data_list = []
        for (output, img_h, img_w, img_id) in zip(
            outputs, info_imgs[0], info_imgs[1], ids
        ):
            if output is None:
                continue
            output = output.cpu()

            bboxes = output[:, 0:4]

            # preprocessing: resize
            scale = min(
                self.img_size[0] / float(img_h), self.img_size[1] / float(img_w)
            )
            bboxes /= scale
            bboxes = xyxy2xywh(bboxes)

            cls = output[:, 6]
            scores = output[:, 4] * output[:, 5]
            for ind in range(bboxes.shape[0]):
                label = self.dataloader.dataset.class_ids[int(cls[ind])]
                pred_data = {
                    "image_id": int(img_id),
                    "category_id": label,
                    "bbox": bboxes[ind].numpy().tolist(),
                    "score": scores[ind].numpy().item(),
                    "segmentation": [],
                }  # COCO json format
                data_list.append(pred_data)
        return data_list
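
    # convert_to_coco_format maps boxes from the resized network input back to the
    # original resolution: preprocessing scales each image by
    # min(img_size[0] / img_h, img_size[1] / img_w), so dividing the predicted xyxy
    # boxes by that factor undoes the resize before the xyxy -> xywh conversion.
    # For example (illustrative numbers), a 1080x1920 frame evaluated at (800, 1440)
    # uses scale = min(800 / 1080, 1440 / 1920) ~= 0.74, so x1 = 740 maps back to ~1000.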

    def evaluate_prediction(self, data_dict, statistics):
        if not is_main_process():
            return 0, 0, None

        logger.info("Evaluate in main process...")

        annType = ["segm", "bbox", "keypoints"]

        inference_time = statistics[0].item()
        track_time = statistics[1].item()
        n_samples = statistics[2].item()

        a_infer_time = 1000 * inference_time / (n_samples * self.dataloader.batch_size)
        a_track_time = 1000 * track_time / (n_samples * self.dataloader.batch_size)

        time_info = ", ".join(
            [
                "Average {} time: {:.2f} ms".format(k, v)
                for k, v in zip(
                    ["forward", "track", "inference"],
                    [a_infer_time, a_track_time, (a_infer_time + a_track_time)],
                )
            ]
        )

        info = time_info + "\n"

        # Evaluate the Dt (detection) json comparing with the ground truth
        if len(data_dict) > 0:
            cocoGt = self.dataloader.dataset.coco
            # TODO: since pycocotools can't process dict in py36, write data to json file.
            _, tmp = tempfile.mkstemp()
            json.dump(data_dict, open(tmp, "w"))
            cocoDt = cocoGt.loadRes(tmp)
            '''
            try:
                from yolox.layers import COCOeval_opt as COCOeval
            except ImportError:
                from pycocotools.cocoeval import COCOeval
                logger.warning("Use standard COCOeval.")
            '''
            # from pycocotools.cocoeval import COCOeval
            from yolox.layers import COCOeval_opt as COCOeval

            cocoEval = COCOeval(cocoGt, cocoDt, annType[1])
            cocoEval.evaluate()
            cocoEval.accumulate()
            redirect_string = io.StringIO()
            with contextlib.redirect_stdout(redirect_string):
                cocoEval.summarize()
            info += redirect_string.getvalue()
            # stats[0] is AP @ IoU=0.50:0.95, stats[1] is AP @ IoU=0.50
            return cocoEval.stats[0], cocoEval.stats[1], info
        else:
            return 0, 0, info
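

# Typical driver (sketch only): assuming a YOLOX experiment object `exp` and parsed
# `args`, roughly as in ByteTrack's evaluation script; the names below are
# illustrative, not part of this module.
#
#   evaluator = MOTEvaluator(args, val_loader, exp.test_size, exp.test_conf,
#                            exp.nmsthre, exp.num_classes)
#   ap50_95, ap50, summary = evaluator.evaluate(model, result_folder=results_dir)
#   logger.info(summary)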