coco_evaluator.py

#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.

import contextlib
import io
import itertools
import json
import tempfile
import time

import torch
from loguru import logger
from tqdm import tqdm

from yolox.utils import (
    gather,
    is_main_process,
    postprocess,
    synchronize,
    time_synchronized,
    xyxy2xywh,
)


class COCOEvaluator:
    """
    COCO AP evaluation class. All the data in the val2017 dataset is processed
    and evaluated by the COCO API.
    """

    def __init__(
        self, dataloader, img_size, confthre, nmsthre, num_classes, testdev=False
    ):
        """
        Args:
            dataloader (Dataloader): evaluation dataloader.
            img_size (tuple): image size (height, width) after preprocessing;
                images are resized to this shape.
            confthre (float): confidence threshold in [0, 1], defined in the
                experiment config file.
            nmsthre (float): IoU threshold for non-maximum suppression, in [0, 1].
        """
        self.dataloader = dataloader
        self.img_size = img_size
        self.confthre = confthre
        self.nmsthre = nmsthre
        self.num_classes = num_classes
        self.testdev = testdev

    def evaluate(
        self,
        model,
        distributed=False,
        half=False,
        trt_file=None,
        decoder=None,
        test_size=None,
    ):
        """
        COCO average precision (AP) evaluation. Runs inference over the test
        dataset and evaluates the results with the COCO API.

        NOTE: This function switches the model to eval mode; save the training
        state beforehand if needed.

        Args:
            model: model to evaluate.

        Returns:
            ap50_95 (float): COCO AP over IoU=0.5:0.95.
            ap50 (float): COCO AP at IoU=0.5.
            summary (str): summary info of the evaluation.
        """
        # TODO half to amp_test
        tensor_type = torch.cuda.HalfTensor if half else torch.cuda.FloatTensor
        model = model.eval()
        if half:
            model = model.half()
        ids = []
        data_list = []
        progress_bar = tqdm if is_main_process() else iter

        inference_time = 0
        nms_time = 0
        # the last (possibly partial) batch is excluded from timing
        n_samples = len(self.dataloader) - 1

        if trt_file is not None:
            from torch2trt import TRTModule

            model_trt = TRTModule()
            model_trt.load_state_dict(torch.load(trt_file))

            # run one dummy batch through the original model, then swap in
            # the TensorRT module for the actual evaluation
            x = torch.ones(1, 3, test_size[0], test_size[1]).cuda()
            model(x)
            model = model_trt

        for cur_iter, (imgs, _, info_imgs, ids) in enumerate(
            progress_bar(self.dataloader)
        ):
            with torch.no_grad():
                imgs = imgs.type(tensor_type)

                # skip timing the last iteration, since its batch might not be full
                is_time_record = cur_iter < len(self.dataloader) - 1
                if is_time_record:
                    start = time.time()

                outputs = model(imgs)
                if decoder is not None:
                    outputs = decoder(outputs, dtype=outputs.type())

                if is_time_record:
                    infer_end = time_synchronized()
                    inference_time += infer_end - start

                outputs = postprocess(
                    outputs, self.num_classes, self.confthre, self.nmsthre
                )
                if is_time_record:
                    nms_end = time_synchronized()
                    nms_time += nms_end - infer_end

            data_list.extend(self.convert_to_coco_format(outputs, info_imgs, ids))

        statistics = torch.cuda.FloatTensor([inference_time, nms_time, n_samples])
        if distributed:
            # gather predictions from all ranks onto rank 0 and sum the timings
            data_list = gather(data_list, dst=0)
            data_list = list(itertools.chain(*data_list))
            torch.distributed.reduce(statistics, dst=0)

        eval_results = self.evaluate_prediction(data_list, statistics)
        synchronize()
        return eval_results

    def convert_to_coco_format(self, outputs, info_imgs, ids):
        data_list = []
        for (output, img_h, img_w, img_id) in zip(
            outputs, info_imgs[0], info_imgs[1], ids
        ):
            if output is None:
                continue
            output = output.cpu()

            bboxes = output[:, 0:4]

            # undo the resize applied during preprocessing so boxes are in
            # original-image coordinates
            scale = min(
                self.img_size[0] / float(img_h), self.img_size[1] / float(img_w)
            )
            bboxes /= scale
            # convert [x1, y1, x2, y2] corners to COCO's [x, y, w, h]
            bboxes = xyxy2xywh(bboxes)

            cls = output[:, 6]
            scores = output[:, 4] * output[:, 5]
            for ind in range(bboxes.shape[0]):
                label = self.dataloader.dataset.class_ids[int(cls[ind])]
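                # Each record follows the COCO detection format; an illustrative
                # example (hypothetical values, not from a real run):
                #   {"image_id": 42, "category_id": 18,
                #    "bbox": [258.2, 41.3, 348.3, 243.6],
                #    "score": 0.92, "segmentation": []}
                # "bbox" is [x, y, width, height] in original-image pixels.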
                pred_data = {
                    "image_id": int(img_id),
                    "category_id": label,
                    "bbox": bboxes[ind].numpy().tolist(),
                    "score": scores[ind].numpy().item(),
                    "segmentation": [],
                }  # COCO json format
                data_list.append(pred_data)
        return data_list

    def evaluate_prediction(self, data_dict, statistics):
        if not is_main_process():
            return 0, 0, None

        logger.info("Evaluate in main process...")

        annType = ["segm", "bbox", "keypoints"]

        inference_time = statistics[0].item()
        nms_time = statistics[1].item()
        n_samples = statistics[2].item()

        # per-image averages in milliseconds
        a_infer_time = 1000 * inference_time / (n_samples * self.dataloader.batch_size)
        a_nms_time = 1000 * nms_time / (n_samples * self.dataloader.batch_size)

        time_info = ", ".join(
            [
                "Average {} time: {:.2f} ms".format(k, v)
                for k, v in zip(
                    ["forward", "NMS", "inference"],
                    [a_infer_time, a_nms_time, (a_infer_time + a_nms_time)],
                )
            ]
        )
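        # e.g. "Average forward time: 12.34 ms, Average NMS time: 2.34 ms,
        # Average inference time: 14.68 ms" (illustrative numbers)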

        info = time_info + "\n"

        # Evaluate the Dt (detection) json by comparing it with the ground truth
        if len(data_dict) > 0:
            cocoGt = self.dataloader.dataset.coco
            # TODO: since pycocotools can't process dict in py36, write data to json file.
            if self.testdev:
                with open("./yolox_testdev_2017.json", "w") as f:
                    json.dump(data_dict, f)
                cocoDt = cocoGt.loadRes("./yolox_testdev_2017.json")
            else:
                _, tmp = tempfile.mkstemp()
                with open(tmp, "w") as f:
                    json.dump(data_dict, f)
                cocoDt = cocoGt.loadRes(tmp)

            # prefer the optimized COCOeval if the extension is built,
            # otherwise fall back to the standard pycocotools implementation
            try:
                from yolox.layers import COCOeval_opt as COCOeval
            except ImportError:
                from pycocotools.cocoeval import COCOeval

                logger.warning("Use standard COCOeval.")

            cocoEval = COCOeval(cocoGt, cocoDt, annType[1])
            cocoEval.evaluate()
            cocoEval.accumulate()

            # capture the summary table that COCOeval prints to stdout
            redirect_string = io.StringIO()
            with contextlib.redirect_stdout(redirect_string):
                cocoEval.summarize()
            info += redirect_string.getvalue()
            return cocoEval.stats[0], cocoEval.stats[1], info
        else:
            return 0, 0, info
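

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file). `build_model` and
# `build_eval_loader` below are hypothetical helpers standing in for however
# your experiment constructs the model and the val2017 dataloader; in YOLOX
# the evaluator is normally created and driven by the Exp/Trainer classes.
# ---------------------------------------------------------------------------
# if __name__ == "__main__":
#     model = build_model().cuda()                   # hypothetical helper
#     val_loader = build_eval_loader(batch_size=8)   # hypothetical helper
#     evaluator = COCOEvaluator(
#         dataloader=val_loader,
#         img_size=(640, 640),
#         confthre=0.01,
#         nmsthre=0.65,
#         num_classes=80,
#     )
#     ap50_95, ap50, summary = evaluator.evaluate(model)
#     logger.info("AP50:95 = {:.4f}, AP50 = {:.4f}".format(ap50_95, ap50))
#     logger.info(summary)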