#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# This file comes from
# https://github.com/facebookresearch/detectron2/blob/master/detectron2/evaluation/fast_eval_api.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.

import copy
import time

import numpy as np
from pycocotools.cocoeval import COCOeval

# import torch first to make yolox._C work without ImportError of libc10.so
# in YOLOX, env is already set in __init__.py.
from yolox import _C


class COCOeval_opt(COCOeval):
    """
    This is a slightly modified version of the original COCO API, where the functions
    evaluateImg() and accumulate() are implemented in C++ to speed up evaluation.
    """

    def evaluate(self):
        """
        Run per-image evaluation on the given images and store the results in
        self._evalImgs_cpp, a data structure that is not readable from Python but is
        consumed by the C++ implementation of accumulate(). Unlike the original COCO
        PythonAPI, we do not populate self.evalImgs, because that data structure is a
        computational bottleneck.
        :return: None
        """
        tic = time.time()

        print("Running per image evaluation...")
        p = self.params
        # add backward compatibility if useSegm is specified in params
        if p.useSegm is not None:
            p.iouType = "segm" if p.useSegm == 1 else "bbox"
            print(
                "useSegm (deprecated) is not None. Running {} evaluation".format(
                    p.iouType
                )
            )
        print("Evaluate annotation type *{}*".format(p.iouType))
        p.imgIds = list(np.unique(p.imgIds))
        if p.useCats:
            p.catIds = list(np.unique(p.catIds))
        p.maxDets = sorted(p.maxDets)
        self.params = p

        self._prepare()

        # loop through images, area range, max detection number
        catIds = p.catIds if p.useCats else [-1]

        if p.iouType == "segm" or p.iouType == "bbox":
            computeIoU = self.computeIoU
        elif p.iouType == "keypoints":
            computeIoU = self.computeOks
        self.ious = {
            (imgId, catId): computeIoU(imgId, catId)
            for imgId in p.imgIds
            for catId in catIds
        }
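        # self.ious[(imgId, catId)] holds the pairwise IoUs between detections and
        # ground-truth instances for that (image, category) pair, as computed by the
        # stock pycocotools computeIoU()/computeOks().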

        maxDet = p.maxDets[-1]

        # <<<< Beginning of code differences with original COCO API
        def convert_instances_to_cpp(instances, is_det=False):
            # Convert annotations for a list of instances in an image to a format that's
            # fast to access in C++
            instances_cpp = []
            for instance in instances:
                instance_cpp = _C.InstanceAnnotation(
                    int(instance["id"]),
                    instance["score"] if is_det else instance.get("score", 0.0),
                    instance["area"],
                    bool(instance.get("iscrowd", 0)),
                    bool(instance.get("ignore", 0)),
                )
                instances_cpp.append(instance_cpp)
            return instances_cpp

        # Convert GT annotations, detections, and IOUs to a format that's fast to access in C++
        ground_truth_instances = [
            [convert_instances_to_cpp(self._gts[imgId, catId]) for catId in p.catIds]
            for imgId in p.imgIds
        ]
        detected_instances = [
            [
                convert_instances_to_cpp(self._dts[imgId, catId], is_det=True)
                for catId in p.catIds
            ]
            for imgId in p.imgIds
        ]
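        # ground_truth_instances and detected_instances are nested lists indexed as
        # [image][category][instance], the layout consumed by the C++ evaluator below.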
        ious = [[self.ious[imgId, catId] for catId in catIds] for imgId in p.imgIds]

        if not p.useCats:
            # For each image, flatten per-category lists into a single list
            ground_truth_instances = [
                [[o for c in i for o in c]] for i in ground_truth_instances
            ]
            detected_instances = [
                [[o for c in i for o in c]] for i in detected_instances
            ]

        # Call C++ implementation of self.evaluateImgs()
        self._evalImgs_cpp = _C.COCOevalEvaluateImages(
            p.areaRng,
            maxDet,
            p.iouThrs,
            ious,
            ground_truth_instances,
            detected_instances,
        )
        self._evalImgs = None

        self._paramsEval = copy.deepcopy(self.params)
        toc = time.time()
        print("COCOeval_opt.evaluate() finished in {:0.2f} seconds.".format(toc - tic))
        # >>>> End of code differences with original COCO API

    def accumulate(self):
        """
        Accumulate per-image evaluation results and store the result in self.eval. Does
        not support changing parameter settings from those used by self.evaluate().
        """
        print("Accumulating evaluation results...")
        tic = time.time()
        if not hasattr(self, "_evalImgs_cpp"):
            print("Please run evaluate() first")

        self.eval = _C.COCOevalAccumulate(self._paramsEval, self._evalImgs_cpp)

        # recall is num_iou_thresholds X num_categories X num_area_ranges X num_max_detections
        self.eval["recall"] = np.array(self.eval["recall"]).reshape(
            self.eval["counts"][:1] + self.eval["counts"][2:]
        )
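        # With the default COCO parameters, counts == [10, 101, num_categories, 4, 3]
        # (IoU thresholds, recall thresholds, categories, area ranges, max detections),
        # so recall is reshaped to (10, num_categories, 4, 3).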

        # precision and scores are num_iou_thresholds X num_recall_thresholds X
        # num_categories X num_area_ranges X num_max_detections
        self.eval["precision"] = np.array(self.eval["precision"]).reshape(
            self.eval["counts"]
        )
        self.eval["scores"] = np.array(self.eval["scores"]).reshape(self.eval["counts"])
        toc = time.time()
        print(
            "COCOeval_opt.accumulate() finished in {:0.2f} seconds.".format(toc - tic)
        )
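

# --------------------------------------------------------------------------- #
# Usage sketch (not part of the original module): COCOeval_opt is a drop-in
# replacement for pycocotools.cocoeval.COCOeval; summarize() is inherited
# unchanged. The annotation/result file names below are hypothetical
# placeholders.
# --------------------------------------------------------------------------- #
if __name__ == "__main__":
    from pycocotools.coco import COCO

    coco_gt = COCO("instances_val2017.json")      # ground-truth annotations
    coco_dt = coco_gt.loadRes("detections.json")  # detections in COCO result format

    coco_eval = COCOeval_opt(coco_gt, coco_dt, iouType="bbox")
    coco_eval.evaluate()    # per-image evaluation (C++ backend)
    coco_eval.accumulate()  # accumulation (C++ backend)
    coco_eval.summarize()   # prints the standard COCO AP/AR table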