# yolov3_detector.py — CNStream YOLOv3 detection sample pipeline.
import os, sys
# Make the CNStream Python bindings (../lib relative to this file) importable.
sys.path.append(os.path.split(os.path.realpath(__file__))[0] + "/../lib")
from cnstream import *
import time
import threading
import cv2
import math
import numpy as np

# Serializes add_source/remove_source calls between main() and the observer.
g_source_lock = threading.Lock()
# Guards g_perf_print_stop, shared between main() and PerfThread.
g_perf_print_lock = threading.Lock()
g_perf_print_stop = False  # set True by main() to end the perf-print loop
  12. class CustomObserver(StreamMsgObserver):
  13. def __init__(self, pipeline, source):
  14. StreamMsgObserver.__init__(self)
  15. self.pipeline = pipeline
  16. self.source = source
  17. self.stop = False
  18. self.wakener = threading.Condition()
  19. self.stream_set = set()
  20. def update(self, msg):
  21. global g_source_lock
  22. g_source_lock.acquire()
  23. self.wakener.acquire()
  24. if self.stop:
  25. return
  26. if msg.type == StreamMsgType.eos_msg:
  27. print("pipeline[{}] stream[{}] gets EOS".format(self.pipeline.get_name(), msg.stream_id))
  28. if msg.stream_id in self.stream_set:
  29. self.source.remove_source(msg.stream_id)
  30. self.stream_set.remove(msg.stream_id)
  31. if len(self.stream_set) == 0:
  32. print("pipeline[{}] received all EOS".format(self.pipeline.get_name()))
  33. self.stop = True
  34. elif msg.type == StreamMsgType.stream_err_msg:
  35. print("pipeline[{}] stream[{}] gets stream error".format(self.pipeline.get_name(), msg.stream_id))
  36. if msg.stream_id in self.stream_set:
  37. self.source.remove_source(msg.stream_id)
  38. self.stream_set.remove(msg.stream_id)
  39. if len(self.stream_set) == 0:
  40. print("pipeline[{}] received all EOS".format(self.pipeline.get_name()))
  41. self.stop = True
  42. elif msg.type == StreamMsgType.error_msg:
  43. print("pipeline[{}] gets error".format(self.pipeline.get_name()))
  44. self.source.remove_sources()
  45. self.stream_set.clear()
  46. self.stop = True
  47. elif msg.type == StreamMsgType.frame_err_msg:
  48. print("pipeline[{}] stream[{}] gets frame error".format(self.pipeline.get_name(), msg.stream_id))
  49. else:
  50. print("pipeline[{}] unknown message type".format(self.pipeline.get_name()))
  51. if self.stop:
  52. self.wakener.notify()
  53. self.wakener.release()
  54. g_source_lock.release()
  55. def wait_for_stop(self):
  56. self.wakener.acquire()
  57. if len(self.stream_set) == 0:
  58. self.stop = True
  59. self.wakener.release()
  60. while True:
  61. if self.wakener.acquire():
  62. if not self.stop:
  63. self.wakener.wait()
  64. else:
  65. self.pipeline.stop()
  66. break
  67. self.wakener.release()
  68. def increase_stream(self, stream_id):
  69. self.wakener.acquire()
  70. if stream_id in self.stream_set:
  71. print("increase_stream() The stream is ongoing [{}]".format(stream_id))
  72. else:
  73. self.stream_set.add(stream_id)
  74. if self.stop:
  75. stop = False
  76. self.wakener.release()
  77. class PerfThread (threading.Thread):
  78. def __init__(self, pipeline):
  79. threading.Thread.__init__(self)
  80. self.pipeline = pipeline
  81. def run(self):
  82. print_performance(self.pipeline)
  83. def print_performance(pipeline):
  84. global g_perf_print_lock, g_perf_print_stop
  85. if pipeline.is_profiling_enabled():
  86. last_time = time.time()
  87. while True:
  88. g_perf_print_lock.acquire()
  89. if g_perf_print_stop:
  90. break
  91. g_perf_print_lock.release()
  92. elapsed_time = time.time() - last_time
  93. if elapsed_time < 2:
  94. time.sleep(2 - elapsed_time)
  95. last_time = time.time()
  96. # print whole process performance
  97. print_pipeline_performance(pipeline)
  98. # print real time performance (last 2 seconds)
  99. print_pipeline_performance(pipeline, 2000)
  100. g_perf_print_lock.release()
  101. class Yolov3Preproc(Preproc):
  102. def __init__(self):
  103. Preproc.__init__(self)
  104. def init(self, params):
  105. return True
  106. def execute(self, input_shapes, frame_info):
  107. data_frame = frame_info.get_cn_data_frame()
  108. bgr = data_frame.image_bgr()
  109. src_w = data_frame.width
  110. src_h = data_frame.height
  111. dst_w = input_shapes[0][2]
  112. dst_h = input_shapes[0][1]
  113. # resize as letterbox
  114. scaling_factor = min(dst_w / src_w, dst_h / src_h)
  115. unpad_w = math.ceil(scaling_factor * src_w)
  116. unpad_h = math.ceil(scaling_factor * src_h)
  117. resized = cv2.resize(bgr, (unpad_w, unpad_h))
  118. # to rgb
  119. rgb = resized[:,:,::-1]
  120. # padding
  121. pad_w = dst_w - unpad_w
  122. pad_h = dst_h - unpad_h
  123. pad_l = math.floor(pad_w / 2)
  124. pad_t = math.floor(pad_h / 2)
  125. pad_r = pad_w - pad_l
  126. pad_b = pad_h - pad_t
  127. dst_img = cv2.copyMakeBorder(rgb, pad_t, pad_b, pad_l, pad_r, cv2.BORDER_CONSTANT, (128, 128, 128))
  128. # to 0rgb
  129. argb = cv2.merge([np.zeros((dst_h, dst_w, 1), np.uint8), dst_img])
  130. # save pad params to frame_info
  131. collection = frame_info.get_py_collection()
  132. collection['unpad_h'] = unpad_h
  133. collection['unpad_w'] = unpad_w
  134. collection['pad_l'] = pad_l
  135. collection['pad_t'] = pad_t
  136. return [np.asarray(argb).flatten().astype(np.float32)]
  137. def to_range(val : float, min_val, max_val):
  138. return min(max(val, min_val), max_val)
  139. class Yolov3Postproc(Postproc):
  140. def __init__(self):
  141. Postproc.__init__(self)
  142. self.__threshold = 0.3
  143. def init(self, params):
  144. if 'threshold' in params:
  145. self.__threshold = float(params['threshold'])
  146. return True
  147. def execute(self, net_outputs, input_shapes, frame_info):
  148. collection = frame_info.get_py_collection()
  149. unpad_h = collection['unpad_h']
  150. unpad_w = collection['unpad_w']
  151. pad_l = collection['pad_l']
  152. pad_t = collection['pad_t']
  153. input_h = input_shapes[0][1] # model input height
  154. input_w = input_shapes[0][2] # model input width
  155. net_output = net_outputs[0].flatten()
  156. box_num = int(net_output[0])
  157. # get bboxes
  158. for box_id in range(box_num):
  159. label = str(int(net_output[64 + box_id * 7 + 1]))
  160. score = net_output[64 + box_id * 7 + 2]
  161. left = to_range((net_output[64 + box_id * 7 + 3] * input_w - pad_l) / unpad_w, 0, 1)
  162. top = to_range((net_output[64 + box_id * 7 + 4] * input_h - pad_t) / unpad_h, 0, 1)
  163. right = to_range((net_output[64 + box_id * 7 + 5] * input_w - pad_l) / unpad_w, 0, 1)
  164. bottom = to_range((net_output[64 + box_id * 7 + 6] * input_h - pad_t) / unpad_h, 0, 1)
  165. if left >= right or top >= bottom: continue
  166. if score < self.__threshold: continue
  167. # add detection object to frame_info
  168. detection_object = CNInferObject()
  169. detection_object.id = label
  170. detection_object.score = score
  171. detection_object.bbox.x = left
  172. detection_object.bbox.y = top
  173. detection_object.bbox.w = right - left
  174. detection_object.bbox.h = bottom - top
  175. frame_info.get_cn_infer_objects().push_back(detection_object)
  176. def main():
  177. model_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), "../../data/models/yolov3_b4c4_argb_mlu270.cambricon")
  178. if not os.path.exists(model_file):
  179. os.makedirs(os.path.dirname(model_file),exist_ok=True)
  180. import urllib.request
  181. url_str = "http://video.cambricon.com/models/MLU270/yolov3_b4c4_argb_mlu270.cambricon"
  182. print(f'Downloading {url_str} ...')
  183. urllib.request.urlretrieve(url_str, model_file)
  184. global g_source_lock, g_perf_print_lock, g_perf_print_stop
  185. # Build a pipeline
  186. pipeline = Pipeline("yolov3_detection_pipeline")
  187. pipeline.build_pipeline_by_json_file('yolov3_detection_config.json')
  188. # Get pipeline's source module
  189. source_module_name = 'source'
  190. source = pipeline.get_source_module(source_module_name)
  191. # Set message observer
  192. obs = CustomObserver(pipeline, source)
  193. pipeline.stream_msg_observer = obs
  194. # Start the pipeline
  195. if not pipeline.start():
  196. return
  197. # Start a thread to print pipeline performance
  198. perf_th = PerfThread(pipeline)
  199. perf_th.start()
  200. # Define an input data handler
  201. mp4_path = "../../data/videos/cars.mp4"
  202. stream_num = 1
  203. for i in range(stream_num):
  204. stream_id = "stream_id_{}".format(i)
  205. file_handler = FileHandler(source, stream_id, mp4_path, -1)
  206. g_source_lock.acquire()
  207. if source.add_source(file_handler) != 0:
  208. print("Add source failed stream []".format(stream_id))
  209. else:
  210. obs.increase_stream(stream_id)
  211. g_source_lock.release()
  212. obs.wait_for_stop()
  213. if pipeline.is_profiling_enabled():
  214. g_perf_print_lock.acquire()
  215. g_perf_print_stop = True
  216. g_perf_print_lock.release()
  217. perf_th.join()
  218. print_pipeline_performance(pipeline)
  219. print("pipeline[{}] stops".format(pipeline.get_name()))
# Script entry point: run the demo only when executed directly.
if __name__ == '__main__':
    main()