- # -*- coding: utf-8 -*-
- # @Time : 2022/6/24 15:18
- # @Author : MaochengHu
- # @Email : wojiaohumaocheng@gmail.com
- # @File : config.py
- # @Project : person_monitor
- import os
- import argparse
# -------------------------- General configuration ------------------------------- #
# Project root path
pro_root = "/data/fengyang/sunwin/code/person_monitor"
# Dependency root path
dependence_root = os.path.join(pro_root, "dependence")
# Model root path
model_root = os.path.join(pro_root, "dev/src/algorithms/save_models")
# Test input (either a local video file or an online video-stream address)
# input_source = "rtsp://admin:sunwin2019@192.168.20.240:554/h265/ch1/main/av_stream"
input_source = "/data/fengyang/sunwin/code/person_monitor/dev/test/demo.mp4"
# GPU index to use
cuda_index = 0
item_max_size = 60  # number of frames kept for action recognition
# Result saving
save_result = True  # whether to save the recognition results
use_keypoint = True  # use keypoints for action recognition; otherwise recognize directly from cropped video
# ---------------------- Person object-detection configuration --------------------------------#
# yolov7 model loading parameters
# Model loading parameters
object_detection_model_config = {
    # Object-detector weights path
    "pt_weights": os.path.join(model_root, "object_detection_model", "helmet_fall_phone.engine"),
    # Class-definition file for the object detector
    "data": os.path.join(model_root, "object_detection_model", "second_stage_bbox_helmet_fall_phone.yaml"),
    # Input image size; each yolo variant expects its own input size (do not change lightly)
    "imgsz": (640, 640),
    # GPU index
    "device": cuda_index,
    # Confidence kept very low because tracking needs lost targets to be re-acquired
    "confthre": 0.001,
    # NMS threshold
    "nmsthre": 0.7,
    # Maximum number of detected persons
    "max_det": 20,
}
# class_list = ["play_phone", "call_phone", "sleep", "work", "no_helmet", "helmet"]
person_end_index = 3
person_attr = ["play_phone", "call_phone", "sleep", "work"]
match_score_threshold = 0.8
# ----------------------- Person-attribute detection configuration ---------------------------- #
# yolov7 model loading parameters
person_attribute_model_config = {
    # Object-detector weights path
    "pt_weights": os.path.join(model_root, "object_detection_model", "helmet_fall_phone.engine"),
    # Class-definition file for the object detector
    "data": os.path.join(model_root, "object_detection_model", "second_stage_bbox_helmet_fall_phone.yaml"),
    # Input image size; each yolo variant expects its own input size (do not change lightly)
    "imgsz": (640, 640),
    # GPU index
    "device": cuda_index,
    # Object confidence threshold
    "confthre": 0.3,
    # NMS threshold
    "nmsthre": 0.2,
}
# Person-state classes
person_class_list = ["play_phone", "call_phone", "sleep", "work"]
# Helmet-wearing classes
helmet_class_list = ["no_helmet", "helmet"]
# ----------------------- Person-tracking model configuration ---------------------------- #
tracker_max_id = 100  # recommended to be at least 5x max_det
tracker_model_config = dict(
    track_thresh=0.5,  # confidence threshold for tracked persons
    track_buffer=30,  # number of frames a lost box is kept before the track is dropped
    match_thresh=0.8,  # similarity score above which a detection matches an existing track
    mot20=False,  # whether to use MOT20-style computation
    tracker_max_id=tracker_max_id,  # maximum number of tracked ids; counting restarts beyond this
)
# Build an argparse.Namespace exposing the tracker parameters as attributes
# (the tracker library expects an args-style object).
tracker_parser = argparse.ArgumentParser()
for k, v in tracker_model_config.items():
    tracker_parser.add_argument("--{}".format(k), default=v)
# BUGFIX: parse_args([]) instead of parse_args() — parsing at import time with
# no argument list consumes (and errors on) the real sys.argv of whatever
# program imports this config module. The defaults above are all we want.
tracker_args = tracker_parser.parse_args([])
#
tracker_frame_rate = 30  # fps of the tracked video
min_box_area = 10  # boxes with a side smaller than this are not recognized
output_side_size = 640  # padded crop output size when recognizing from video directly (no keypoints)
tracker_line_size = 90  # length of the drawn person trajectory line
# ----------------------- Keypoint model configuration ---------------------- #
pose_name = "tiny_pose"
pose_model_platform = "paddle"  # currently only "paddle" (PaddlePaddle) and "mmpose" are supported
pose_trt = True  # whether to use TensorRT acceleration
if pose_model_platform == "paddle":
    if pose_trt:
        run_mode = "trt_fp32"
    else:
        run_mode = "paddle"
    keypoint_model_config = dict(model_dir=os.path.join(model_root, "pose_model/tinypose_256x192"),
                                 device="gpu:{}".format(cuda_index),
                                 trt_calib_mode=True,
                                 run_mode=run_mode,
                                 enable_mkldnn=True,
                                 batch_size=8,
                                 threshold=0.5
                                 )
elif pose_model_platform == "mmpose":
    if pose_trt:
        # BUGFIX: the model_config_path literal used to end with a stray
        # trailing space ("...mspn50_coco_256x192.py "), yielding a file path
        # that does not exist on disk.
        keypoint_model_config = dict(model_config_path=os.path.join(model_root,
                                                                    "mspn50_coco_256x192_topdown_heatmap/mspn50_coco_256x192.py"),
                                     deploy_config_path=os.path.join(model_root,
                                                                     "mspn50_coco_256x192_topdown_heatmap/pose-detection_tensorrt_static-256x192.py"),
                                     device="cuda:{}".format(cuda_index),
                                     checkpoint=[os.path.join(model_root,
                                                              "mspn50_coco_256x192_topdown_heatmap/end2end.engine")]
                                     )
    else:
        keypoint_model_config = dict(model_config_path=os.path.join(model_root,
                                                                    "mspn50_coco_256x192_topdown_heatmap/mspn50_coco_256x192.py"),
                                     device="cuda:{}".format(cuda_index),
                                     checkpoint=os.path.join(model_root,
                                                             "mspn50_coco_256x192_topdown_heatmap/mspn50_coco_256x192-8fbfb5d0_20201123.pth")
                                     )
# ------------------------- Action-recognition model configuration ------------------------ #
if use_keypoint:
    action_config_root = os.path.join(model_root, "action_model/stgcn_80e_ntu60_xsub_keypoint")
    save_kp_npy = False  # whether to save collected skeleton keypoints and render the keypoint video
    dataset_format = 'TopDownCocoDataset'
    class_name = "fall"  # run, jump .etc # action class for which skeleton keypoints are collected
    npy_output_dir = os.path.join(pro_root, "test_npy/{}".format(class_name))
    if save_kp_npy:
        # exist_ok=True replaces the check-then-create (os.path.exists + makedirs)
        # pattern, which is racy and fails if the directory appears in between.
        os.makedirs(npy_output_dir, exist_ok=True)
    action_model_config = dict(
        model_config_path=os.path.join(action_config_root, "stgcn_80e_ntu60_xsub_keypoint_customer.py"),
        checkpoint=os.path.join(action_config_root, "best_top1_acc_epoch_26.pth"),
        action_label=os.path.join(pro_root, "dev/configs/customer_action.txt"),
        device="cuda:{}".format(cuda_index),
        item_max_size=item_max_size,  # number of frames kept for action recognition
        save_kp_npy=save_kp_npy,
        dataset_format=dataset_format,
        npy_output_dir=npy_output_dir
    )
# ----------------------- Crowd-gathering detection configuration ---------------------------- #
eps = 100  # clustering distance between persons
min_samples = 2  # minimum number of persons per cluster
# ----------------------- Intrusion-detection configuration ------------------------------- #
limited_area = (800, 200, 1000, 600)  # frame coordinates of the restricted area
# -------------------------- Result-display configuration ---------------------------- #
show_result = True  # whether to display the result
show_config = dict(kps_threshold=0.3, draw_point_num=30)  # keypoint display threshold and number of trajectory points to draw