Basic setup:
conda install x264 ffmpeg -c conda-forge -y
apt-get update
apt-get install git -y
apt-get install zip -y
apt-get install unzip -y
cd /home
git clone https://gitee.com/YFwinston/MPCLST.git
Install yolov5 and deep sort
cd /home/MPCLST/yolovDeepsort
pip install -r requirements.txt
pip install opencv-python-headless==4.1.2.30
mkdir -p /root/.config/Ultralytics/
cp /user-data/yolov5File/crowdhuman_vbody_yolov5m.pt /home/MPCLST/yolovDeepsort/yolov5/crowdhuman_vbody_yolov5m.pt
cp /user-data/yolov5File/Arial.ttf /root/.config/Ultralytics/Arial.ttf
cp /user-data/yolov5File/ckpt.t7 ./deep_sort_pytorch/deep_sort/deep/checkpoint/ckpt.t7
Install mmaction2
pip install mmcv-full==1.3.17 -f https://download.openmmlab.com/mmcv/dist/cu111/torch1.8.0/index.html
pip install opencv-python-headless==4.1.2.30
pip install moviepy
cd /home/MPCLST/mmaction2_YF
pip install -r requirements/build.txt
pip install -v -e .
mkdir -p ./data/ava
cd ..
git clone https://gitee.com/YFwinston/mmdetection.git
cd mmdetection
pip install -r requirements/build.txt
pip install -v -e .
cd ../mmaction2_YF
wget https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_bbox_mAP-0.384_20200504_210434-a5d8aa15.pth -P ./Checkpionts/mmdetection/
wget https://download.openmmlab.com/mmaction/recognition/slowfast/slowfast_r50_8x8x1_256e_kinetics400_rgb/slowfast_r50_8x8x1_256e_kinetics400_rgb_20200716-73547d2b.pth -P ./Checkpionts/mmaction/
First annotation round
cd /home/MPCLST/Dataset
bash addDatasetXX.sh Dataset01
Copy the videos
cp /user-data/studentVideo/MVideo/video01/* /home/MPCLST/Dataset/Dataset01/videos/
Crop the videos
cd /home/MPCLST/Dataset
rm -r ./Dataset01/video_crop/*
bash cutVideos.sh ./Dataset01/videos ./Dataset01/video_crop ./Dataset01/cutVideos.txt
Extract frames
cd /home/MPCLST/Dataset
rm -r ./Dataset01/frames/*
bash cut_frames.sh ./Dataset01/video_crop ./Dataset01/frames
Consolidate and subsample frames
cd /home/MPCLST/Dataset
rm -r ./Dataset01/choose_frames_all/*
python choose_frames_all.py --frames_dir ./Dataset01/frames --choose_frames_all_dir ./Dataset01/choose_frames_all/
Subsample without consolidating
cd /home/MPCLST/Dataset
rm -r ./Dataset01/choose_frames/*
python choose_frames.py --frames_dir ./Dataset01/frames --choose_frames_dir ./Dataset01/choose_frames/
Run detection on choose_frames_all
cd /home/MPCLST/yolovDeepsort
rm -r ../Dataset/Dataset01/detect/*
python ./yolov5/detect.py --source ../Dataset/Dataset01/choose_frames_all/ --save-txt --save-conf --weights ./yolov5/crowdhuman_vbody_yolov5m.pt --hide-labels --line-thickness 2 --project ../Dataset/Dataset01/detect
Filter out abnormal boxes
cd /home/MPCLST/Dataset/
mkdir -p ./Dataset01/detect/newExp
mkdir -p ./Dataset01/visualize
rm -r ./Dataset01/detect/newExp/*
rm -r ./Dataset01/visualize/*
python filter.py --label_dir ./Dataset01/detect/exp/labels --image_dir ./Dataset01/choose_frames_all --newExp_dir ./Dataset01/detect/newExp/
Generate dense_proposals_train.pkl
cd /home/MPCLST/Dataset/
python dense_proposals_train.py --label_dir ./Dataset01/detect/newExp --proposals_dir ./Dataset01/annotations/dense_proposals_train.pkl
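To sanity-check the result, a quick inspection sketch (assuming the pickle holds an AVA-style dict that maps "videoID,secondID" keys to arrays of [x1, y1, x2, y2, score] rows; this is an illustration, not part of the repo's scripts):
# inspect_proposals.py -- minimal sketch, assumptions as stated above
import pickle
with open('./Dataset01/annotations/dense_proposals_train.pkl', 'rb') as f:
    proposals = pickle.load(f, encoding='latin1')   # latin1 also handles py2-style pickles
print('number of keys:', len(proposals))
for key, boxes in list(proposals.items())[:3]:
    print(key, boxes)                               # expect one box array per key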
Import into VIA
cd /home/MPCLST/Dataset/
python choose_frames_middle.py --frames_dir ./Dataset01/choose_frames --choose_frames_middle_dir ./Dataset01/choose_frames_middle/
Generate the VIA annotation files
cd /home/MPCLST/Dataset/
python dense_proposals_train_to_via.py --dense_proposals_dir ./Dataset01/annotations/dense_proposals_train.pkl --json_path ./Dataset01/choose_frames_middle/
Remove the VIA default values
cd /home/MPCLST/Dataset
python chang_via_json.py --choose_frames_middle_dir ./Dataset01/choose_frames_middle
Download choose_frames_middle and the VIA annotations
cd /home/MPCLST/Dataset
rm Dataset01.zip
zip -r Dataset01.zip ./Dataset01/choose_frames_middle/*
Create fake data
cd /home/MPCLST/Dataset
python FakeData.py --choose_frames_middle_dir ./Dataset01/choose_frames_middle
Download choose_frames_middle and the VIA annotations
cd /home/MPCLST/Dataset
rm Dataset01.zip
zip -r Dataset01.zip ./Dataset01/choose_frames_middle/*
After the first annotation round is finished
Extract the uploaded, finished JSON annotation files
Note: each finished annotation file is named <videoName>_finish.json; for example, for video 1000204 the finished file is named 1000204_finish.json.
cd /home/MPCLST/Dataset/
python json_extract.py --choose_frames_middle_dir ./Dataset01/choose_frames_middle --train_without_personID_dir ./Dataset01/train_without_personID.csv
deep sort
dense_proposals_train_deepsort.py
Deep sort needs two lead-in frames before it can start assigning person IDs from the third frame, while dense_proposals_train.pkl starts at the third frame (i.e. frames 0 and 1 are missing), so frames 0 and 1 have to be added.
cd /home/MPCLST/Dataset
python dense_proposals_train_deepsort.py --yoloLabel_dir ./Dataset01/detect/newExp --dense_proposals_dir ./Dataset01/dense_proposals_train_deepsort.pkl
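A minimal sketch of the padding idea (illustrative only; the repo's dense_proposals_train_deepsort.py builds its output from the yolo label files, and the "videoID,secondID" key format and zero-padding below are assumptions):
# pad seconds 0 and 1 by reusing the boxes of the earliest annotated second (second 2)
import pickle
with open('./Dataset01/annotations/dense_proposals_train.pkl', 'rb') as f:
    proposals = pickle.load(f, encoding='latin1')
padded = dict(proposals)
for key, boxes in proposals.items():
    video_id, sec = key.split(',')
    if int(sec) == 2:                               # earliest second present in the pkl
        for missing in (0, 1):                      # deep sort needs two lead-in frames
            padded['%s,%04d' % (video_id, missing)] = boxes
with open('./Dataset01/dense_proposals_train_deepsort.pkl', 'wb') as f:
    pickle.dump(padded, f)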
Next, use deep sort to associate person IDs.
Feed the frames together with the boxes detected by yolov5 into deep sort.
cd /home/MPCLST/yolovDeepsort/
python yolov5_to_deepsort.py --source ../Dataset/Dataset01/frames --train_personID_dir ../Dataset/Dataset01/train_personID.csv --dense_proposals_train_deepsort_dir ../Dataset/Dataset01/dense_proposals_train_deepsort.pkl
Merge the two CSVs
cd /home/MPCLST/Dataset/
python train_temp.py --train_personID_dir ./Dataset01/train_personID.csv --train_without_personID_dir ./Dataset01/train_without_personID.csv --train_temp_dir ./Dataset01/train_temp.csv
Fix train_temp.csv
train_temp.csv still contains rows with a person ID of -1; these need to be corrected.
cd /home/MPCLST/Dataset/
python train.py --train_temp_dir ./Dataset01/train_temp.csv --train_dir ./Dataset01/annotations/train.csv
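The repo's train.py performs the actual correction; purely as an illustration of the idea (assuming the person ID sits in the last CSV column, and simply dropping the unresolved rows rather than re-assigning them), it could look like this:
# drop rows whose person ID is -1 -- rough illustration, not the repo's method
import csv
with open('./Dataset01/train_temp.csv') as f:
    rows = list(csv.reader(f))
kept = [r for r in rows if r and r[-1].strip() != '-1']   # boxes deep sort could not ID
print('dropped %d rows with personID == -1' % (len(rows) - len(kept)))
with open('./Dataset01/annotations/train.csv', 'w', newline='') as f:
    csv.writer(f).writerows(kept)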
Second annotation round
cd /home/MPCLST/Dataset
bash addDatasetXX.sh Dataset02
Copy the videos
cp /user-data/studentVideo/MVideo/video02/* /home/MPCLST/Dataset/Dataset02/videos/
Crop the videos
cd /home/MPCLST/Dataset
rm -r ./Dataset02/video_crop/*
bash cutVideos.sh ./Dataset02/videos ./Dataset02/video_crop ./Dataset02/cutVideos.txt
Extract frames
cd /home/MPCLST/Dataset
rm -r ./Dataset02/frames/*
bash cut_frames.sh ./Dataset02/video_crop ./Dataset02/frames
Consolidate and subsample frames
cd /home/MPCLST/Dataset
rm -r ./Dataset02/choose_frames_all/*
python choose_frames_all.py --frames_dir ./Dataset02/frames --choose_frames_all_dir ./Dataset02/choose_frames_all/
Subsample without consolidating
cd /home/MPCLST/Dataset
rm -r ./Dataset02/choose_frames/*
python choose_frames.py --frames_dir ./Dataset02/frames --choose_frames_dir ./Dataset02/choose_frames/
Run detection on choose_frames_all
cd /home/MPCLST/yolovDeepsort
rm -r ../Dataset/Dataset02/detect/*
python ./yolov5/detect.py --source ../Dataset/Dataset02/choose_frames_all/ --save-txt --save-conf --weights ./yolov5/crowdhuman_vbody_yolov5m.pt --hide-labels --line-thickness 2 --project ../Dataset/Dataset02/detect
Filter out abnormal boxes
cd /home/MPCLST/Dataset/
mkdir -p ./Dataset02/detect/newExp
mkdir -p ./Dataset02/visualize
rm -r ./Dataset02/detect/newExp/*
rm -r ./Dataset02/visualize/*
python filter.py --label_dir ./Dataset02/detect/exp/labels --image_dir ./Dataset02/choose_frames_all --newExp_dir ./Dataset02/detect/newExp/
Generate dense_proposals_train.pkl
cd /home/MPCLST/Dataset/
python dense_proposals_train.py --label_dir ./Dataset02/detect/newExp --proposals_dir ./Dataset02/annotations/dense_proposals_train.pkl
Import into VIA
cd /home/MPCLST/Dataset/
python choose_frames_middle.py --frames_dir ./Dataset02/choose_frames --choose_frames_middle_dir ./Dataset02/choose_frames_middle/
Generate the VIA annotation files
cd /home/MPCLST/Dataset/
python dense_proposals_train_to_via.py --dense_proposals_dir ./Dataset02/annotations/dense_proposals_train.pkl --json_path ./Dataset02/choose_frames_middle/
Remove the VIA default values
cd /home/MPCLST/Dataset
python chang_via_json.py --choose_frames_middle_dir ./Dataset02/choose_frames_middle
Download choose_frames_middle and the VIA annotations
cd /home/MPCLST/Dataset
rm Dataset02.zip
zip -r Dataset02.zip ./Dataset02/choose_frames_middle/*
Create fake data
cd /home/MPCLST/Dataset
python FakeData.py --choose_frames_middle_dir ./Dataset02/choose_frames_middle
Download choose_frames_middle and the VIA annotations
cd /home/MPCLST/Dataset
rm Dataset02.zip
zip -r Dataset02.zip ./Dataset02/choose_frames_middle/*
After the second annotation round is finished
Extract the uploaded, finished JSON annotation files
Note: each finished annotation file is named <videoName>_finish.json; for example, for video 1000204 the finished file is named 1000204_finish.json.
cd /home/MPCLST/Dataset/
python json_extract.py --choose_frames_middle_dir ./Dataset02/choose_frames_middle --train_without_personID_dir ./Dataset02/train_without_personID.csv
deep sort
dense_proposals_train_deepsort.py
Deep sort needs two lead-in frames before it can start assigning person IDs from the third frame, while dense_proposals_train.pkl starts at the third frame (i.e. frames 0 and 1 are missing), so frames 0 and 1 have to be added.
cd /home/MPCLST/Dataset
python dense_proposals_train_deepsort.py --yoloLabel_dir ./Dataset02/detect/newExp --dense_proposals_dir ./Dataset02/dense_proposals_train_deepsort.pkl
Next, use deep sort to associate person IDs.
Feed the frames together with the boxes detected by yolov5 into deep sort.
cd /home/MPCLST/yolovDeepsort/
python yolov5_to_deepsort.py --source ../Dataset/Dataset02/frames --train_personID_dir ../Dataset/Dataset02/train_personID.csv --dense_proposals_train_deepsort_dir ../Dataset/Dataset02/dense_proposals_train_deepsort.pkl
Merge the two CSVs
cd /home/MPCLST/Dataset/
python train_temp.py --train_personID_dir ./Dataset02/train_personID.csv --train_without_personID_dir ./Dataset02/train_without_personID.csv --train_temp_dir ./Dataset02/train_temp.csv
Fix train_temp.csv
train_temp.csv still contains rows with a person ID of -1; these need to be corrected.
cd /home/MPCLST/Dataset/
python train.py --train_temp_dir ./Dataset02/train_temp.csv --train_dir ./Dataset02/annotations/train.csv
After all annotation rounds are finished
rawframes
The cropped video frames are named in a way that does not match what training expects, so the images under /home/MPCLST/Dataset/frames need to be renamed.
For example:
Original name: rawframes/1/1_000001.jpg
Target name: rawframes/1/img_00001.jpg
cd /home/MPCLST/Dataset/
mkdir -p /home/MPCLST/Dataset/rawframes
rm -r ./rawframes/*
python contact_proposals.py
python contact_folder.py
python change_raw_frames.py --rawframes_dir ./rawframes
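If the rename ever has to be redone by hand, here is a minimal sketch of the naming change described above (assuming per-video folders under ./rawframes that contain files like 1_000001.jpg; change_raw_frames.py is the repo's actual tool):
# rename <videoID>_NNNNNN.jpg -> img_NNNNN.jpg inside every per-video folder
import os
root = './rawframes'
for video_id in os.listdir(root):
    folder = os.path.join(root, video_id)
    if not os.path.isdir(folder):
        continue
    for name in sorted(os.listdir(folder)):
        stem, ext = os.path.splitext(name)
        if ext.lower() != '.jpg' or '_' not in stem:
            continue
        idx = int(stem.split('_')[-1])          # e.g. 000001
        os.rename(os.path.join(folder, name),
                  os.path.join(folder, 'img_%05d%s' % (idx, ext)))   # -> img_00001.jpg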
Training and testing
Fields in the following config file need to be changed:
/home/MPCLST/mmaction2_YF/configs/detection/ava/my_slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py
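As a rough guide (the field names below follow standard mmaction2 AVA-style configs; the values are illustrative assumptions, not a copy of the repo's config), the entries that usually need editing are the class count, the annotation/proposal paths, and the pretrained checkpoint:
# Fields typically edited in my_slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py
# (illustrative values -- substitute your own label count and paths)
num_classes = 81                      # number of your action labels + 1 (background)
data_root = '/home/MPCLST/Dataset/rawframes'
anno_root = '/home/MPCLST/Dataset/annotations'
ann_file_train = anno_root + '/train.csv'
label_file = anno_root + '/action_list.pbtxt'          # your label map / action list
proposal_file_train = anno_root + '/dense_proposals_train.pkl'
load_from = ('/home/MPCLST/mmaction2_YF/Checkpionts/mmaction/'
             'slowfast_r50_8x8x1_256e_kinetics400_rgb_20200716-73547d2b.pth')
total_epochs = 20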
Training
cd /home/MPCLST/mmaction2_YF
python tools/train.py configs/detection/ava/my_slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py --validate
Testing
cd /home/MPCLST/yolovDeepsort
python yolov5Slowfasy.py --source /home/MPCLST/Dataset/Dataset01/video_crop/1000101.mp4 --save-txt --yolo_weights ./yolov5/crowdhuman_vbody_yolov5m.pt --checkpoint-slowfast /home/MPCLST/mmaction2_YF/work_dirs/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/best_mAP@0.5IOU_epoch_1.pth --config-slowfast /home/MPCLST/mmaction2_YF/configs/detection/ava/my_slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py --label-map /home/MPCLST/mmaction2_YF/tools/data/ava/label_map2.txt --classes 1
Other: online HTML-to-PDF conversion: https://www.aconvert.com/cn/pdf/html-to-pdf/
cd /home/MPCLST/yolovDeepsort
python yolov5Slowfasy.py --source /home/MPCLST/Dataset/Dataset01/video_crop/1000203.mp4 --save-txt --yolo_weights ./yolov5/crowdhuman_vbody_yolov5m.pt --checkpoint-slowfast /home/MPCLST/mmaction2_YF/work_dirs/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/best_mAP@0.5IOU_epoch_29.pth --config-slowfast /home/MPCLST/mmaction2_YF/configs/detection/ava/my_slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py --label-map /home/MPCLST/mmaction2_YF/tools/data/ava/label_map2.txt --classes 1