Kindergarten behavior classification
Three top-level categories (A, B, C). Within each category select at most one label (selecting none is allowed), but at least one of the three categories must be selected (a small validation sketch follows the label lists below).
A (Chinese): 坐、站、走、跑、跳、蹲、爬、跪坐、举手
B (Chinese): 搭积木、玩玩具、做饭、弹钢琴、画画、吃喝、看书、拿东西、打扫卫生、整理东西 (teachers only)
C (Chinese): 说话、听讲
A (English): sit, stand, walk, run, jump, squat, climb, kneel, raise hands
B (English): build blocks, play with toys, cook, play piano, draw, eat and drink, read, carry things, clean, organize things (teachers only)
C (English): talk, listen
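The rule above can be checked mechanically. A minimal sketch (illustration only, not part of the AVA-6-seconds repo), using the numeric action IDs 1-21 defined later in label_map2.txt:

# Hypothetical checker for the A/B/C rule: at most one label per category, at least one label overall.
A_IDS = set(range(1, 10))    # sit ... raise hands
B_IDS = set(range(10, 20))   # build blocks ... organize things
C_IDS = set(range(20, 22))   # talk, listen

def is_valid_label_set(action_ids):
    ids = set(action_ids)
    if not ids or not ids <= (A_IDS | B_IDS | C_IDS):
        return False
    return all(len(ids & cat) <= 1 for cat in (A_IDS, B_IDS, C_IDS))

print(is_valid_label_set([2, 14, 20]))  # True: stand + eat and drink + talk
print(is_valid_label_set([1, 2]))       # False: two labels from category A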
Basic setup:
conda install x264 ffmpeg -c conda-forge -y
apt-get update
apt-get install git -y
apt-get install zip -y
apt-get install unzip -y
DAV to MP4 conversion
First upload the DAV videos, then convert them to MP4 with ffmpeg:
ffmpeg -i /user-data/kindergarten/dav/10001.dav /user-data/kindergarten/videos/10001.mp4
ffmpeg -i /user-data/kindergarten/dav/10002.dav /user-data/kindergarten/videos/10002.mp4
ffmpeg -i /user-data/kindergarten/dav/10003.dav /user-data/kindergarten/videos/10003.mp4
ffmpeg -i /user-data/kindergarten/dav/10004.dav /user-data/kindergarten/videos/10004.mp4
ffmpeg -i /user-data/kindergarten/dav/10005.dav /user-data/kindergarten/videos/10005.mp4
ffmpeg -i /user-data/kindergarten/dav/10006.dav /user-data/kindergarten/videos/10006.mp4
ffmpeg -i /user-data/kindergarten/dav/10007.dav /user-data/kindergarten/videos/10007.mp4
ffmpeg -i /user-data/kindergarten/dav/10008.dav /user-data/kindergarten/videos/10008.mp4
ffmpeg -i /user-data/kindergarten/dav/10009.dav /user-data/kindergarten/videos/10009.mp4
ffmpeg -i /user-data/kindergarten/dav/10010.dav /user-data/kindergarten/videos/10010.mp4
ffmpeg -i /user-data/kindergarten/dav/10011.dav /user-data/kindergarten/videos/10011.mp4
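The same conversion can be scripted instead of listing each file by hand; a minimal sketch, assuming all .dav files live in /user-data/kindergarten/dav/:

# Hypothetical batch version of the ffmpeg commands above.
import subprocess
from pathlib import Path

dav_dir = Path("/user-data/kindergarten/dav")
mp4_dir = Path("/user-data/kindergarten/videos")
mp4_dir.mkdir(parents=True, exist_ok=True)

for dav in sorted(dav_dir.glob("*.dav")):
    out = mp4_dir / (dav.stem + ".mp4")
    subprocess.run(["ffmpeg", "-i", str(dav), str(out)], check=True)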
Install AVA-6-seconds
cd /home
git clone https://gitee.com/YFwinston/AVA-6-seconds.git
Install YOLOv5 and Deep SORT
cd /home/AVA-6-seconds/yolovDeepsort
pip install -r requirements.txt
pip install opencv-python-headless==4.1.2.30
mkdir -p /root/.config/Ultralytics/
cp /user-data/yolov5File/crowdhuman_vbody_yolov5m.pt /home/AVA-6-seconds/yolovDeepsort/yolov5/crowdhuman_vbody_yolov5m.pt
cp /user-data/yolov5File/Arial.ttf /root/.config/Ultralytics/Arial.ttf
cp /user-data/yolov5File/ckpt.t7 ./deep_sort_pytorch/deep_sort/deep/checkpoint/ckpt.t7
Install mmaction2
pip install mmcv-full==1.3.17 -f https://download.openmmlab.com/mmcv/dist/cu111/torch1.8.0/index.html
pip install opencv-python-headless==4.1.2.30
pip install moviepy
cd /home/AVA-6-seconds/mmaction2_YF
pip install -r requirements/build.txt
pip install -v -e .
mkdir -p ./data/ava
cd ..
git clone https://gitee.com/YFwinston/mmdetection.git
cd mmdetection
pip install -r requirements/build.txt
pip install -v -e .
cd ../mmaction2_YF
wget https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_bbox_mAP-0.384_20200504_210434-a5d8aa15.pth -P ./Checkpionts/mmdetection/
wget https://download.openmmlab.com/mmaction/recognition/slowfast/slowfast_r50_8x8x1_256e_kinetics400_rgb/slowfast_r50_8x8x1_256e_kinetics400_rgb_20200716-73547d2b.pth -P ./Checkpionts/mmaction/
Data preparation
The source videos are in /user-data/kindergarten/videos.
cd /home/AVA-6-seconds/Dataset
bash Dataset.sh
cp /user-data/kindergarten/videos/* /home/AVA-6-seconds/Dataset/videos/
rm -r ./choose_frames/*
rm -r ./choose_frames_all/*
rm -r ./choose_frames_middle/*
rm -r ./frames/*
rm -r ./video_crops/*
rm -r ./choose_all_middlemost/*
rm -r ./detect/*
bash cutVideos.sh ./videos ./video_crops ./video_crop_demo.txt 6
bash cut_frames.sh ./video_crops ./frames
python choose_frames_all.py --frames_dir ./frames --choose_frames_all_dir ./choose_frames_all/ --seconds 6
# Extract all the middlemost images (every image whose filename ends in 91)
python choose_frames_all.py --frames_dir ./frames --choose_frames_all_dir ./choose_all_middlemost/ --seconds 3 --start 3
python choose_frames.py --frames_dir ./frames --choose_frames_dir ./choose_frames/ --seconds 6
python choose_frames_middle.py --frames_dir ./choose_frames --choose_frames_middle_dir ./choose_frames_middle/
Detection
Run YOLOv5 person detection on the chosen frames:
cd /home/AVA-6-seconds/yolovDeepsort
rm -r ../Dataset/detect/*
python ./yolov5/detect.py --source ../Dataset/choose_all_middlemost/ --save-txt --save-conf --weights ./yolov5/crowdhuman_vbody_yolov5m.pt --hide-labels --line-thickness 2 --project ../Dataset/detect
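YOLOv5's --save-txt option writes one .txt file per image (here under ../Dataset/detect/exp/labels/), with one line per detected box in normalized xywh format; --save-conf appends the confidence. A minimal parsing sketch:

# Parse a YOLOv5 --save-txt label file: "class x_center y_center width height [conf]" (all normalized).
def load_yolo_labels(path):
    boxes = []
    with open(path) as f:
        for line in f:
            cls, xc, yc, w, h, *rest = line.split()
            conf = float(rest[0]) if rest else None
            boxes.append((int(cls), float(xc), float(yc), float(w), float(h), conf))
    return boxes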
Abnormal-box filtering (currently disabled)
cd /home/AVA-6-seconds/Dataset/
mkdir -p ./detect/newExp
mkdir -p ./visualize
rm -r ./detect/newExp/*
rm -r ./visualize/*
python filter.py --label_dir ./detect/exp/labels --image_dir ./choose_frames_all --newExp_dir ./detect/newExp/
Copy detections to the preceding and following seconds
cd /home/AVA-6-seconds/Dataset/
#python cpPreNextLabel.py --label_dir ./detect/newExp/ --new_label_dir ./detect/newLabels/
python cpPreNextLabel.py --label_dir ./detect/exp/labels/ --new_label_dir ./detect/newLabels/
Generate dense_proposals_train.pkl
cd /home/AVA-6-seconds/Dataset/
python dense_proposals_train.py --label_dir ./detect/newLabels --proposals_dir ./annotations/dense_proposals_train.pkl
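To sanity-check the result, the pickle can be inspected. A minimal sketch, assuming it follows the usual AVA dense-proposals layout (keys like "videoID,timestamp" mapping to rows of normalized [x1, y1, x2, y2, score]):

# Inspect dense_proposals_train.pkl (assumed AVA-style layout).
import pickle

with open("./annotations/dense_proposals_train.pkl", "rb") as f:
    proposals = pickle.load(f, encoding="latin1")

for key, boxes in list(proposals.items())[:3]:
    # key: "videoID,timestamp"; boxes: [x1, y1, x2, y2, score] rows, coordinates normalized to [0, 1]
    print(key, len(boxes))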
Generate the VIA annotation files
The following attributes_dict needs to be modified:
attributes_dict = {'1':dict(aname='A', type=3, options={'0':'坐', '1':'站', '2':'走', '3':'跑', '4':'跳', '5':'蹲', '6':'爬', '7':'跪坐', '8':'举手'},default_option_id="", anchor_id = 'FILE1_Z0_XY1'),
'2': dict(aname='B', type=3, options={'9':'搭积木', '10':'玩玩具', '11':'做饭', '12':'弹钢琴', '13':'画画', '14':'吃喝', '15':'看书', '16':'拿东西', '17':'打扫卫生', '18':'整理东西'}, default_option_id="", anchor_id='FILE1_Z0_XY1'),
'3': dict(aname='C', type=3, options={'19':'说话', '20':'听讲'}, default_option_id="", anchor_id='FILE1_Z0_XY1'),
}
cd /home/AVA-6-seconds/Dataset/
python dense_proposals_train_to_via.py --dense_proposals_dir ./annotations/dense_proposals_train.pkl --json_path ./choose_frames_middle/
Remove the VIA default values:
cd /home/AVA-6-seconds/Dataset
python chang_via_json.py --choose_frames_middle_dir ./choose_frames_middle
Download choose_frames_middle and annotate it with VIA
cd /home/AVA-6-seconds/Dataset
rm Dataset.zip
zip -r Dataset.zip ./choose_frames_middle/*
After annotation is complete
Copy the annotations of the middle frame (the second frame) to the first and third frames:
cd /home/AVA-6-seconds/Dataset/
python copy_label_via.py --label_dir ./choose_frames_middle/
# Zip choose_frames_middle for download
rm Dataset.zip
zip -r Dataset.zip ./choose_frames_middle/*
Generate train.csv. Since the three frames share identical annotations, Deep SORT is not needed to assign person IDs.
cd /home/AVA-6-seconds/Dataset
python train_json_extract.py --choose_frames_middle_dir ./choose_frames_middle --train_dir ./annotations/train_all.csv
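For reference, each row of train_all.csv is assumed to follow the AVA annotation layout (video_id, timestamp, x1, y1, x2, y2, action_id, person_id, with box coordinates normalized to [0, 1]); a minimal read sketch:

# Assumed AVA-style columns: video_id, timestamp, x1, y1, x2, y2, action_id, person_id
import csv

with open("./annotations/train_all.csv") as f:
    for video_id, timestamp, x1, y1, x2, y2, action_id, person_id in csv.reader(f):
        print(video_id, timestamp, action_id, person_id)
        break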
Generating the other annotation files
Under /home/AVA-6-seconds/Dataset/annotations/, create:
action_list.pbtxt
item {
name: "坐"
id: 1
}
item {
name: "站"
id: 2
}
item {
name: "走"
id: 3
}
item {
name: "跑"
id: 4
}
item {
name: "跳"
id: 5
}
item {
name: "蹲"
id: 6
}
item {
name: "爬"
id: 7
}
item {
name: "跪坐"
id: 8
}
item {
name: "举手"
id: 9
}
item {
name: "搭积木"
id: 10
}
item {
name: "玩玩具"
id: 11
}
item {
name: "做饭"
id: 12
}
item {
name: "弹钢琴"
id: 13
}
item {
name: "画画"
id: 14
}
item {
name: "吃喝"
id: 15
}
item {
name: "看书"
id: 16
}
item {
name: "拿东西"
id: 17
}
item {
name: "打扫卫生"
id: 18
}
item {
name: "整理东西"
id: 19
}
item {
name: "说话"
id: 20
}
item {
name: "听讲"
id: 21
}
Split the existing labels (dense_proposals_train.pkl and train_all.csv) into train, val, and test sets:
cd /home/AVA-6-seconds/Dataset
python train_val_test_generate.py --DatasetXX_dir ./ --annotations_dir ../annotations/
rawframes
The naming of the extracted frames does not match what training expects, so the images copied into rawframes need to be renamed.
For example:
Original name: rawframes/20/20_000001.jpg
Target name: rawframes/83/img_00001.jpg
cd /home/AVA-6-seconds/Dataset/
rm -r ./rawframes/*
cp -r /home/AVA-6-seconds/Dataset/frames/* /home/AVA-6-seconds/Dataset/rawframes/
python change_raw_frames.py --rawframes_dir ./rawframes
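For reference, a minimal sketch of what this renaming amounts to (my approximation, not the actual change_raw_frames.py: keep each video's frames in order and rename them to the img_XXXXX.jpg pattern):

# Hypothetical equivalent of change_raw_frames.py: rename <video>_NNNNNN.jpg to img_NNNNN.jpg in place.
from pathlib import Path

rawframes = Path("./rawframes")
for video_dir in sorted(p for p in rawframes.iterdir() if p.is_dir()):
    for idx, frame in enumerate(sorted(video_dir.glob("*.jpg")), start=1):
        frame.rename(video_dir / f"img_{idx:05d}.jpg")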
Training and testing
The following config file has fields that need to be changed (typical fields are sketched after the path):
/home/AVA-6-seconds/mmaction2_YF/configs/detection/ava/my_slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py
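Typical fields to adjust, assuming the config follows the standard mmaction2 AVA SlowFast layout (the field names and values below are my assumptions; verify against the actual file in this fork):

# Assumed mmaction2-style AVA config fields (adjust to this project's paths and class count):
num_classes = 22                                    # 21 action classes + 1 background, AVA convention
data_root = 'data/ava/rawframes'
anno_root = 'data/ava/annotations'
ann_file_train = f'{anno_root}/train.csv'
ann_file_val = f'{anno_root}/val.csv'
label_file = f'{anno_root}/action_list.pbtxt'
proposal_file_train = f'{anno_root}/dense_proposals_train.pkl'
proposal_file_val = f'{anno_root}/dense_proposals_val.pkl'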
Training
cd /home/AVA-6-seconds/mmaction2_YF
python tools/train.py configs/detection/ava/my_slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py --validate
In /home/AVA-6-seconds/mmaction2_YF/tools/data/ava/, create label_map2.txt with the following content:
1: sit
2: stand
3: walk
4: run
5: jump
6: squat
7: climb
8: kneel
9: raise hands
10: build blocks
11: play with toys
12: cook
13: play piano
14: draw
15: eat and drink
16: read
17: carry things
18: clean
19: organize things
20: talk
21: listen
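The label map uses a simple "id: name" line format; a minimal parsing sketch (illustration only, the test script below loads it via --label-map itself):

# Parse an "id: name" label map such as label_map2.txt.
def load_label_map(path):
    label_map = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            idx, name = line.split(': ', 1)
            label_map[int(idx)] = name
    return label_map

print(load_label_map('label_map2.txt')[9])  # 'raise hands'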
Testing
cd /home/AVA-6-seconds/yolovDeepsort
python yolov5Slowfasy.py \
--source /home/AVA-6-seconds/Dataset/video_crops/1000101.mp4 \
--save-txt \
--yolo_weights ./yolov5/crowdhuman_vbody_yolov5m.pt \
--checkpoint-slowfast ../mmaction2_YF/work_dirs/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/best_mAP@0.5IOU_epoch_88.pth \
--config-slowfast ../mmaction2_YF/configs/detection/ava/my_slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py \
--label-map ../mmaction2_YF/tools/data/ava/label_map2.txt \
--classes 1 \
--action-score-thr 0.2
cd /home/AVA-6-seconds/yolovDeepsort
python yolov5Slowfasy.py --source /home/AVA-6-seconds/Dataset/video_crops/1000101.mp4 --save-txt --yolo_weights ./yolov5/crowdhuman_vbody_yolov5m.pt --checkpoint-slowfast /home/AVA-6-seconds/mmaction2_YF/work_dirs/ava/slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb/best_mAP@0.5IOU_epoch_26.pth --config-slowfast /home/AVA-6-seconds/mmaction2_YF/configs/detection/ava/my_slowfast_kinetics_pretrained_r50_4x16x1_20e_ava_rgb.py --label-map /home/AVA-6-seconds/mmaction2_YF/tools/data/ava/label_map2.txt --classes 1 --action-score-thr 0.1