Without further ado, let's look at the results first.
Below is the result without a mask (using a photo of my idol to cover my face for now).
Below is the result with a mask on.
To describe the effect again: these are not static images but frames read from the camera in real time. First, thanks to AIZOOTech's open-source project FaceMaskDetection. I also drew heavily on the following blog posts, with thanks to each author:
(https://blog.youkuaiyun.com/qq_41204464/article/details/104596777)
(https://blog.youkuaiyun.com/qq_41204464/article/details/106026650)
(https://www.freesion.com/article/8339842034/)
(https://zmister.com/archives/477.html)
The environment is PyCharm (Community Edition) + Anaconda. I won't cover installing the editor and the environment; the posts above explain it, so go read them. A screenshot of the packages pip-installed in the conda environment is shown below (my own machine runs fine with this setup, and it also worked on a friend's machine); just pip install them as shown. Finally, don't forget to run conda install tensorflow. My understanding is that this registers the pip-installed libraries inside conda (corrections welcome). Then run conda list to check that your versions match my screenshot; if one doesn't, uninstall it and reinstall the pinned version, for example: pip uninstall keras followed by pip install keras==2.4.3.
Note that I installed tensorflow 2.2.0, which pairs with keras 2.4.3; keras 2.2, which I installed first, threw errors. The other key third-party libraries are marked with red lines in the screenshot.
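As a quick sanity check (a minimal sketch, assuming only that tensorflow and keras are importable), you can confirm from Python that the installed versions match the ones this post was tested with:

import tensorflow as tf
import keras

# Print the versions the interpreter actually sees.
print("tensorflow:", tf.__version__)   # expected: 2.2.0
print("keras:", keras.__version__)     # expected: 2.4.3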
Below is the main program. For convenience, I put the UI-design code and the main program code in a single file.
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
import qtawesome
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import cv2
import tensorflow_infer as tsf
class Ui_Form(object):
    def setupUi(self, Form):
        Form.setObjectName("Form")
        Form.resize(800, 511)
        self.verticalLayoutWidget = QtWidgets.QWidget(Form)
        self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 60, 131, 441))
        self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setObjectName("verticalLayout")
        self.label1 = QtWidgets.QPushButton(self.verticalLayoutWidget)
        self.label1.setObjectName("labeltitle")
        self.verticalLayout.addWidget(self.label1)
        self.pushButton1 = QtWidgets.QPushButton(qtawesome.icon('fa.camera', color='white'), "")
        self.pushButton1.setObjectName("pushButton")
        self.verticalLayout.addWidget(self.pushButton1)
        self.pushButton2 = QtWidgets.QPushButton(qtawesome.icon('fa.medkit', color='white'), "")
        self.pushButton2.setObjectName("pushButton")
        self.verticalLayout.addWidget(self.pushButton2)
        self.pushButton3 = QtWidgets.QPushButton(qtawesome.icon('fa.smile-o', color='white'), "")
        self.pushButton3.setObjectName("pushButton")
        self.verticalLayout.addWidget(self.pushButton3)
        self.label2 = QtWidgets.QPushButton(self.verticalLayoutWidget)
        self.label2.setObjectName("labeltitle")
        self.verticalLayout.addWidget(self.label2)
        self.pushButton4 = QtWidgets.QPushButton(qtawesome.icon('fa.hourglass-start', color='white'), "")
        self.pushButton4.setObjectName("pushButton")
        self.verticalLayout.addWidget(self.pushButton4)
        self.pushButton5 = QtWidgets.QPushButton(qtawesome.icon('fa.hourglass-end', color='white'), "")
        self.pushButton5.setObjectName("pushButton")
        self.verticalLayout.addWidget(self.pushButton5)
        self.horizontalLayoutWidget = QtWidgets.QWidget(Form)
        self.horizontalLayoutWidget.setGeometry(QtCore.QRect(10, 30, 131, 25))
        self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
        self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.verticalLayoutWidget.setStyleSheet('''
            QPushButton{border:none;color:white;}
            QPushButton#labeltitle{
                border:none;
                border-bottom:1px solid white;
                font-size:18px;
                font-weight:700;
                font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; }
            QPushButton#pushButton:hover{border-left:4px solid red;font-weight:700;}
            QWidget#verticalLayoutWidget{
                background:gray;
                border-top:1px solid white;
                border-bottom:1px solid white;
                border-left:1px solid white;
                border-top-left-radius:10px;
                border-top-right-radius:10px;
                border-bottom-right-radius:10px;
                border-bottom-left-radius:10px; }
        ''')
        self.pushButton8 = QtWidgets.QPushButton(qtawesome.icon('fa.minus-square', color='white'), "")  # minimize button
        self.pushButton8.setObjectName("pushButton8")
        self.horizontalLayout.addWidget(self.pushButton8)
        self.pushButton9 = QtWidgets.QPushButton(qtawesome.icon('fa.window-close', color='white'), "")  # close button
        self.pushButton9.setObjectName("pushButton9")
        self.horizontalLayout.addWidget(self.pushButton9)
        self.pushButton8.setStyleSheet('''QPushButton{background:#F76677;border-radius:5px;}QPushButton:hover{background:red;}''')
        self.pushButton9.setStyleSheet('''QPushButton{background:#6DDF6D;border-radius:5px;}QPushButton:hover{background:green;}''')
        self.pushButton8.clicked.connect(Form.showMinimized)
        self.pushButton9.clicked.connect(Form.close)  # go through close() so the reimplemented closeEvent() runs
        self.verticalLayoutWidget_2 = QtWidgets.QWidget(Form)
        self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(150, 30, 631, 471))
        self.verticalLayoutWidget_2.setObjectName("verticalLayoutWidget_2")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_2)
        self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.label = QtWidgets.QLabel(self.verticalLayoutWidget_2)
        self.label.setObjectName("label")
        self.label.setFixedSize(641, 481)  # fix the video display label at 641x481
        self.verticalLayout_2.addWidget(self.label)
        self.verticalLayoutWidget_2.setStyleSheet('''
            QWidget#verticalLayoutWidget_2{
                background:gray;
                border-top:1px solid white;
                border-bottom:1px solid white;
                border-left:1px solid white;
                border-top-left-radius:10px;
                border-top-right-radius:10px;
                border-bottom-right-radius:10px;
                border-bottom-left-radius:10px; }
        ''')
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
        self.label1.setText("Mask Detection System")
        self.pushButton1.setText("Open Camera")
        self.pushButton2.setText("Enable Mask Detection")
        self.pushButton3.setText("Disable Mask Detection")
        self.label2.setText("Other Features")
        self.pushButton4.setText("Coming Soon")
        self.pushButton5.setText("Coming Soon")
        self.label.setText("Opening the camera takes a while; please wait and don't click around, or the UI may freeze.")

    def retranslateUi(self, Form):
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))

    def setNoTitle(self, Form):
        Form.setWindowFlags(Qt.FramelessWindowHint)  # remove the native title bar
        Form.setWindowOpacity(1)  # window opacity
        Form.setAttribute(Qt.WA_TranslucentBackground)  # make the window background transparent
class MainUi(QtWidgets.QMainWindow):
    def __init__(self, parent=None):
        super().__init__(parent)
        self._tracking = False  # initialized here so mouseMoveEvent is safe before any press
        self._startPos = None
        self._endPos = None

    def mouseMoveEvent(self, e: QMouseEvent):  # reimplemented so the frameless window can be dragged
        if self._tracking:
            self._endPos = e.pos() - self._startPos
            self.move(self.pos() + self._endPos)

    def mousePressEvent(self, e: QMouseEvent):
        if e.button() == Qt.LeftButton:
            self._startPos = QPoint(e.x(), e.y())
            self._tracking = True

    def mouseReleaseEvent(self, e: QMouseEvent):
        if e.button() == Qt.LeftButton:
            self._tracking = False
            self._startPos = None
            self._endPos = None

    def closeEvent(self, event):  # receives the close event; also triggered by the custom close button via close()
        reply = QMessageBox.question(self, 'Notice', 'Are you sure you want to exit?',
                                     QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
        if reply == QMessageBox.Yes:
            event.accept()
            QCoreApplication.instance().quit()
        else:
            event.ignore()
class MyMainForm(QtWidgets.QWidget, Ui_Form):
    def __init__(self, parent=None):
        super().__init__(parent)  # parent class constructor
        self.timer_camera = QtCore.QTimer()  # timer that controls the video frame rate
        self.cap = cv2.VideoCapture()  # video stream
        self.CAM_NUM = 0  # 0 means the laptop's built-in camera
        self.openkz = False  # whether mask detection is enabled

    def setupUi(self, Form):
        Ui_Form.setupUi(self, Form)

    def slot_init(self):
        self.pushButton1.clicked.connect(self.open_camera_clicked)  # called when the camera button is clicked
        self.timer_camera.timeout.connect(self.show_camera)  # called on every timer timeout
        self.pushButton2.clicked.connect(self.openkouzhao)
        self.pushButton3.clicked.connect(self.closekouzhao)

    def openkouzhao(self):
        self.openkz = True

    def closekouzhao(self):
        self.openkz = False

    def open_camera_clicked(self):
        self.label.setText("")
        if not self.timer_camera.isActive():  # timer not running yet
            flag = self.cap.open(self.CAM_NUM)  # 0 opens the built-in camera; a file path would open a video file
            if not flag:  # flag tells whether open() succeeded
                QtWidgets.QMessageBox.warning(self, 'warning', "Please check that the camera is properly connected to the computer",
                                              buttons=QtWidgets.QMessageBox.Ok)
            else:
                self.timer_camera.start(30)  # fire every 30 ms, i.e. grab and display one frame every 30 ms
                self.pushButton1.setText('Close Camera')
        else:
            self.timer_camera.stop()  # stop the timer
            self.cap.release()  # release the video stream
            self.label.clear()  # clear the video display area
            self.pushButton1.setText('Open Camera')
            self.label.setText("Opening the camera takes a while; please wait and don't click around, or the UI may freeze.")

    def show_camera(self):
        flag, self.image = self.cap.read()  # read one frame from the stream
        if not flag:  # no frame available (e.g. camera disconnected)
            return
        if self.openkz:
            img_raw = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
            tsf.inference(img_raw,
                          conf_thresh=0.5,
                          iou_thresh=0.5,
                          target_shape=(260, 260),
                          draw_result=True,
                          show_result=False)
            show = img_raw[:, :, ::-1]  # back to BGR so the conversion below applies to both branches
        else:
            show = cv2.resize(self.image, (640, 480))  # resize the frame to 640x480
        show = cv2.cvtColor(show, cv2.COLOR_BGR2RGB)  # convert to RGB so colors display correctly
        showImage = QtGui.QImage(show.data, show.shape[1], show.shape[0], QtGui.QImage.Format_RGB888)  # wrap the frame as a QImage
        self.label.setPixmap(QtGui.QPixmap.fromImage(showImage))  # show the QImage in the video label
if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    Form = MainUi()
    ui = MyMainForm()
    ui.setupUi(Form)
    ui.setNoTitle(Form)
    ui.slot_init()
    Form.show()
    sys.exit(app.exec_())
Some of you will notice that my Qt UI was built in Designer and then converted to source code. Correct, so there are simpler ways to write this, but I was lazy, hence the endless chain of inheritance. The Qt side relies on a few key techniques: reimplementing the mouse events to drag the window, replacing the title bar with custom minimize and close buttons, and stylesheets. Video frames are then pushed into the label, which is how the video display is opened and closed. A distilled standalone sketch of the drag pattern follows.
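Extracted from the code above, here is a minimal sketch of the drag-to-move pattern for a frameless window (assuming only PyQt5 is installed; widget and class names are mine for illustration):

import sys
from PyQt5 import QtWidgets
from PyQt5.QtCore import Qt, QPoint

class FramelessWindow(QtWidgets.QWidget):
    def __init__(self):
        super().__init__()
        self.setWindowFlags(Qt.FramelessWindowHint)  # remove the native title bar
        self.resize(400, 300)
        self._tracking = False
        self._startPos = None

    def mousePressEvent(self, e):
        if e.button() == Qt.LeftButton:
            self._startPos = QPoint(e.x(), e.y())  # click position inside the window
            self._tracking = True

    def mouseMoveEvent(self, e):
        if self._tracking:
            # move the window by the offset between the cursor and the click position
            self.move(self.pos() + e.pos() - self._startPos)

    def mouseReleaseEvent(self, e):
        if e.button() == Qt.LeftButton:
            self._tracking = False

if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    w = FramelessWindow()
    w.show()
    sys.exit(app.exec_())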
The open-source tensorflow_infer.py needed only a tiny change (really just commenting out a few lines). Each video frame is fed to tsf.inference as the sample to check; the method draws the detection result onto the frame, which is then assigned to the label, and that's it.
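For reference, here is a minimal sketch of calling the detector outside the GUI on a single image (test.jpg is a hypothetical file; tensorflow_infer.py and its models/ directory are assumed to be in the working directory):

import cv2
import tensorflow_infer as tsf

img = cv2.imread('test.jpg')                    # hypothetical test image
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # the model expects RGB
results = tsf.inference(img_rgb,
                        conf_thresh=0.5,
                        iou_thresh=0.5,
                        target_shape=(260, 260),
                        draw_result=True,
                        show_result=False)
# Each entry is [class_id, conf, xmin, ymin, xmax, ymax]; class_id 0 means a mask is worn.
for class_id, conf, xmin, ymin, xmax, ymax in results:
    print(class_id, conf, xmin, ymin, xmax, ymax)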
Below is the code of tensorflow_infer.py (the model and source can be downloaded from the link at the end of this post).
# -*- coding:utf-8 -*-
import cv2
import time
import argparse
import numpy as np
from PIL import Image
from keras.models import model_from_json
from utils.anchor_generator import generate_anchors
from utils.anchor_decode import decode_bbox
from utils.nms import single_class_non_max_suppression
from load_model.tensorflow_loader import load_tf_model, tf_inference
sess, graph = load_tf_model('models/face_mask_detection.pb')
# anchor configuration
feature_map_sizes = [[33, 33], [17, 17], [9, 9], [5, 5], [3, 3]]
anchor_sizes = [[0.04, 0.056], [0.08, 0.11], [0.16, 0.22], [0.32, 0.45], [0.64, 0.72]]
anchor_ratios = [[1, 0.62, 0.42]] * 5
# generate anchors
anchors = generate_anchors(feature_map_sizes, anchor_sizes, anchor_ratios)
# for inference , the batch size is 1, the model output shape is [1, N, 4],
# so we expand dim for anchors to [1, anchor_num, 4]
anchors_exp = np.expand_dims(anchors, axis=0)
id2class = {0: 'YES', 1: 'NO'}
def inference(image,
              conf_thresh=0.5,
              iou_thresh=0.4,
              target_shape=(160, 160),
              draw_result=True,
              show_result=True
              ):
    '''
    Main function of detection inference
    :param image: 3D numpy array of image
    :param conf_thresh: the min threshold of classification probability.
    :param iou_thresh: the IOU threshold of NMS
    :param target_shape: the model input size.
    :param draw_result: whether to draw bounding boxes on the image.
    :param show_result: whether to display the image.
    :return:
    '''
    # image = np.copy(image)
    output_info = []
    height, width, _ = image.shape
    image_resized = cv2.resize(image, target_shape)
    image_np = image_resized / 255.0  # normalize to 0~1
    image_exp = np.expand_dims(image_np, axis=0)
    y_bboxes_output, y_cls_output = tf_inference(sess, graph, image_exp)
    # remove the batch dimension, for batch is always 1 for inference.
    y_bboxes = decode_bbox(anchors_exp, y_bboxes_output)[0]
    y_cls = y_cls_output[0]
    # To speed up, do single class NMS, not multiple classes NMS.
    bbox_max_scores = np.max(y_cls, axis=1)
    bbox_max_score_classes = np.argmax(y_cls, axis=1)
    # keep_idxs holds the surviving bounding boxes after NMS.
    keep_idxs = single_class_non_max_suppression(y_bboxes,
                                                 bbox_max_scores,
                                                 conf_thresh=conf_thresh,
                                                 iou_thresh=iou_thresh,
                                                 )
    for idx in keep_idxs:
        conf = float(bbox_max_scores[idx])
        class_id = bbox_max_score_classes[idx]
        bbox = y_bboxes[idx]
        # clip the coordinates so the values do not exceed the image boundary.
        xmin = max(0, int(bbox[0] * width))
        ymin = max(0, int(bbox[1] * height))
        xmax = min(int(bbox[2] * width), width)
        ymax = min(int(bbox[3] * height), height)
        if draw_result:
            if class_id == 0:
                color = (0, 255, 0)
            else:
                color = (255, 0, 0)
            cv2.rectangle(image, (xmin, ymin), (xmax, ymax), color, 2)
            cv2.putText(image, "%s: %.2f" % (id2class[class_id], conf), (xmin + 2, ymin - 2),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, color)
        output_info.append([class_id, conf, xmin, ymin, xmax, ymax])
    if show_result:
        Image.fromarray(image).show()
    return output_info
def run_on_video(video_path, output_video_name, conf_thresh):
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise ValueError("Video open failed.")
    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    fps = cap.get(cv2.CAP_PROP_FPS)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    # writer = cv2.VideoWriter(output_video_name, fourcc, int(fps), (int(width), int(height)))
    total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    status = True
    idx = 0
    while status:
        start_stamp = time.time()
        status, img_raw = cap.read()
        read_frame_stamp = time.time()
        if status:
            # convert only after the status check so a failed read is not processed
            img_raw = cv2.cvtColor(img_raw, cv2.COLOR_BGR2RGB)
            inference(img_raw,
                      conf_thresh,
                      iou_thresh=0.5,
                      target_shape=(260, 260),
                      draw_result=True,
                      show_result=False)
            cv2.imshow('image', img_raw[:, :, ::-1])
            cv2.waitKey(1)
            inference_stamp = time.time()
            # writer.write(img_raw)
            write_frame_stamp = time.time()
            idx += 1
            # print("%d of %d" % (idx, total_frames))
            # print("read_frame:%f, infer time:%f, write time:%f" % (read_frame_stamp - start_stamp,
            #                                                        inference_stamp - read_frame_stamp,
            #                                                        write_frame_stamp - inference_stamp))
    # writer.release()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Face Mask Detection")
parser.add_argument('--img-mode', type=int, default=0, help='set 1 to run on image, 0 to run on video.')
parser.add_argument('--img-path', type=str, help='path to your image.')
parser.add_argument('--video-path', type=str, default='0', help='path to your video, `0` means to use camera.')
# parser.add_argument('--hdf5', type=str, help='keras hdf5 file')
args = parser.parse_args()
if args.img_mode:
imgPath = args.img_path
img = cv2.imread(imgPath)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
inference(img, show_result=True, target_shape=(260, 260))
else:
video_path = args.video_path
if args.video_path == '0':
video_path = 0
run_on_video(video_path, '', conf_thresh=0.5)
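If you'd rather skip argparse, running the detector on the default webcam (the equivalent of python tensorflow_infer.py --img-mode 0 --video-path 0) is just a function call, assuming the models/ directory is in place so the module-level model load succeeds:

from tensorflow_infer import run_on_video

# An empty output_video_name is fine here because the writer lines above are commented out.
run_on_video(0, '', conf_thresh=0.5)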
Friends who need the tensorflow 2.2 version of the project can get it from this network drive:
Link:
https://pan.baidu.com/s/10IQ1uscONZOYkgdtjcx2pQ
Extraction code: g8g9
Since this was, after all, taken from someone else's work, I've shared 一颗小树x's link above for everyone to download. Feel free to reach out and discuss.