【OpenCV】PyQt GUI + Camera for Face Detection and Safety Helmet Recognition, Exported as an exe File

Published: 2023-10-30 16:30

Table of Contents

  • Preface
  • 1. Interface Design
  • 2. Code
  • 3. Exporting an exe File
  • Summary


Preface

This post continues from the previous one. The finished interface is shown below:

\"【OpenCV】Pyqt界面+摄像头实现人脸检测+安全帽识别,最终导出为exe文件_第1张图片\"

Features implemented:
1. Capture a snapshot from the camera and upload it to the OneNet AI server for face detection.
2. Open an image file and upload it to the OneNet AI server for safety helmet recognition (a minimal sketch of the shared upload flow follows this list).
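
Both buttons go through the same OneNet AI picture API. Below is a condensed sketch of that call as implemented in Button_faces_Clicked and Button_hat_Clicked later in this post; the endpoint URL and token header come from that code, and the helper name recognize_picture is only for illustration:

import base64
import json
import requests

def recognize_picture(image_path, url, token):
    # Read the image and encode it as base64, as the OneNet AI picture API expects
    with open(image_path, 'rb') as f:
        base64_str = base64.b64encode(f.read()).decode()
    headers = {'Content-Type': 'application/json', 'token': token}
    data = {'picture': [base64_str]}
    # POST the JSON payload; the reply carries a "data" list with confidence and box fields
    response = requests.post(url, headers=headers, data=json.dumps(data))
    return response.json()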


1. Interface Design

See another post on my blog: 【OpenCV】Pyqt5界面设计+USB摄像头.

2. Code

The file structure is as follows:
\"【OpenCV】Pyqt界面+摄像头实现人脸检测+安全帽识别,最终导出为exe文件_第2张图片\"
demo.ui is the interface layout file, created with PyQt's designer.exe.
demo.py is the Python code generated from the layout file with pyuic5.exe (the conversion command is shown right after this list).
at.jpg is a test image of someone wearing a safety helmet.
myqtdemo.py contains the logic and control code.
upload.jpg is the file saved when the screenshot button captures a frame.
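
The usual pyuic5 invocation for that conversion, run from the project directory, is:

pyuic5 demo.ui -o demo.py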

The code in demo.py is as follows:


from PyQt5 import QtCore, QtGui, QtWidgets


class Ui_MainWindow(object):
    def setupUi(self, MainWindow):
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(933, 727)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.label_Camera = QtWidgets.QLabel(self.centralwidget)
        self.label_Camera.setGeometry(QtCore.QRect(60, 50, 200, 200))
        self.label_Camera.setObjectName("label_Camera")
        self.Button_OpenCamera = QtWidgets.QPushButton(self.centralwidget)
        self.Button_OpenCamera.setGeometry(QtCore.QRect(120, 290, 100, 50))
        self.Button_OpenCamera.setObjectName("Button_OpenCamera")
        self.label_Capture = QtWidgets.QLabel(self.centralwidget)
        self.label_Capture.setGeometry(QtCore.QRect(360, 50, 200, 200))
        self.label_Capture.setObjectName("label_Capture")
        self.Button_Faces = QtWidgets.QPushButton(self.centralwidget)
        self.Button_Faces.setGeometry(QtCore.QRect(720, 290, 100, 50))
        self.Button_Faces.setObjectName("Button_Faces")
        self.Button_Capture = QtWidgets.QPushButton(self.centralwidget)
        self.Button_Capture.setGeometry(QtCore.QRect(420, 290, 100, 50))
        self.Button_Capture.setObjectName("Button_Capture")
        self.label_Result = QtWidgets.QLabel(self.centralwidget)
        self.label_Result.setGeometry(QtCore.QRect(660, 50, 200, 200))
        self.label_Result.setObjectName("label_Result")
        self.Button_ReadImage = QtWidgets.QPushButton(self.centralwidget)
        self.Button_ReadImage.setGeometry(QtCore.QRect(110, 600, 100, 50))
        self.Button_ReadImage.setObjectName("Button_ReadImage")
        self.label_ReadImage = QtWidgets.QLabel(self.centralwidget)
        self.label_ReadImage.setGeometry(QtCore.QRect(60, 380, 200, 200))
        self.label_ReadImage.setObjectName("label_ReadImage")
        self.Button_hat = QtWidgets.QPushButton(self.centralwidget)
        self.Button_hat.setGeometry(QtCore.QRect(400, 600, 100, 50))
        self.Button_hat.setObjectName("Button_hat")
        self.label_HatResult = QtWidgets.QLabel(self.centralwidget)
        self.label_HatResult.setGeometry(QtCore.QRect(350, 380, 200, 200))
        self.label_HatResult.setObjectName("label_HatResult")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 933, 23))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)

        self.retranslateUi(MainWindow)
        self.Button_OpenCamera.clicked.connect(MainWindow.Button_OpenCamera_Clicked)
        self.Button_Capture.clicked.connect(MainWindow.Button_Capture_Clicked)
        self.Button_ReadImage.clicked.connect(MainWindow.Button_ReadImage_Clicked)
        self.Button_Faces.clicked.connect(MainWindow.Button_faces_Clicked)
        self.Button_hat.clicked.connect(MainWindow.Button_hat_Clicked)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.label_Camera.setText(_translate("MainWindow", "摄像头图片显示"))
        self.Button_OpenCamera.setText(_translate("MainWindow", "打开摄像头"))
        self.label_Capture.setText(_translate("MainWindow", "截图结果显示"))
        self.Button_Faces.setText(_translate("MainWindow", "人脸检测"))
        self.Button_Capture.setText(_translate("MainWindow", "截图"))
        self.label_Result.setText(_translate("MainWindow", "上传结果显示"))
        self.Button_ReadImage.setText(_translate("MainWindow", "打开图片"))
        self.label_ReadImage.setText(_translate("MainWindow", "打开图片显示"))
        self.Button_hat.setText(_translate("MainWindow", "安全帽识别"))
        self.label_HatResult.setText(_translate("MainWindow", "打开图片显示"))

The code in myqtdemo.py is as follows:

import sys
import cv2
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import QFileDialog, QMainWindow
from demo import Ui_MainWindow

import requests
import json
import base64


class PyQtMainEntry(QMainWindow, Ui_MainWindow):
    def __init__(self):
        super().__init__()
        self.setupUi(self)

        self.camera = cv2.VideoCapture(0)
        self.is_camera_opened = False  # flag for whether the camera is currently open

        # Timer: grab one frame every 30 ms
        self._timer = QtCore.QTimer(self)
        self._timer.timeout.connect(self._queryFrame)
        self._timer.setInterval(30)

    def Button_OpenCamera_Clicked(self):   # open / close the camera
        self.is_camera_opened = not self.is_camera_opened
        if self.is_camera_opened:
            self.Button_OpenCamera.setText("关闭摄像头")
            self._timer.start()
        else:
            self.Button_OpenCamera.setText("打开摄像头")
            self._timer.stop()

    def Button_Capture_Clicked(self):  # capture a snapshot

        # If the camera is not open, do nothing
        if not self.is_camera_opened:
            return

        self.captured = self.frame
        # =================== first save the snapshot to disk ====================
        # self.frame was converted to RGB in place by _queryFrame, so swap the
        # channels back to BGR here so imwrite saves the colors correctly
        self.captured = cv2.cvtColor(self.captured, cv2.COLOR_BGR2RGB)
        cv2.imwrite('upload.jpg', self.captured)
        # =====================================================================
        self.captured = cv2.cvtColor(self.captured, cv2.COLOR_BGR2RGB)
        rows, cols, channels = self.captured.shape
        bytesPerLine = channels * cols
        # Qt needs the image converted to a QImage before it can be displayed
        QImg = QImage(self.captured.data, cols, rows, bytesPerLine, QImage.Format_RGB888)
        self.label_Capture.setPixmap(QPixmap.fromImage(QImg).scaled(self.label_Capture.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation))


    def Button_faces_Clicked(self):  # upload data to the OneNet AI platform: face detection
        # If no snapshot has been captured, do nothing
        if not hasattr(self, "captured"):
            return
        # self.cpatured = cv2.cvtColor(self.captured, cv2.COLOR_RGB2GRAY)

        url = 'http://ai.heclouds.com:9090/v1/aiApi/picture/FACE_RECO'
        headers = {'Content-Type': 'application/json',
                   'token': 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpZCI6ImQ1N2NlYmIwMWZkYTRkZjM4N2EzM2IwMWQ5OTFlY2Y4IiwiZXhwIjoxNjUxMTM4MTU4LCJ2YWx1ZSI6IntcImFpS2V5XCI6XCI0MWU3YjQ3MTA4ZWM0YjZkYWRmMGE1MTNiNDhlYmQwM1wiLFwiYXBwSWRcIjpcIjkxNjMwNTY0NzgzMjY2MjAxNlwiLFwicHVycG9zZVwiOlwiYXBpXCIsXCJzZWNyZXRLZXlcIjpcImI2NWQ1NDk1Zjk0MjRiNjE5YWI5NmEyYTg4NDEyZTljXCIsXCJ1c2VySWRcIjpcIjAxMHUwMDE1NDg0MjM5MTY4NTg2NzY0XCJ9In0.zIArmy1JaMeM2WeclSDp_G2SgdoBb0T16oznRKwIQvQ'}
        file = open('upload.jpg', 'rb')  # open the image file
        base64Str = base64.b64encode(file.read()).decode()  # encode it as base64
        file.close()  # close the file
        data = {'picture': [base64Str]}  # build the request payload
        response = requests.request("POST", url, headers=headers, data=json.dumps(data))  # call the API with POST
        strdata = response.text  # print the result
        print(strdata)
        mydata = json.loads(strdata)  # parse the JSON string into a dict
        print(mydata["data"][0]['confidence'])  # confidence of the first entry in the "data" list
        pointx = int(mydata["data"][0]['box']['x'])
        pointy = int(mydata["data"][0]['box']['y'])
        point_width = pointx + int(mydata["data"][0]['box']['width'])
        point_height = pointy + int(mydata["data"][0]['box']['height'])

        self.captured = cv2.imread('upload.jpg')
        self.captured = cv2.cvtColor(self.captured, cv2.COLOR_BGR2RGB)
        self.captured = cv2.rectangle(self.captured, (pointx, pointy), (point_width, point_height), (0, 255, 0), 4)

        rows, cols, channels = self.captured.shape
        print(self.captured.shape)  # 480*640
        bytesPerLine = channels * cols
        QImg = QImage(self.captured.data, cols, rows, bytesPerLine, QImage.Format_RGB888)
        self.label_Result.setPixmap(QPixmap.fromImage(QImg).scaled(self.label_Result.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation))


    def Button_ReadImage_Clicked(self):  # load an image from disk; the file path must not contain Chinese characters
        # Open a file-selection dialog
        filename, _ = QFileDialog.getOpenFileName(self, '打开图片')
        self.myfilename = str(filename)
        if filename:
            self.captured = cv2.imread(str(filename))
            # OpenCV stores images in BGR order; convert BGR to RGB for display
            self.captured = cv2.cvtColor(self.captured, cv2.COLOR_BGR2RGB)

            rows, cols, channels = self.captured.shape
            bytesPerLine = channels * cols
            QImg = QImage(self.captured.data, cols, rows, bytesPerLine, QImage.Format_RGB888)
            self.label_ReadImage.setPixmap(QPixmap.fromImage(QImg).scaled(self.label_ReadImage.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation))

    def Button_hat_Clicked(self):  # upload data to the OneNet AI platform: safety helmet recognition
        # if not hasattr(self, "captured"):
        #     return

        url = 'http://ai.heclouds.com:9090/v1/aiApi/picture/HAT_RECO'
        headers = {'Content-Type': 'application/json',
                   'token': 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpZCI6ImQ1N2NlYmIwMWZkYTRkZjM4N2EzM2IwMWQ5OTFlY2Y4IiwiZXhwIjoxNjUxMTM4MTU4LCJ2YWx1ZSI6IntcImFpS2V5XCI6XCI0MWU3YjQ3MTA4ZWM0YjZkYWRmMGE1MTNiNDhlYmQwM1wiLFwiYXBwSWRcIjpcIjkxNjMwNTY0NzgzMjY2MjAxNlwiLFwicHVycG9zZVwiOlwiYXBpXCIsXCJzZWNyZXRLZXlcIjpcImI2NWQ1NDk1Zjk0MjRiNjE5YWI5NmEyYTg4NDEyZTljXCIsXCJ1c2VySWRcIjpcIjAxMHUwMDE1NDg0MjM5MTY4NTg2NzY0XCJ9In0.zIArmy1JaMeM2WeclSDp_G2SgdoBb0T16oznRKwIQvQ'}

        file = open(self.myfilename, 'rb')  # open the image file
        base64Str = base64.b64encode(file.read()).decode()  # encode it as base64
        file.close()  # close the file
        data = {'picture': [base64Str]}  # build the request payload
        response = requests.request("POST", url, headers=headers, data=json.dumps(data))  # call the API with POST
        # if response:
        #     print(response.json())
        #     json_dict = response.json()
        #     print(json_dict)
        #     guid = json_dict.get("data")
        #     print(guid[0]['confidence'])

        strdata = response.text  # print the result
        print(strdata)
        mydata = json.loads(strdata)  # parse the JSON string into a dict
        print(mydata["data"][0]['confidence'])
        pointx = int(mydata["data"][0]['box']['x'])
        pointy = int(mydata["data"][0]['box']['y'])
        point_width = pointx + int(mydata["data"][0]['box']['width'])
        point_height = pointy + int(mydata["data"][0]['box']['height'])

        self.captured = cv2.imread(self.myfilename)
        self.captured = cv2.cvtColor(self.captured, cv2.COLOR_BGR2RGB)
        self.captured = cv2.rectangle(self.captured, (pointx, pointy), (point_width, point_height), (0, 255, 0), 4)

        rows, cols, channels = self.captured.shape
        print(self.captured.shape)  # 480*640
        bytesPerLine = channels * cols
        QImg = QImage(self.captured.data, cols, rows, bytesPerLine, QImage.Format_RGB888)
        self.label_HatResult.setPixmap(QPixmap.fromImage(QImg).scaled(self.label_HatResult.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation))


    @QtCore.pyqtSlot()
    def _queryFrame(self):   # grab frames in a loop
        ret, self.frame = self.camera.read()
        img_rows, img_cols, channels = self.frame.shape
        bytesPerLine = channels * img_cols

        # Convert BGR to RGB in place (self.frame is passed as the dst argument)
        cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB, self.frame)
        QImg = QImage(self.frame.data, img_cols, img_rows, bytesPerLine, QImage.Format_RGB888)
        self.label_Camera.setPixmap(QPixmap.fromImage(QImg).scaled(self.label_Camera.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation))


if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    window = PyQtMainEntry()
    window.show()
    sys.exit(app.exec_())
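
One note on the comment in Button_ReadImage_Clicked: cv2.imread on Windows cannot handle file paths containing Chinese characters. If such paths need to be supported, a common workaround (a sketch, not part of the project above; the helper name imread_unicode is only for illustration) is to read the raw bytes with numpy and decode them:

import cv2
import numpy as np

def imread_unicode(path):
    # np.fromfile reads the raw bytes regardless of non-ASCII characters in the path,
    # and cv2.imdecode turns them into a BGR image just like cv2.imread would
    return cv2.imdecode(np.fromfile(path, dtype=np.uint8), cv2.IMREAD_COLOR)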

3. Exporting an exe File

1. pyinstaller is a package that bundles Python scripts into executables, so install it first. In the Anaconda Prompt, run pip install pyinstaller.
\"【OpenCV】Pyqt界面+摄像头实现人脸检测+安全帽识别,最终导出为exe文件_第3张图片\"

2. Go to the project directory and type cmd in the Explorer address bar to open a command prompt there.
\"【OpenCV】Pyqt界面+摄像头实现人脸检测+安全帽识别,最终导出为exe文件_第4张图片\"
3. Once the command prompt appears, run pyinstaller -F -w myqtdemo.py. Here myqtdemo.py is the name of your .py file; note that your file name may not be the same as mine.
If you want to add an icon, run pyinstaller -F --icon=my.ico yourfile.py instead (these options are explained briefly after step 4).
4. A build folder and a dist folder are eventually generated in the project directory; be patient, this step takes quite a while. Open the dist folder and the exe program is inside.
\"【OpenCV】Pyqt界面+摄像头实现人脸检测+安全帽识别,最终导出为exe文件_第5张图片\"


Summary

Dynamic recognition on live video is limited by the speed of transferring frames over the network to the server, so it would need to be implemented with deep-learning methods instead.
