YOLOv8 Visualization GUI: Usage Guide (with Code)


The YOLOv8 visualization interface looks like this:

To use it, you need opencv-python, torch, numpy, ultralytics, and PySide6 installed (Python >= 3.9):

pip install PySide6
pip install numpy
pip install opencv-python
pip install torch
pip install ultralytics
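
Since the GUI switches between GPU and CPU inference with torch.cuda.is_available(), a quick sanity check of the environment can look like this (a minimal sketch that only uses the packages listed above):

import cv2
import numpy as np
import torch
import PySide6
from ultralytics import YOLO  # the GUI loads model weights through this class

# Confirm the required packages import and report whether CUDA is available
print("OpenCV:", cv2.__version__)
print("NumPy:", np.__version__)
print("PyTorch:", torch.__version__, "| CUDA available:", torch.cuda.is_available())
print("PySide6:", PySide6.__version__)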

Usage

Save the code below to a .py file and run it; the interface shown in the screenshot appears. Click "Video File" and choose a video to play it in the left panel. Click "Camera" and the left panel starts showing the live feed (the computer needs a camera). Once a model is selected, detection starts (the model can also be selected first). The first detection has to load the model, so there may be a brief stutter; a warm-up sketch that addresses this is given after the code.

import os
import cv2
import torch
import numpy as np

from PySide6.QtGui import QIcon
from PySide6 import QtWidgets, QtCore, QtGui

from ultralytics import YOLO


class MyWindow(QtWidgets.QMainWindow):
    def __init__(self):
        super().__init__()

        self.init_gui()
        self.model = None              # YOLO model, loaded via the "Select Model" button
        self.timer = QtCore.QTimer()   # drives camera frame updates
        self.timer1 = QtCore.QTimer()  # drives video-file frame updates
        self.cap = None                # camera capture (cv2.VideoCapture)
        self.video = None              # video-file capture (cv2.VideoCapture)
        self.timer.timeout.connect(self.camera_show)
        self.timer1.timeout.connect(self.video_show)

    def init_gui(self):
        self.setFixedSize(960, 440)
        self.setWindowTitle('Bilibili:秋芒时不知')
        # Optional: set a window icon, e.g. self.setWindowIcon(QIcon("icon.png")), where "icon.png" is a placeholder path

        centralWidget = QtWidgets.QWidget(self)
        self.setCentralWidget(centralWidget)

        mainLayout = QtWidgets.QVBoxLayout(centralWidget)

        topLayout = QtWidgets.QHBoxLayout()
        self.oriVideoLabel = QtWidgets.QLabel(self)
        self.detectlabel = QtWidgets.QLabel(self)
        self.oriVideoLabel.setMinimumSize(448, 336)
        self.detectlabel.setMinimumSize(448, 336)
        self.oriVideoLabel.setStyleSheet('border:1px solid #D7E2F9;')
        self.detectlabel.setStyleSheet('border:1px solid #D7E2F9;')
        # 960 540  1920 960

        topLayout.addWidget(self.oriVideoLabel)
        topLayout.addWidget(self.detectlabel)

        mainLayout.addLayout(topLayout)

        # Lower half of the window: output area and buttons
        groupBox = QtWidgets.QGroupBox(self)

        bottomLayout = QtWidgets.QVBoxLayout(groupBox)

        mainLayout.addWidget(groupBox)

        btnLayout = QtWidgets.QHBoxLayout()
        self.selectModel = QtWidgets.QPushButton('📂Select Model')
        self.selectModel.setFixedSize(100, 50)
        self.selectModel.clicked.connect(self.load_model)
        self.openVideoBtn = QtWidgets.QPushButton('🎞️Video File')
        self.openVideoBtn.setFixedSize(100, 50)
        self.openVideoBtn.clicked.connect(self.start_video)
        self.openVideoBtn.setEnabled(False)
        self.openCamBtn = QtWidgets.QPushButton('📹Camera')
        self.openCamBtn.setFixedSize(100, 50)
        self.openCamBtn.clicked.connect(self.start_camera)
        self.stopDetectBtn = QtWidgets.QPushButton('🛑Stop')
        self.stopDetectBtn.setFixedSize(100, 50)
        self.stopDetectBtn.setEnabled(False)
        self.stopDetectBtn.clicked.connect(self.stop_detect)
        self.exitBtn = QtWidgets.QPushButton('⏹Exit')
        self.exitBtn.setFixedSize(100, 50)
        self.exitBtn.clicked.connect(self.close)
        btnLayout.addWidget(self.selectModel)
        btnLayout.addWidget(self.openVideoBtn)
        btnLayout.addWidget(self.openCamBtn)
        btnLayout.addWidget(self.stopDetectBtn)
        btnLayout.addWidget(self.exitBtn)
        bottomLayout.addLayout(btnLayout)

    def start_camera(self):
        # Stop video playback (if any) before switching to the camera
        self.timer1.stop()
        if self.cap is None:
            # CAP_DSHOW is Windows-specific; on Linux/macOS use cv2.VideoCapture(0)
            self.cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
        if self.cap.isOpened():
            self.timer.start(50)
        self.stopDetectBtn.setEnabled(True)

    def camera_show(self):
        ret, frame = self.cap.read()
        if ret:
            if self.model is not None:
                frame = cv2.resize(frame, (448, 352))
                device = 'cuda' if torch.cuda.is_available() else 'cpu'
                results = self.model(frame, imgsz=[448, 352], device=device)
                # plot() returns a BGR image; convert to RGB before building the QImage
                frame1 = cv2.cvtColor(results[0].plot(), cv2.COLOR_BGR2RGB)
                frame1 = QtGui.QImage(frame1.data, frame1.shape[1], frame1.shape[0],
                                      frame1.strides[0], QtGui.QImage.Format_RGB888)
                self.detectlabel.setPixmap(QtGui.QPixmap.fromImage(frame1))
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame = QtGui.QImage(frame.data, frame.shape[1], frame.shape[0],
                                 frame.strides[0], QtGui.QImage.Format_RGB888)
            self.oriVideoLabel.setPixmap(QtGui.QPixmap.fromImage(frame))
            self.oriVideoLabel.setScaledContents(True)

    def start_video(self):
        # Stop the camera stream (if running) before switching to file playback
        if self.timer.isActive():
            self.timer.stop()
        fileName, fileType = QtWidgets.QFileDialog.getOpenFileName(self, "Select video file", filter='*.mp4')
        if os.path.isfile(fileName):
            self.video = cv2.VideoCapture(fileName)
            fps = self.video.get(cv2.CAP_PROP_FPS)
            # QTimer.start() takes an interval in milliseconds
            self.timer1.start(int(1000 / fps) if fps > 0 else 30)
        else:
            print("Reselect video")

    def video_show(self):
        ret, frame = self.video.read()
        if ret:
            if self.model is not None:
                frame = cv2.resize(frame, (448, 352))
                device = 'cuda' if torch.cuda.is_available() else 'cpu'
                results = self.model(frame, imgsz=[448, 352], device=device)
                # plot() returns a BGR image; convert to RGB before building the QImage
                frame1 = cv2.cvtColor(results[0].plot(), cv2.COLOR_BGR2RGB)
                frame1 = QtGui.QImage(frame1.data, frame1.shape[1], frame1.shape[0],
                                      frame1.strides[0], QtGui.QImage.Format_RGB888)
                self.detectlabel.setPixmap(QtGui.QPixmap.fromImage(frame1))
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame = QtGui.QImage(frame.data, frame.shape[1], frame.shape[0],
                                 frame.strides[0], QtGui.QImage.Format_RGB888)
            self.oriVideoLabel.setPixmap(QtGui.QPixmap.fromImage(frame))
            self.oriVideoLabel.setScaledContents(True)
        else:
            # Playback finished: stop the timer, clear both panels, release the file
            self.timer1.stop()
            img = np.zeros((500, 500, 3), np.uint8)  # blank RGB image used to clear the panels
            img = QtGui.QImage(img.data, img.shape[1], img.shape[0], img.strides[0],
                               QtGui.QImage.Format_RGB888)
            self.oriVideoLabel.setPixmap(QtGui.QPixmap.fromImage(img))
            self.detectlabel.setPixmap(QtGui.QPixmap.fromImage(img))
            self.video.release()
            self.video = None

    def load_model(self):
        fileName, fileType = QtWidgets.QFileDialog.getOpenFileName(self, "Select model weights", filter='*.pt')
        if fileName.endswith('.pt'):
            self.model = YOLO(fileName)
            self.openVideoBtn.setEnabled(True)
            self.stopDetectBtn.setEnabled(True)
        else:
            print("Reselect model")

    def stop_detect(self):
        # Stop both timers, release capture sources, and clear both display panels
        if self.timer.isActive():
            self.timer.stop()
        if self.timer1.isActive():
            self.timer1.stop()
        if self.cap is not None:
            self.cap.release()
            self.cap = None
        if self.video is not None:
            self.video.release()
            self.video = None
        img = np.zeros((500, 500, 3), np.uint8)  # blank RGB image used to clear the panels
        img = QtGui.QImage(img.data, img.shape[1], img.shape[0], img.strides[0],
                           QtGui.QImage.Format_RGB888)
        self.oriVideoLabel.setPixmap(QtGui.QPixmap.fromImage(img))
        self.detectlabel.setPixmap(QtGui.QPixmap.fromImage(img))

    def close(self):
        # Release capture devices and stop timers, then close the window normally
        if self.cap is not None:
            self.cap.release()
            self.cap = None
        if self.video is not None:
            self.video.release()
            self.video = None
        if self.timer.isActive():
            self.timer.stop()
        if self.timer1.isActive():
            self.timer1.stop()
        super().close()


if __name__ == '__main__':
    app = QtWidgets.QApplication()
    window = MyWindow()
    window.show()
    app.exec()
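
As noted above, the very first detection after picking a weights file is slower because the model is still being initialized. One way to hide this (a sketch, not part of the original GUI; "yolov8n.pt" is a placeholder weights path) is to run a single dummy inference right after the YOLO object is created:

import numpy as np
import torch
from ultralytics import YOLO

# Hypothetical warm-up: one dummy inference so the first real frame is not delayed
model = YOLO("yolov8n.pt")  # placeholder weights path
device = 'cuda' if torch.cuda.is_available() else 'cpu'
dummy = np.zeros((352, 448, 3), dtype=np.uint8)  # matches the 448x352 frames the GUI feeds the model
model(dummy, imgsz=[448, 352], device=device, verbose=False)

In the GUI, the equivalent of the last three lines (with self.model in place of model) could be appended to load_model right after self.model = YOLO(fileName), so the warm-up runs once per model selection.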

Results