需求:在目标检测时,我们要求前端能够将后端检测的结果实时渲染在图片或者视频上。图片是静态的,只需要渲染一次;而视频是动态的,播放时需要根据帧数来实时渲染标注框,可以想象视频就是由一张张图片播放的效果。
1.前端技术:SSE、Canvas
2.模型:YOLOv11
效果图
图片检测:
视频检测:
步骤1:预览图片和视频,canvas绘制标注框
<!-- Media browser: prev button · image/video preview with canvas overlay · next button -->
<div class="image-list">
<!-- Previous media -->
<div v-if="currentMedia" class="btn" @click="prevPage">
<kp-icon name="LeftOutlined" style="font-size: 30px"></kp-icon>
</div>
<div v-if="currentMedia" class="media">
<div class="workspace">
<!-- Image preview: boxes are drawn once onto the canvas overlay -->
<div v-if="isImage" class="media-container">
<img ref="imgPreviewRef" style="height: 100%; overflow: scroll" alt="" />
<canvas id="imgCanvasOverlay"></canvas>
</div>
<!-- Video preview: the canvas overlay is redrawn every animation frame -->
<div v-else class="media-container">
<video id="videoElement" controls height="90%"></video>
<canvas id="canvasOverlay"></canvas>
</div>
</div>
</div>
<empty v-else title="暂无图像或视频" />
<!-- Next media -->
<div v-if="currentMedia" class="btn" @click="nextPage">
<kp-icon name="RightOutlined" style="font-size: 30px"></kp-icon>
</div>
</div>
步骤2:前端选择sse,在页面加载时与后端建立连接
onMounted(() => {
  // Random client id pairs this browser tab with its SSE channel; the
  // backend streams detection results back over it.
  clientId.value = Math.random().toString(36).substring(2, 15)
  let sseUrl = `${shipDetectUrl}/sse/${clientId.value}`
  sseConnection.value = new EventSource(sseUrl)
  sseConnection.value.addEventListener('message', event => {
    try {
      let data = JSON.parse(event.data)
      if (data && data.filename === currentMedia.value?.fileName) {
        if (!data['is_video']) {
          // Case 1: image. Scale backend pixel coords to the rendered height.
          let scale = data.height / imgPreviewRef.value.getBoundingClientRect().height
          // forEach (not map): the callback only pushes — no mapped array is used.
          data.data.forEach((item, index) => {
            imgAnnotation.value.push([
              item.det_res[0] / scale,
              item.det_res[1] / scale,
              (item.det_res[2] - item.det_res[0]) / scale,
              (item.det_res[3] - item.det_res[1]) / scale,
              item.det_res[4],
              item.det_res[5],
              item.cls_res,
              index,
            ])
          })
          drawImgAnnotations()
        } else if (data['is_video']) {
          // Case 2: video. Boxes are cached per frame number.
          // Check video_end first so the final message is never consumed by
          // the "start detecting" branch.
          if (data['video_end']) {
            videoCheckState.value = 2
          } else if (videoCheckState.value === 0) {
            videoCheckState.value = 1
          }
          frameRate.value = data.fps
          // Scale backend pixel coords to the rendered video height.
          let scale = data.height / video.value.getBoundingClientRect().height
          let annotationData = data.data.map((item, index) => {
            return [
              item.det_res[0] / scale,
              item.det_res[1] / scale,
              (item.det_res[2] - item.det_res[0]) / scale,
              (item.det_res[3] - item.det_res[1]) / scale,
              item.det_res[4],
              item.det_res[5],
              item.cls_res,
              index,
            ]
          })
          annotations.set(data.frame, annotationData)
        }
      } else if (data['batch_end']) {
        // Case 3: batch detection finished.
        batchLoading.value = false
        checkState.value = 2
        message.success('已完成批量检测!')
        if (!isImage.value && video.value.paused) video.value.play()
        handleDetection()
      }
    } catch (error) {
      console.error('Error parsing SSE data:', error)
    }
  })
  sseConnection.value.addEventListener('error', error => {
    console.error('SSE connection error:', error)
  })
})
步骤3:图片或者视频加载完成,立即调用检测接口,检测结果在sse连接中返回
// Preview the current image and prepare its annotation canvas; detection is
// triggered as soon as the image has loaded.
function previewImg() {
nextTick(() => {
// Grab the overlay canvas once the DOM for the image branch exists.
imageCanvas.value = document.getElementById('imgCanvasOverlay')
if (imageCanvas.value) {
imageCanvasCtx.value = imageCanvas.value.getContext('2d')
}
imgPreviewRef.value.src = `/_api/detectionDataset/preview?filePath=${currentMedia.value.filePath.replaceAll('\\', '/')}`
imgPreviewRef.value.onload = () => {
// Reset prior results and size the canvas to the rendered image.
imgAnnotation.value = []
clickedBoxId.value = null
imageCanvas.value.width = imgPreviewRef.value.width
imageCanvas.value.height = imgPreviewRef.value.height
handleDetection()
}
// addEventListener dedupes an identical (type, listener) pair, so repeated
// previews do not stack click handlers.
imgPreviewRef.value.addEventListener('click', handleCanvasClick)
})
}
const isFirst = ref(false) // set after the first canplay-triggered detection
const isRePlaying = ref(false) // whether playback resumed after a pause
// Preview the current video and wire up its playback/detection lifecycle.
function previewVideo() {
annotations.clear()
nextTick(() => {
video.value = document.getElementById('videoElement')
videoCanvas.value = document.getElementById('canvasOverlay')
if (videoCanvas.value) {
videoCanvasCtx.value = videoCanvas.value.getContext('2d')
}
if (video.value && currentMedia.value) {
video.value.src = currentMedia.value.src
// First canplay at t=0: start detection, slow playback, begin the draw loop.
video.value.addEventListener('canplay', () => {
if (!isFirst.value && video.value.currentTime == 0) {
handleDetection()
video.value.playbackRate = 0.5
video.value.play()
animationLoop() // start the draw loop
isFirst.value = true
}
})
video.value.addEventListener('play', () => {
console.log('play开始播放')
isFirst.value = false
clickedBoxId.value = null
})
// A user pause (not a resume bounce) also pauses the backend detection.
video.value.addEventListener('pause', () => {
if (!isRePlaying.value) {
console.log('pause暂停播放')
stopVideoDetection()
}
isRePlaying.value = true
})
// Resuming mid-video restarts detection from the current frame.
video.value.addEventListener('playing', () => {
console.log('暂停后重新开始播放')
if (video.value.currentTime !== 0 && isRePlaying.value) {
handleDetection()
}
isRePlaying.value = false
})
video.value.addEventListener('loadedmetadata', resizeCanvas)
window.addEventListener('resize', resizeCanvas)
video.value.addEventListener('click', handleCanvasClick)
}
})
}
// Start a detection task for the current media; results come back over SSE.
function handleDetection() {
  const param = {
    source: currentMedia.value.filePath,
    model: modelType.value, // selected model
    client_id: clientId.value,
    // Videos also report which frame to start from (frame = time * fps).
    ...(isImage.value ? {} : { frame: Math.floor(video.value.currentTime * frameRate.value) }),
  }
  localImageVideo.startDetect(param).then(res => {
    taskId.value = res.data.task_id
  })
}
步骤4:图片绘制一次标注框;视频则按帧绘制标注框,因为返回结果是以帧数为单位(帧数 = 当前时间 * 帧率)
// Draw the image annotation boxes and sync the result table.
function drawImgAnnotations() {
  if (imageCanvasCtx.value)
    imageCanvasCtx.value.clearRect(0, 0, imageCanvas.value.width, imageCanvas.value.height)
  detectionData.value = []
  imgAnnotation.value.forEach(item => {
    const [x1, y1, width, height, level, type, cls_res, id] = item
    detectionData.value.push({
      type: type,
      level: level,
      children: cls_res,
      id,
    })
    // Rectangle; the selected box is highlighted in red.
    imageCanvasCtx.value.beginPath()
    imageCanvasCtx.value.strokeStyle = clickedBoxId.value === id ? 'red' : 'yellow'
    imageCanvasCtx.value.lineWidth = 2
    imageCanvasCtx.value.strokeRect(x1, y1, width, height)
    // Type label just above the box.
    imageCanvasCtx.value.font = '20px Arial'
    imageCanvasCtx.value.fillStyle = clickedBoxId.value === id ? 'red' : 'yellow'
    imageCanvasCtx.value.fillText(type.toString(), x1, y1 - 5)
  })
  // Push the rows to the result table once, after the loop, instead of on
  // every iteration as before.
  if (imgAnnotation.value.length > 0) detectionResultRef.value.getData(detectionData.value)
}
// Draw the annotation boxes for the video's current frame.
function drawAnnotations() {
  if (video.value) {
    videoCanvasCtx.value?.clearRect(0, 0, videoCanvas.value.width, videoCanvas.value.height)
    if (video.value.paused) return
    // frame = current playback time * frame rate
    const currentFrame = Math.round(video.value.currentTime * frameRate.value)
    let boxes = annotations.get(currentFrame)
    if (currentFrame === 0) {
      boxes = annotations.get(1)
    } else if (currentFrame > annotations.size) {
      // NOTE(review): annotations.size is the entry count, not the highest
      // frame key — this fallback assumes frames are stored contiguously
      // from 1; confirm against the backend's frame numbering.
      boxes = annotations.get(annotations.size)
    }
    detectionData.value = []
    if (boxes?.length === 0) {
      videoCanvasCtx.value.clearRect(0, 0, videoCanvas.value.width, videoCanvas.value.height)
      return
    }
    boxes?.forEach(box => {
      const [x1, y1, width, height, level, type, cls_res, id] = box
      detectionData.value.push({
        type: type,
        level: level,
        children: cls_res,
        id,
      })
      // Rectangle; the selected box is highlighted in red.
      videoCanvasCtx.value.beginPath()
      videoCanvasCtx.value.strokeStyle = clickedBoxId.value === id ? 'red' : 'yellow'
      videoCanvasCtx.value.lineWidth = 2
      videoCanvasCtx.value.strokeRect(x1, y1, width, height)
      // Type label just above the box.
      videoCanvasCtx.value.font = '20px Arial'
      videoCanvasCtx.value.fillStyle = clickedBoxId.value === id ? 'red' : 'yellow'
      videoCanvasCtx.value.fillText(type.toString(), x1, y1 - 5)
    })
    // Update the result table once per repaint instead of once per box.
    if (boxes?.length) detectionResultRef.value.getData(detectionData.value)
  }
}
// High-frequency repaint loop driven by requestAnimationFrame.
// NOTE(review): this loop is never cancelled; calling animationLoop() more
// than once starts overlapping loops — confirm callers guard against that.
function animationLoop() {
drawAnnotations()
requestAnimationFrame(animationLoop)
}
步骤5:点击标注框可查看目标细节
<!-- Container that shows a magnified crop of the clicked detection box -->
<div class="detail">
<div class="detail-title">细节</div>
<div class="magnifier-glass">
<img id="croppedPreview" />
</div>
</div>
<style lang="scss" scoped>
.detail {
width: 300px;
background: #fff;
padding: 10px;
.magnifier-glass {
padding: 10px 0;
width: calc(100% - 20px);
overflow: scroll;
// the cropped preview keeps its aspect ratio
#croppedPreview {
object-fit: contain;
border: 2px solid #fff;
}
}
}
</style>
// Resolve a click on the media into a detection box and highlight it.
function handleCanvasClick(event) {
// Click position relative to the overlay canvas.
let rect = isImage.value
? imageCanvas.value.getBoundingClientRect()
: videoCanvas.value.getBoundingClientRect()
let clickedBox = null
const clickX = event.clientX - rect.left
const clickY = event.clientY - rect.top
if (isImage.value) {
// Image: hit-test against the image annotations.
clickedBox = imgAnnotation.value.find(box => {
const [x1, y1, width, height, level, type, cls_res] = box
return clickX >= x1 && clickX <= x1 + width && clickY >= y1 && clickY <= y1 + height
})
} else {
// Video: look up the boxes cached for the current frame.
const currentFrame = Math.floor(video.value.currentTime * frameRate.value)
let boxes = []
boxes = annotations.get(currentFrame)
if (currentFrame === 0) {
boxes = annotations.get(1)
} else if (currentFrame > annotations.size) {
// NOTE(review): annotations.size is the entry count, not the highest
// frame key — assumes contiguous frames from 1; confirm.
boxes = annotations.get(annotations.size)
}
if (!boxes) return
clickedBox = boxes.find(box => {
const [x1, y1, width, height, level, type, cls_res] = box
return clickX >= x1 && clickX <= x1 + width && clickY >= y1 && clickY <= y1 + height
})
}
if (clickedBox) {
event.preventDefault()
detectionResultRef.value.selectResult(clickedBox)
captureBoxArea(clickedBox)
// Last element of the box tuple is its id.
clickedBoxId.value = clickedBox[clickedBox.length - 1]
// Redraw so the selected box turns red.
if (isImage.value) {
drawImgAnnotations()
} else {
drawAnnotations()
}
}
}
// Crop the clicked box out of the current image/video frame and show it in
// the detail panel.
function captureBoxArea(box) {
// Temporary canvas sized to the rendered media element.
const tempCanvas = document.createElement('canvas')
const tempCtx = tempCanvas.getContext('2d')
let dom = isImage.value ? imgPreviewRef.value : video.value
const domWidth = dom.getBoundingClientRect().width
const domHeight = dom.getBoundingClientRect().height
tempCanvas.width = domWidth
tempCanvas.height = domHeight
// Draw the current frame/image scaled to its rendered size so the box
// coords (already scaled to the rendered height) line up.
tempCtx.drawImage(dom, 0, 0, domWidth, domHeight)
const [x1, y1, width, height, level, type, cls_res] = box
// NOTE(review): getImageData takes integer device pixels; fractional box
// coords are truncated — confirm the slight offset is acceptable.
const imageData = tempCtx.getImageData(x1, y1, width, height)
// Copy the cropped pixels onto their own canvas.
const croppedCanvas = document.createElement('canvas')
croppedCanvas.width = width
croppedCanvas.height = height
croppedCanvas.getContext('2d').putImageData(imageData, 0, 0)
// Show the crop in the detail panel.
const croppedPreview = document.getElementById('croppedPreview')
croppedPreview.src = croppedCanvas.toDataURL()
croppedPreview.style.display = 'block'
}
完整代码:
<template>
<div class="analysis">
<div class="analysis-top">
<div class="preview-wrap">
<div class="top-btns">
<!-- Dataset / upload buttons are hidden while a batch detection runs -->
<div v-if="checkState !== 1" class="left-btn">
<a-button type="primary" @click="handleDataset">选择数据集</a-button>
<a-button type="primary" style="margin-left: 10px" @click="handleUpload"
>本地上传</a-button
>
</div>
<div v-if="currentMedia" class="name">{{ currentMedia.fileName }}</div>
<div v-if="currentMedia" class="right-btn">
<!-- Position in the media list -->
<span class="num">{{ activeMediaIndex + 1 }} / {{ mediaList.length }}</span>
<span class="refresh" @click="changeLoad(datasetId)">
<kp-icon name="icon_shuaxin"></kp-icon>
</span>
<span>
模型类型:
<a-select
v-model:value="modelType"
:options="modelTypeOptions"
style="width: 80px"
@change="reset"
>
</a-select>
</span>
<!-- Pause is only meaningful while a video is being detected -->
<a-button
v-if="!isImage && videoCheckState === 1"
class="btn"
@click="stopVideoDetection"
>暂停检测</a-button
>
<a-button :loading="batchLoading" class="btn" @click="handleBatchDetection"
>批量检测</a-button
>
</div>
</div>
<div class="image-list">
<div v-if="currentMedia" class="btn" @click="prevPage">
<kp-icon name="LeftOutlined" style="font-size: 30px"></kp-icon>
</div>
<div v-if="currentMedia" class="media">
<div class="workspace">
<!-- Image preview with its canvas overlay -->
<div v-if="isImage" class="media-container">
<img ref="imgPreviewRef" style="height: 100%; overflow: scroll" alt="" />
<canvas id="imgCanvasOverlay"></canvas>
</div>
<!-- Video preview with its canvas overlay -->
<div v-else class="media-container">
<video id="videoElement" controls height="90%"></video>
<canvas id="canvasOverlay"></canvas>
</div>
</div>
</div>
<empty v-else title="暂无图像或视频" />
<div v-if="currentMedia" class="btn" @click="nextPage">
<kp-icon name="RightOutlined" style="font-size: 30px"></kp-icon>
</div>
</div>
</div>
</div>
<div class="analysis-bottom">
<!-- Detection result table -->
<div class="result">
<detection-result
ref="detectionResultRef"
:current-media="currentMedia"
@select-row="selectRow"
></detection-result>
</div>
<!-- Magnified crop of the selected detection box -->
<div class="detail">
<div class="detail-title">细节</div>
<div class="magnifier-glass">
<img id="croppedPreview" />
</div>
</div>
</div>
</div>
<select-dataset ref="selectDatasetRef" @handle-img-and-video="handleImgAndVideo"></select-dataset>
<local-upload ref="localUploadRef" @change-load="changeLoad"></local-upload>
</template>
<script setup lang="ts">
import '@kunpeng/layout/default/vgg.css'
import '@kunpeng/layout/default/vgg3.css'
import DetectionResult from './components/detectionResult.vue'
import SelectDataset from './components/selectDataset.vue'
import LocalUpload from './components/localUpload.vue'
import empty from '@/components/empty/index.vue'
import annotated from '@kunpeng/api/dataset-annotation/annotated'
import localImageVideo from '@/api/localImageVideo'
import { message } from 'ant-design-vue'
import { ref } from 'vue'
const $store = inject('$store')
const baseApi = $store.$appStore.baseApi
const userInfo = $store.$userStore.userInfo
const resultZoomInImg = ref<string>({}) // zoomed-in result image data
const sourceCanvas = ref<HTMLCanvasElement | null>(null)
const isRunning = ref(false) // whether the video is playing
const mediaList = ref([]) // list of images/videos
const currentMedia = ref(null) // currently shown image or video
const selectDatasetRef = ref()
const localUploadRef = ref()
const openImage = ref(() => {})
const openVideo = ref(() => {})
const handleAnnotated = ref(() => {})
const handleImgDetection = ref(() => {}) // detect a single image
const activeMediaIndex = ref(0)
const currentStreamJson = ref('') // latest annotation payload received over SSE
const imgAnnotation = ref([]) // image detection results
const detectionData = ref([]) // rows for the detection result table
const batchLoading = ref(false) // batch detection loading state
const checkState = ref(0) // batch detection state: 0 idle, 1 running, 2 done
const videoCheckState = ref(0) // video detection state: 0 idle, 1 running, 2 done
// attribute json
const attributes_json = ref({
region: {},
})
const labelJson = ref([])
const fileList = ref([])
const allRegionList = ref([])
const videoInterval = ref(null)
// Whether the current media is an image, decided by file extension.
const isImage = computed(() => {
return ['png', 'jpg', 'bmp', 'jpeg', 'webp'].includes(currentMedia.value?.ext)
})
const imgPreviewRef = ref(null) // image element
const imageCanvas = ref(null) // annotation canvas for the image
const imageCanvasCtx = ref(null)
const video = ref(null) // video element
const videoCanvas = ref(null) // annotation canvas for the video
const videoCanvasCtx = ref(null)
let annotations = new Map() // video annotation boxes keyed by frame number
const frameRate = ref(25) // video frame rate (updated from SSE messages)
const detectionResultRef = ref(null)
const isCustomButton = ref(false)
const clickedBoxId = ref(null) // id of the currently selected box
const modelType = ref(0)
const modelTypeOptions = window.config.modelTypeOptions
const datasetId = ref('')
const loading = ref(false) // loading state of the detect button
const taskId = ref('') // image/video detection task id
const clientId = ref('') // SSE client id
const sseConnection = ref(null)
const shipDetectUrl = window.config.shipDetectUrl
onMounted(() => {
  // Random client id pairs this browser tab with its SSE channel; the
  // backend streams detection results back over it.
  clientId.value = Math.random().toString(36).substring(2, 15)
  let sseUrl = `${shipDetectUrl}/sse/${clientId.value}`
  sseConnection.value = new EventSource(sseUrl)
  sseConnection.value.addEventListener('message', event => {
    try {
      let data = JSON.parse(event.data)
      if (data && data.filename === currentMedia.value?.fileName) {
        if (!data['is_video']) {
          // Case 1: image. Scale backend pixel coords to the rendered height.
          let scale = data.height / imgPreviewRef.value.getBoundingClientRect().height
          // forEach (not map): the callback only pushes — no mapped array is used.
          data.data.forEach((item, index) => {
            imgAnnotation.value.push([
              item.det_res[0] / scale,
              item.det_res[1] / scale,
              (item.det_res[2] - item.det_res[0]) / scale,
              (item.det_res[3] - item.det_res[1]) / scale,
              item.det_res[4],
              item.det_res[5],
              item.cls_res,
              index,
            ])
          })
          drawImgAnnotations()
        } else if (data['is_video']) {
          // Case 2: video. Boxes are cached per frame number.
          // Check video_end first so the final message is never consumed by
          // the "start detecting" branch.
          if (data['video_end']) {
            videoCheckState.value = 2
          } else if (videoCheckState.value === 0 && video.value.paused) {
            videoCheckState.value = 1
          }
          frameRate.value = data.fps
          // Scale backend pixel coords to the rendered video height.
          let scale = data.height / video.value.getBoundingClientRect().height
          let annotationData = data.data.map((item, index) => {
            return [
              item.det_res[0] / scale,
              item.det_res[1] / scale,
              (item.det_res[2] - item.det_res[0]) / scale,
              (item.det_res[3] - item.det_res[1]) / scale,
              item.det_res[4],
              item.det_res[5],
              item.cls_res,
              index,
            ]
          })
          annotations.set(data.frame, annotationData)
        }
      } else if (data['batch_end']) {
        // Case 3: batch detection finished.
        batchLoading.value = false
        checkState.value = 2
        message.success('已完成批量检测!')
        if (!isImage.value && video.value.paused) video.value.play()
        handleDetection()
      }
    } catch (error) {
      console.error('Error parsing SSE data:', error)
    }
  })
  sseConnection.value.addEventListener('error', error => {
    console.error('SSE connection error:', error)
  })
})
onUnmounted(() => {
clearInterval(videoInterval.value)
})
// Start a detection task for the current media; the result arrives over SSE.
async function handleDetection() {
  const param = {
    source: currentMedia.value.filePath,
    model: modelType.value, // selected model
    client_id: clientId.value,
  }
  if (!isImage.value) {
    // Videos also report which frame to start from (frame = time * fps).
    param.frame = Math.floor(video.value.currentTime * frameRate.value)
  }
  // Plain await instead of mixing `await` with `.then()`.
  const res = await localImageVideo.startDetect(param)
  taskId.value = res.data.task_id
}
// Run detection over the whole directory that contains the current media.
function handleBatchDetection() {
  checkState.value = 1
  message.info('批量检测中')
  batchLoading.value = true
  // Normalize Windows separators first so lastIndexOf('/') also works for
  // backslash paths (the rest of the file normalizes the same way).
  const normalized = currentMedia.value.filePath.replaceAll('\\', '/')
  const param = {
    source: normalized.substring(0, normalized.lastIndexOf('/')), // parent dir
    model: modelType.value, // selected model
    client_id: clientId.value,
  }
  localImageVideo.startDetect(param)
}
// Preview the current image and run detection once it has loaded.
function previewImg() {
  nextTick(() => {
    // Grab the overlay canvas once the DOM for the image branch exists.
    imageCanvas.value = document.getElementById('imgCanvasOverlay')
    if (imageCanvas.value) {
      imageCanvasCtx.value = imageCanvas.value.getContext('2d')
    }
    imgPreviewRef.value.src = `/_api/detectionDataset/preview?filePath=${currentMedia.value.filePath.replaceAll('\\', '/')}`
    imgPreviewRef.value.onload = () => {
      // Reset prior results and size the canvas to the rendered image.
      imgAnnotation.value = []
      clickedBoxId.value = null
      imageCanvas.value.width = imgPreviewRef.value.width
      imageCanvas.value.height = imgPreviewRef.value.height
      handleDetection()
    }
    // addEventListener dedupes an identical (type, listener) pair, so
    // repeated previews do not stack click handlers.
    imgPreviewRef.value.addEventListener('click', handleCanvasClick)
  })
}
const isFirst = ref(false) // set after the first canplay-triggered detection
const isRePlaying = ref(false) // whether playback resumed after a pause
// (removed unused `isSeeking` and `debounceTimer` refs — referenced nowhere)
// Preview the current video and wire up its playback/detection lifecycle.
function previewVideo() {
annotations.clear()
nextTick(() => {
video.value = document.getElementById('videoElement')
videoCanvas.value = document.getElementById('canvasOverlay')
if (videoCanvas.value) {
videoCanvasCtx.value = videoCanvas.value.getContext('2d')
}
if (video.value && currentMedia.value) {
video.value.src = currentMedia.value.src
// First canplay near t=0: start detection, slow playback, begin the draw loop.
video.value.addEventListener('canplay', () => {
if (!isFirst.value && video.value.currentTime <= 0.1) {
handleDetection()
video.value.playbackRate = 0.5
video.value.play()
animationLoop() // start the draw loop
console.log('触发检测1111111111')
isFirst.value = true
}
})
video.value.addEventListener('play', () => {
console.log('play开始播放')
isFirst.value = false
clickedBoxId.value = null
// animationLoop() // draw loop (disabled: canplay already starts it)
})
// A user pause (not a resume bounce) also pauses the backend detection.
video.value.addEventListener('pause', () => {
if (!isRePlaying.value) {
console.log('pause暂停播放')
stopVideoDetection()
}
isRePlaying.value = true
})
// Resuming mid-video restarts detection from the current frame.
video.value.addEventListener('playing', () => {
console.log('暂停后重新开始播放')
if (video.value.currentTime !== 0 && isRePlaying.value) {
console.log('暂停后重新开始播放==只调用一次')
console.log('触发检测222222222222222222')
handleDetection()
}
isRePlaying.value = false
})
video.value.addEventListener('loadedmetadata', resizeCanvas)
window.addEventListener('resize', resizeCanvas)
video.value.addEventListener('click', handleCanvasClick)
}
})
}
// Resolve a click on the media into a detection box and highlight it.
function handleCanvasClick(event) {
// Click position relative to the overlay canvas.
let rect = isImage.value
? imageCanvas.value.getBoundingClientRect()
: videoCanvas.value.getBoundingClientRect()
let clickedBox = null
const clickX = event.clientX - rect.left
const clickY = event.clientY - rect.top
if (isImage.value) {
// Image: hit-test against the image annotations.
clickedBox = imgAnnotation.value.find(box => {
const [x1, y1, width, height, level, type, cls_res] = box
return clickX >= x1 && clickX <= x1 + width && clickY >= y1 && clickY <= y1 + height
})
} else {
// Video: look up the boxes cached for the current frame.
const currentFrame = Math.floor(video.value.currentTime * frameRate.value)
let boxes = []
boxes = annotations.get(currentFrame)
if (currentFrame === 0) {
boxes = annotations.get(1)
} else if (currentFrame > annotations.size) {
// NOTE(review): annotations.size is the entry count, not the highest
// frame key — assumes frames are stored contiguously from 1; confirm.
boxes = annotations.get(annotations.size)
}
if (!boxes) return
clickedBox = boxes.find(box => {
const [x1, y1, width, height, level, type, cls_res] = box
return clickX >= x1 && clickX <= x1 + width && clickY >= y1 && clickY <= y1 + height
})
}
if (clickedBox) {
event.preventDefault()
detectionResultRef.value.selectResult(clickedBox)
captureBoxArea(clickedBox)
// Last element of the box tuple is its id.
clickedBoxId.value = clickedBox[clickedBox.length - 1]
// Redraw so the selected box turns red.
if (isImage.value) {
drawImgAnnotations()
} else {
drawAnnotations()
}
}
}
// A row was picked in the result table: highlight its box and show its crop.
function selectRow(data) {
  let boxes = []
  if (isImage.value) {
    boxes = imgAnnotation.value
  } else {
    // frame = current playback time * frame rate
    const currentFrame = Math.floor(video.value.currentTime * frameRate.value)
    boxes = annotations.get(currentFrame)
    if (currentFrame === 0) {
      boxes = annotations.get(1)
    } else if (currentFrame > annotations.size) {
      boxes = annotations.get(annotations.size)
    }
    if (!boxes) return
  }
  // The box tuple's last element is its id.
  const clickedBox = boxes.find(box => data.id === box[box.length - 1])
  // Guard: the row may reference a box that no longer exists for this frame;
  // previously this dereferenced `undefined` and threw.
  if (!clickedBox) return
  clickedBoxId.value = clickedBox[clickedBox.length - 1]
  if (isImage.value) {
    drawImgAnnotations()
  } else {
    drawAnnotations()
  }
  captureBoxArea(clickedBox)
}
// Crop the clicked box out of the current image/video frame and show it in
// the detail panel.
function captureBoxArea(box) {
// Temporary canvas sized to the rendered media element.
const tempCanvas = document.createElement('canvas')
const tempCtx = tempCanvas.getContext('2d')
let dom = isImage.value ? imgPreviewRef.value : video.value
const domWidth = dom.getBoundingClientRect().width
const domHeight = dom.getBoundingClientRect().height
tempCanvas.width = domWidth
tempCanvas.height = domHeight
// Draw the current frame/image scaled to its rendered size so the box
// coords (already scaled to the rendered height) line up.
tempCtx.drawImage(dom, 0, 0, domWidth, domHeight)
const [x1, y1, width, height, level, type, cls_res] = box
// NOTE(review): getImageData takes integer device pixels; fractional box
// coords are truncated — confirm the slight offset is acceptable.
const imageData = tempCtx.getImageData(x1, y1, width, height)
// Copy the cropped pixels onto their own canvas.
const croppedCanvas = document.createElement('canvas')
croppedCanvas.width = width
croppedCanvas.height = height
croppedCanvas.getContext('2d').putImageData(imageData, 0, 0)
// Show the crop in the detail panel.
const croppedPreview = document.getElementById('croppedPreview')
croppedPreview.src = croppedCanvas.toDataURL()
croppedPreview.style.display = 'block'
}
// High-frequency repaint loop driven by requestAnimationFrame.
// NOTE(review): this loop is never cancelled; calling animationLoop() more
// than once starts overlapping loops — confirm callers guard against that.
function animationLoop() {
drawAnnotations()
requestAnimationFrame(animationLoop)
}
// Size the overlay canvas to match the video element's rendered box.
function resizeCanvas() {
  const { offsetWidth, offsetHeight } = video.value
  videoCanvas.value.width = offsetWidth
  videoCanvas.value.height = offsetHeight
}
// Ask the backend to pause the current detection task, then pause playback.
function stopVideoDetection() {
  const payload = { task_id: taskId.value }
  localImageVideo.pauseDetect(payload).then(res => {
    if (res.status !== 200) return
    videoCheckState.value = 0
    video.value?.pause()
  })
}
// Draw the image annotation boxes and sync the result table.
function drawImgAnnotations() {
  if (imageCanvasCtx.value)
    imageCanvasCtx.value.clearRect(0, 0, imageCanvas.value.width, imageCanvas.value.height)
  detectionData.value = []
  imgAnnotation.value.forEach(item => {
    const [x1, y1, width, height, level, type, cls_res, id] = item
    detectionData.value.push({
      type: type,
      level: level,
      children: cls_res,
      id,
    })
    // Rectangle; the selected box is highlighted in red.
    imageCanvasCtx.value.beginPath()
    imageCanvasCtx.value.strokeStyle = clickedBoxId.value === id ? 'red' : 'yellow'
    imageCanvasCtx.value.lineWidth = 2
    imageCanvasCtx.value.strokeRect(x1, y1, width, height)
    // Type label just above the box.
    imageCanvasCtx.value.font = '20px Arial'
    imageCanvasCtx.value.fillStyle = clickedBoxId.value === id ? 'red' : 'yellow'
    imageCanvasCtx.value.fillText(type.toString(), x1, y1 - 5)
  })
  // Push the rows to the result table once, after the loop, instead of on
  // every iteration as before; debug logging removed.
  if (imgAnnotation.value.length > 0) detectionResultRef.value.getData(detectionData.value)
}
// Draw the annotation boxes for the video's current frame.
function drawAnnotations() {
  if (video.value) {
    videoCanvasCtx.value?.clearRect(0, 0, videoCanvas.value.width, videoCanvas.value.height)
    if (video.value.paused) return
    // frame = current playback time * frame rate
    const currentFrame = Math.round(video.value.currentTime * frameRate.value)
    let boxes = annotations.get(currentFrame)
    if (currentFrame === 0) {
      boxes = annotations.get(1)
    } else if (currentFrame > annotations.size) {
      // NOTE(review): annotations.size is the entry count, not the highest
      // frame key — this fallback assumes frames are stored contiguously
      // from 1; confirm against the backend's frame numbering.
      boxes = annotations.get(annotations.size)
    }
    detectionData.value = []
    if (boxes?.length === 0) {
      videoCanvasCtx.value.clearRect(0, 0, videoCanvas.value.width, videoCanvas.value.height)
      return
    }
    boxes?.forEach(box => {
      const [x1, y1, width, height, level, type, cls_res, id] = box
      detectionData.value.push({
        type: type,
        level: level,
        children: cls_res,
        id,
      })
      // Rectangle; the selected box is highlighted in red.
      videoCanvasCtx.value.beginPath()
      videoCanvasCtx.value.strokeStyle = clickedBoxId.value === id ? 'red' : 'yellow'
      videoCanvasCtx.value.lineWidth = 2
      videoCanvasCtx.value.strokeRect(x1, y1, width, height)
      // Type label just above the box.
      videoCanvasCtx.value.font = '20px Arial'
      videoCanvasCtx.value.fillStyle = clickedBoxId.value === id ? 'red' : 'yellow'
      videoCanvasCtx.value.fillText(type.toString(), x1, y1 - 5)
    })
    // Update the result table once per repaint instead of once per box;
    // debug logging removed.
    if (boxes?.length) detectionResultRef.value.getData(detectionData.value)
  }
}
// Load the media list of a dataset and preview the appropriate item.
async function getMediaList(datasetId, type) {
  const param = {
    page: {
      limit: -1, // -1 = no paging, fetch everything
      page: -1,
      total: 0,
    },
    querys: [
      {
        group: 'advance',
        operation: 'EQUAL',
        property: 'dataset_id',
        relation: 'AND',
        value: datasetId,
      },
    ],
  }
  const res = await localImageVideo.getImgList(param)
  mediaList.value = res.data.list
  if (mediaList.value && mediaList.value.length > 0) {
    mediaList.value.forEach(item => {
      // Cache-busting timestamp; fixed the '&&' typo so the query string has
      // a single separator between params.
      const random = Math.ceil(Math.random() * 100000)
      item.src = `/_api/detectionDataset/preview?time=${random}&filePath=${item.filePath.replaceAll('\\', '/')}`
    })
  }
  if (type === 'localupload') {
    // Fresh upload: start at the first item.
    activeMediaIndex.value = 0
    currentMedia.value = mediaList.value[0]
  } else {
    // Dataset selection: keep the previously chosen item in focus.
    activeMediaIndex.value = mediaList.value.findIndex(item => item.id === currentMedia.value.id)
  }
  if (isImage.value) {
    previewImg()
  } else {
    previewVideo()
  }
  reset()
}
// Dataset chosen in the picker: preview the first checked item.
async function handleImgAndVideo(datasetId: string, checkedList: string[]) {
  currentMedia.value = checkedList[0]
  checkState.value = 0
  getMediaList(datasetId, 'selectDataset')
}
// A local upload finished: remember the dataset id and reload its media.
function changeLoad(id) {
  checkState.value = 0
  datasetId.value = id
  getMediaList(id, 'localupload')
}
// Clear detection results, the result table, the detail preview, and redraw.
function reset() {
  detectionData.value = []
  detectionResultRef.value.getData(detectionData.value)
  // The detail <img> may not be mounted yet — guard before clearing it
  // (previously this threw when the element was absent).
  const croppedPreview = document.getElementById('croppedPreview')
  if (croppedPreview) croppedPreview.src = ''
  if (isImage.value) {
    imgAnnotation.value = []
    drawImgAnnotations()
  } else {
    annotations.clear()
    drawAnnotations()
  }
}
// Open the dataset picker; a running video detection is stopped first.
function handleDataset() {
  const leavingVideo = !isImage.value
  if (leavingVideo) {
    stopVideoDetection()
    annotations.clear()
  }
  selectDatasetRef.value.open()
}
// Open the local-upload dialog; a running video detection is stopped first.
function handleUpload() {
  const leavingVideo = !isImage.value
  if (leavingVideo) {
    stopVideoDetection()
    annotations.clear()
  }
  localUploadRef.value.open()
}
// Go to the previous media item.
async function prevPage() {
  if (!isImage.value) {
    // Leaving a video: stop its detection task and drop cached frames.
    stopVideoDetection()
    annotations.clear()
  }
  if (activeMediaIndex.value > 0) {
    activeMediaIndex.value--
    currentMedia.value = mediaList.value[activeMediaIndex.value]
    if (isImage.value) {
      previewImg()
    } else {
      previewVideo()
    }
    reset()
  } else {
    message.warning('此为第一个影像')
  }
}
// Go to the next media item.
async function nextPage() {
  if (!isImage.value) {
    // Leaving a video: stop its detection task and drop cached frames.
    stopVideoDetection()
    annotations.clear()
  }
  if (activeMediaIndex.value < mediaList.value.length - 1) {
    activeMediaIndex.value++
    currentMedia.value = mediaList.value[activeMediaIndex.value]
    reset()
    if (isImage.value) {
      previewImg()
    } else {
      previewVideo()
    }
  } else {
    message.warning('此为最后一个影像')
  }
}
</script>
<style scoped lang="scss">
.analysis {
width: 100%;
height: 100%;
background: #f3f5fb;
// Top: preview area (60% of the page height)
.analysis-top {
height: 60%;
display: flex;
.preview-wrap {
//width: calc(100% - 300px);
width: 100%;
background: #fff;
//margin-right: 10px;
.top-btns {
width: 100%;
height: 50px;
padding: 10px 20px;
position: relative;
display: flex;
justify-content: space-between;
align-items: center;
.right-btn {
.refresh {
margin: 0 20px 0 10px;
}
.btn {
margin-left: 10px;
}
}
.name {
font-weight: 600;
font-size: 16px;
}
}
.image-list {
width: 100%;
height: calc(100% - 52px);
display: flex;
justify-content: space-between;
align-items: center;
.media {
width: calc(100% - 60px);
height: 100%;
position: relative;
}
.workspace {
width: 100%;
height: 100%;
display: flex;
.media-container {
width: 100%;
height: 100%;
position: relative;
display: inline-block;
// Center the media horizontally inside the container
img,
video {
position: absolute;
left: 50%;
transform: translateX(-50%);
}
// Overlays sit on top of the media, centered the same way
#imgCanvasOverlay,
#canvasOverlay {
position: absolute;
top: 0;
// left: 0;
left: 50%;
transform: translateX(-50%);
pointer-events: none; /* keep the canvas from blocking video controls */
z-index: 99;
}
}
}
.btn {
height: 100%;
line-height: calc(100% - 50px);
width: 60px;
display: flex;
justify-content: center;
align-items: center;
// background: #ccc;
}
}
}
}
// Bottom: result table + detail magnifier (remaining 40%)
.analysis-bottom {
height: calc(40% - 10px);
display: flex;
margin-top: 10px;
.result {
width: calc(100% - 300px);
height: 100%;
background: #fff;
margin-right: 10px;
}
.detail {
width: 300px;
background: #fff;
padding: 10px;
.magnifier-glass {
padding: 10px 0;
width: calc(100% - 20px);
overflow: scroll;
#croppedPreview {
object-fit: contain;
border: 2px solid #fff;
}
}
}
}
}
// Hide the native fullscreen control on WebKit video
video::-webkit-media-controls-fullscreen-button {
display: none;
}
</style>