First, every successfully captured frame is delivered through a closure callback, which hands the frame to renderPixelBuffer. renderPixelBuffer in turn triggers the MTKView's draw, so the actual rendering entry point is the MTKViewDelegate callback.
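The capture side is not shown in this post. For context, here is a minimal sketch of how such a callback could forward each frame to the view, assuming an AVCaptureVideoDataOutput sample-buffer delegate; KFCaptureRenderer and its metalView property are illustrative names, not part of the original project.
import AVFoundation

// Hypothetical capture-side delegate: it extracts the CVPixelBuffer from each
// CMSampleBuffer and forwards it to KFMetalView for rendering.
final class KFCaptureRenderer: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate {
    weak var metalView: KFMetalView?

    func captureOutput(_ output: AVCaptureOutput,
                       didOutput sampleBuffer: CMSampleBuffer,
                       from connection: AVCaptureConnection) {
        guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
        metalView?.renderPixelBuffer(pixelBuffer)
    }
}
With frames arriving that way, the MTKViewDelegate side looks like this: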
// MARK: - MTKViewDelegate
extension KFMetalView: MTKViewDelegate {
func mtkView(_ view: MTKView, drawableSizeWillChange size: CGSize) {
viewportSize = vector_uint2(UInt32(size.width), UInt32(size.height))
}
func draw(in view: MTKView) {
// Metal 视图回调,有数据情况下渲染视图
weak var weakSelf = self
renderQueue.async {
guard let self = weakSelf else { return }
self.drawInMTKView(view)
}
}
}
Finally, the real work happens in drawInMTKView:
/// 渲染数据
private func drawInMTKView(_ view: MTKView) {
semaphore.wait()
defer { semaphore.signal() }
guard let pixelBuffer = self.pixelBuffer else {
return
}
self.pixelBuffer = nil
// 检查并初始化命令队列
if commandQueue == nil {
guard let device = mtkView.device else {
return
}
commandQueue = device.makeCommandQueue()
}
guard let commandQueue = self.commandQueue,
let renderPassDescriptor = view.currentRenderPassDescriptor,
let drawable = view.currentDrawable else {
return
}
let commandBuffer = commandQueue.makeCommandBuffer()
guard let renderEncoder = commandBuffer?.makeRenderCommandEncoder(descriptor: renderPassDescriptor) else {
return
}
renderEncoder.setViewport(MTLViewport(originX: 0.0, originY: 0.0,
width: Double(viewportSize.x),
height: Double(viewportSize.y),
znear: -1.0, zfar: 1.0))
let isRenderYUV = CVPixelBufferGetPlaneCount(pixelBuffer) > 1
// 确保创建管道状态
if pipelineState == nil {
setupPipeline(isYUV: isRenderYUV)
}
guard let pipelineState = pipelineState else {
renderEncoder.endEncoding()
return
}
renderEncoder.setRenderPipelineState(pipelineState)
if updateFillMode {
updateVertices()
updateFillMode = false
}
if let vertices = vertices {
// 使用常量值0作为顶点缓冲索引,与Metal着色器中定义一致
renderEncoder.setVertexBuffer(vertices, offset: 0, index: 0)
} else {
renderEncoder.endEncoding()
return
}
if isRenderYUV {
// 处理 YUV 纹理
var textureY: MTLTexture?
var textureUV: MTLTexture?
if let textureCache = textureCache {
// 确保pixelBuffer被锁定以便安全访问
CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
defer {
CVPixelBufferUnlockBaseAddress(pixelBuffer, .readOnly)
}
let width = CVPixelBufferGetWidthOfPlane(pixelBuffer, 0)
let height = CVPixelBufferGetHeightOfPlane(pixelBuffer, 0)
let pixelFormat = MTLPixelFormat.r8Unorm
var cvTextureY: CVMetalTexture?
let statusY = CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault, textureCache, pixelBuffer, nil, pixelFormat, width, height, 0, &cvTextureY)
if statusY == kCVReturnSuccess, let cvTextureY = cvTextureY {
textureY = CVMetalTextureGetTexture(cvTextureY)
}
let widthUV = CVPixelBufferGetWidthOfPlane(pixelBuffer, 1)
let heightUV = CVPixelBufferGetHeightOfPlane(pixelBuffer, 1)
let pixelFormatUV = MTLPixelFormat.rg8Unorm
var cvTextureUV: CVMetalTexture?
let statusUV = CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault, textureCache, pixelBuffer, nil, pixelFormatUV, widthUV, heightUV, 1, &cvTextureUV)
if statusUV == kCVReturnSuccess, let cvTextureUV = cvTextureUV {
textureUV = CVMetalTextureGetTexture(cvTextureUV)
}
}
if let textureY = textureY, let textureUV = textureUV {
// 设置纹理,使用shader中定义的索引
renderEncoder.setFragmentTexture(textureY, index: 0)
renderEncoder.setFragmentTexture(textureUV, index: 1)
} else {
renderEncoder.endEncoding()
return
}
if yuvMatrix == nil {
// 获取颜色空间信息,如果没有就使用默认值
let colorSpace: CFTypeRef
let isFullRange: Bool
// 尝试从pixelBuffer获取YUV颜色空间信息
if let matrixKey = CVBufferCopyAttachment(pixelBuffer, kCVImageBufferYCbCrMatrixKey, nil) {
colorSpace = matrixKey
isFullRange = pixelBufferIsFullRange(pixelBuffer)
} else {
// 如果pixelBuffer中没有颜色空间信息,使用默认值
// 对于相机视频,通常使用BT.601标准和full range
colorSpace = kCVImageBufferYCbCrMatrix_ITU_R_601_4
// 检查格式类型判断是否为full range
let format = CVPixelBufferGetPixelFormatType(pixelBuffer)
isFullRange = (format == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)
}
setupYUVMatrix(isFullRange: isFullRange, colorSpace: colorSpace)
}
// 设置矩阵缓冲区,使用shader中定义的索引
if let yuvMatrix = yuvMatrix {
renderEncoder.setFragmentBuffer(yuvMatrix, offset: 0, index: 0)
}
} else {
// 处理 RGB 纹理
var textureRGB: MTLTexture?
if let textureCache = textureCache {
let width = CVPixelBufferGetWidth(pixelBuffer)
let height = CVPixelBufferGetHeight(pixelBuffer)
let pixelFormat = MTLPixelFormat.bgra8Unorm
var cvTextureRGB: CVMetalTexture?
let status = CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault, textureCache, pixelBuffer, nil, pixelFormat, width, height, 0, &cvTextureRGB)
if status == kCVReturnSuccess, let cvTextureRGB = cvTextureRGB {
textureRGB = CVMetalTextureGetTexture(cvTextureRGB)
}
}
if let textureRGB = textureRGB {
// 设置RGB纹理,使用shader中定义的索引
renderEncoder.setFragmentTexture(textureRGB, index: 0)
} else {
renderEncoder.endEncoding()
return
}
}
renderEncoder.drawPrimitives(type: .triangleStrip, vertexStart: 0, vertexCount: numVertices)
renderEncoder.endEncoding()
commandBuffer?.present(drawable)
commandBuffer?.commit()
}
A summary of how drawInMTKView works:
Fetch and clear the pixel buffer
- Use the semaphore to make access thread safe
- Take the current pixelBuffer and immediately clear the stored reference
Prepare rendering resources
- Check and, if needed, create the command queue
- Obtain the current render pass descriptor and drawable
- Create a command buffer and a render command encoder
Set up render state
- Set the viewport size
- Detect whether the frame is YUV (by checking the plane count)
- Make sure the render pipeline state exists (call setupPipeline if it does not)
- Rebuild the vertex data if the fill mode changed
Set the vertex buffer
- Pass the vertex data to the render encoder
Handle textures
- For YUV frames:
  - Create textures for the Y plane and the UV plane
  - Set up the YUV conversion matrix (if not already set)
  - Pass the textures and the matrix to the fragment shader
- For RGB frames:
  - Create a single RGB texture and pass it to the fragment shader
Issue the draw
- Draw a triangle strip (triangleStrip)
- End encoding
- Present the drawable and commit the command buffer
Error handling
- Each step checks that its resources were created; if anything fails, the render pass ends early
Altogether this is the complete path from a CVPixelBuffer (YUV or RGB) to a Metal render: texture creation, color-format conversion, and aspect-ratio adjustment according to the fill mode.
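To make the overall flow concrete, here is a minimal usage sketch based only on the public API shown in this post; the view controller and the way frames reach handleFrame are assumptions made for illustration.
import UIKit
import CoreVideo

final class PreviewViewController: UIViewController {
    // Metal-backed preview view from this post.
    private let metalView = KFMetalView(frame: .zero)

    override func viewDidLoad() {
        super.viewDidLoad()
        metalView.frame = view.bounds
        metalView.fillMode = .fit   // .fit letterboxes, .fill crops, .stretch distorts
        view.addSubview(metalView)
    }

    // Call this with every captured or decoded frame.
    func handleFrame(_ pixelBuffer: CVPixelBuffer) {
        metalView.renderPixelBuffer(pixelBuffer)
    }
}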
The rest of the rendering goes through the standard render pipeline and is much the same as drawing a basic triangle: writing the .metal shader file, creating the render pipeline state, and so on. The two complete source files follow.
//
// Shader.metal
// VideoDemo
//
// Created by ricard.li on 2025/5/23.
//
#include <metal_stdlib>
using namespace metal;
typedef struct {
// 顶点坐标,4 维向量。
vector_float4 position;
// 纹理坐标。
vector_float2 textureCoordinate;
} KFVertex;
typedef struct {
// YUV 矩阵,使用float4x4替代float3x3以确保内存对齐
float4x4 matrix;
// 是否为 full range,使用uint代替bool以确保内存布局一致
uint fullRange;
} KFConvertMatrix;
// 顶点的桥接枚举值 KFVertexInputIndexVertices。
typedef enum KFVertexInputIndex {
KFVertexInputIndexVertices = 0,
} KFVertexInputIndex;
// YUV 矩阵的桥接枚举值 KFFragmentInputIndexMatrix。
typedef enum KFFragmentBufferIndex {
KFFragmentInputIndexMatrix = 0,
} KFMetalFragmentBufferIndex;
// YUV 数据的桥接枚举值 KFFragmentTextureIndexTextureY、KFFragmentTextureIndexTextureUV。
typedef enum KFFragmentYUVTextureIndex {
KFFragmentTextureIndexTextureY = 0,
KFFragmentTextureIndexTextureUV = 1,
} KFFragmentYUVTextureIndex;
// RGBA 数据的桥接枚举值 KFFragmentTextureIndexTextureRGB。
typedef enum KFFragmentRGBTextureIndex {
KFFragmentTextureIndexTextureRGB = 0,
} KFFragmentRGBTextureIndex;
// 定义了一个类型为 RasterizerData 的结构体,里面有一个 float4 向量和 float2 向量。
typedef struct {
// float4:4 维向量;
// clipSpacePosition:参数名,表示顶点;
// [[position]]:position 是顶点修饰符,这是苹果内置的语法,不能改变,表示顶点信息。
float4 clipSpacePosition [[position]];
// float2:2 维向量;
// textureCoordinate:参数名,这里表示纹理。
float2 textureCoordinate;
} RasterizerData;
// 顶点函数通过一个自定义的结构体,返回对应的数据;顶点函数的输入参数也可以是自定义结构体。
// 顶点函数
// vertex:函数修饰符,表示顶点函数;
// RasterizerData:返回值类型;
// vertexShader:函数名;
// [[vertex_id]]:vertex_id 是顶点 id 修饰符,苹果内置的语法不可改变;
// [[buffer(KFVertexInputIndexVertices)]]:buffer 是缓存数据修饰符,苹果内置的语法不可改变,KFVertexInputIndexVertices 是索引;
// constant:是变量类型修饰符,表示存储在 device 区域。
vertex RasterizerData vertexShader(uint vertexID [[vertex_id]],
constant KFVertex *vertexArray [[buffer(KFVertexInputIndexVertices)]]) {
RasterizerData out;
out.clipSpacePosition = vertexArray[vertexID].position;
out.textureCoordinate = vertexArray[vertexID].textureCoordinate;
return out;
}
// 片元函数
// fragment:函数修饰符,表示片元函数;
// float4:返回值类型,返回 RGBA;
// fragmentImageShader:函数名;
// RasterizerData:参数类型;
// input:变量名;
// [[stage_in]]:stage_in 表示这个数据来自光栅化,光栅化是顶点处理之后的步骤,业务层无法修改。
// texture2d:类型表示纹理;
// textureY:表示 Y 通道;
// textureUV:表示 UV 通道;
// [[texture(index)]]:纹理修饰符;可以加索引:[[texture(0)]] 对应纹理 0,[[texture(1)]] 对应纹理 1;
// KFFragmentTextureIndexTextureY、KFFragmentTextureIndexTextureUV:表示纹理索引。
fragment float4 yuvSamplingShader(RasterizerData input [[stage_in]],
texture2d<float> textureY [[texture(KFFragmentTextureIndexTextureY)]],
texture2d<float> textureUV [[texture(KFFragmentTextureIndexTextureUV)]],
constant KFConvertMatrix *convertMatrix [[buffer(KFFragmentInputIndexMatrix)]]) {
constexpr sampler textureSampler (mag_filter::linear, min_filter::linear);
// 初始化YUV向量
float3 yuv;
// 根据范围处理Y值
if (convertMatrix->fullRange != 0) { // full range.
yuv.x = textureY.sample(textureSampler, input.textureCoordinate).r;
} else { // video range.
yuv.x = textureY.sample(textureSampler, input.textureCoordinate).r - (16.0 / 255.0);
}
// 获取UV值,并从[0,1]范围转换到[-0.5,0.5]
yuv.yz = textureUV.sample(textureSampler, input.textureCoordinate).rg - 0.5;
// 使用YUV转RGB矩阵计算RGB值 - 使用float4x4矩阵,但仍提取float3结果
float3 rgb = float3(convertMatrix->matrix * float4(yuv, 1.0));
// 确保结果在合法范围内
rgb = clamp(rgb, 0.0, 1.0);
return float4(rgb, 1.0);
}
// 片元函数
// fragment:函数修饰符,表示片元函数;
// float4:返回值类型,返回 RGBA;
// fragmentImageShader:函数名;
// RasterizerData:参数类型;
// input:变量名;
// [[stage_in]]:stage_in 表示这个数据来自光栅化,光栅化是顶点处理之后的步骤,业务层无法修改。
// texture2d:类型表示纹理;
// colorTexture:代表 RGBA 数据;
// [[texture(index)]]:纹理修饰符;可以加索引:[[texture(0)]] 对应纹理 0,[[texture(1)]] 对应纹理 1;
// KFFragmentTextureIndexTextureRGB:表示纹理索引。
fragment float4 rgbSamplingShader(RasterizerData input [[stage_in]],
texture2d<half> colorTexture [[texture(KFFragmentTextureIndexTextureRGB)]]) {
constexpr sampler textureSampler (mag_filter::linear, min_filter::linear);
half4 colorSample = colorTexture.sample(textureSampler, input.textureCoordinate);
return float4(colorSample);
}
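Neither file shows how the KFVertex and KFConvertMatrix types defined in Shader.metal become visible to Swift; in a project like this they would normally live in a shared C header exposed through a bridging header. The sketch below is a Swift-side mirror written under that assumption (it is not in the original source); the field order and types must match the Metal structs so the GPU reads the buffers correctly.
import simd

// Assumed Swift-side mirrors of the shader structs (normally bridged from a shared C header).
struct KFVertex {
    var position: vector_float4          // clip-space position
    var textureCoordinate: vector_float2 // texture coordinate in [0, 1]
}

struct KFConvertMatrix {
    var matrix: matrix_float4x4 // YUV -> RGB conversion matrix
    var fullRange: UInt32       // 1 = full range, 0 = video range
}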
//
// KFMetalView.swift
// VideoDemo
//
// Created by ricard.li on 2025/5/23.
//
import UIKit
import MetalKit
import AVFoundation
import MetalPerformanceShaders
import simd
// 渲染画面填充模式
enum KFMetalViewContentMode: Int {
case stretch = 0 // 自动填充满,可能会变形
case fit = 1 // 按比例适配,可能会有黑边
case fill = 2 // 根据比例裁剪后填充满
}
// 颜色空间转换矩阵,BT.601 Video Range
private let kFColorMatrix601VideoRange = matrix_float4x4(
simd_float4(1.164, 1.164, 1.164, 0.0),
simd_float4(0.0, -0.392, 2.017, 0.0),
simd_float4(1.596, -0.813, 0.0, 0.0),
simd_float4(0.0, 0.0, 0.0, 1.0)
)
// 颜色空间转换矩阵,BT.601 Full Range
private let kFColorMatrix601FullRange = matrix_float4x4(
simd_float4(1.0, 1.0, 1.0, 0.0),
simd_float4(0.0, -0.343, 1.765, 0.0),
simd_float4(1.4, -0.711, 0.0, 0.0),
simd_float4(0.0, 0.0, 0.0, 1.0)
)
// 颜色空间转换矩阵,BT.709 Video Range
private let kFColorMatrix709VideoRange = matrix_float4x4(
simd_float4(1.164, 1.164, 1.164, 0.0),
simd_float4(0.0, -0.213, 2.112, 0.0),
simd_float4(1.793, -0.533, 0.0, 0.0),
simd_float4(0.0, 0.0, 0.0, 1.0)
)
// 颜色空间转换矩阵,BT.709 Full Range
private let kFColorMatrix709FullRange = matrix_float4x4(
simd_float4(1.0, 1.0, 1.0, 0.0),
simd_float4(0.0, -0.187, 1.856, 0.0),
simd_float4(1.575, -0.468, 0.0, 0.0),
simd_float4(0.0, 0.0, 0.0, 1.0)
)
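// Worked example (added for illustration), BT.601 video range: reference white has
// Y = 235/255 and U = V = 128/255. The shader subtracts 16/255 from Y (≈ 0.859) and 0.5
// from U/V (= 0), so R = G = B ≈ 1.164 * 0.859 ≈ 1.0, i.e. white, as expected.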
class KFMetalView: UIView {
// MARK: - Public Properties
// 画面填充模式
var fillMode: KFMetalViewContentMode = .fit {
didSet {
updateFillMode = true
}
}
// MARK: - Private Properties
// 外层输入的最后一帧数据
private var pixelBuffer: CVPixelBuffer?
// 处理 PixelBuffer 锁,防止外层输入线程与渲染线程同时操作 Crash
private let semaphore = DispatchSemaphore(value: 1)
// 纹理缓存,根据 pixelbuffer 获取纹理
private var textureCache: CVMetalTextureCache?
// Metal 渲染的 view
private var mtkView: MTKView!
// 视口大小
private var viewportSize = vector_uint2(0, 0)
// 渲染管道,管理顶点函数和片元函数
private var pipelineState: MTLRenderPipelineState?
// 渲染指令队列
private var commandQueue: MTLCommandQueue?
// 顶点缓存对象
private var vertices: MTLBuffer?
// 顶点数量
private var numVertices: Int = 0
// YUV 数据矩阵对象
private var yuvMatrix: MTLBuffer?
// 填充模式变更标记
private var updateFillMode = true
// pixelBuffer 数据尺寸
private var pixelBufferSize = CGSize.zero
// 当前视图大小
private var currentViewSize = CGSize.zero
// 渲染线程
private let renderQueue = DispatchQueue(label: "com.KeyFrameKit.metalView.renderQueue", qos: .userInteractive)
// MARK: - Lifecycle
override init(frame: CGRect) {
super.init(frame: frame)
setupView(frame: frame)
}
required init?(coder: NSCoder) {
super.init(coder: coder)
setupView(frame: bounds)
}
private func setupView(frame: CGRect) {
currentViewSize = frame.size
fillMode = .fit
updateFillMode = true
// 设置视图背景色为黑色
backgroundColor = .black
// 创建 Metal 渲染视图且添加到当前视图
mtkView = MTKView(frame: bounds)
guard let device = MTLCreateSystemDefaultDevice() else {
return
}
mtkView.device = device
mtkView.backgroundColor = .black // 设置Metal视图背景色为黑色
// 确保使用正确的像素格式
mtkView.colorPixelFormat = .bgra8Unorm
// 确保每次绘制都会调用delegate
mtkView.isPaused = false
// 确保绘制能够连续进行
mtkView.enableSetNeedsDisplay = false
addSubview(mtkView)
mtkView.delegate = self
mtkView.framebufferOnly = true
viewportSize = vector_uint2(UInt32(mtkView.drawableSize.width), UInt32(mtkView.drawableSize.height))
// 创建纹理缓存
var newTextureCache: CVMetalTextureCache?
let status = CVMetalTextureCacheCreate(nil, nil, device, nil, &newTextureCache)
if status == kCVReturnSuccess {
textureCache = newTextureCache
}
// 预先创建命令队列
commandQueue = device.makeCommandQueue()
}
//确保Metal视图(mtkView)与包含它的父视图(KFMetalView)大小完全一致
override func layoutSubviews() {
// 视图自动调整布局,同步至 Metal 视图
super.layoutSubviews()
mtkView.frame = bounds
currentViewSize = bounds.size
}
deinit {
// 释放最后一帧数据、纹理缓存
semaphore.wait()
pixelBuffer = nil
if let textureCache = textureCache {
CVMetalTextureCacheFlush(textureCache, 0)
}
textureCache = nil
semaphore.signal()
mtkView.releaseDrawables()
}
// MARK: - Public Methods
/// 渲染 CVPixelBuffer
func renderPixelBuffer(_ pixelBuffer: CVPixelBuffer) {
semaphore.wait()
self.pixelBuffer = pixelBuffer
pixelBufferSize = CGSize(width: CVPixelBufferGetWidth(pixelBuffer), height: CVPixelBufferGetHeight(pixelBuffer))
semaphore.signal()
// 在主线程调用 mtkView.draw() 主动触发一次绘制
DispatchQueue.main.async { [weak self] in
self?.mtkView.draw()
}
}
// MARK: - Private Methods
/// 初始化渲染管道
private func setupPipeline(isYUV: Bool) {
guard let device = mtkView.device else {
return
}
// 获取默认库
guard let library = device.makeDefaultLibrary() else {
return
}
// 根据是否处理YUV选择顶点和片元着色器
let vertexFunctionName = "vertexShader"
let fragmentFunctionName = isYUV ? "yuvSamplingShader" : "rgbSamplingShader"
guard let vertexFunction = library.makeFunction(name: vertexFunctionName) else {
return
}
guard let fragmentFunction = library.makeFunction(name: fragmentFunctionName) else {
return
}
let pipelineStateDescriptor = MTLRenderPipelineDescriptor()
pipelineStateDescriptor.vertexFunction = vertexFunction
pipelineStateDescriptor.fragmentFunction = fragmentFunction
pipelineStateDescriptor.colorAttachments[0].pixelFormat = mtkView.colorPixelFormat
do {
pipelineState = try device.makeRenderPipelineState(descriptor: pipelineStateDescriptor)
if commandQueue == nil {
commandQueue = device.makeCommandQueue()
}
} catch {
// 渲染管道创建失败时静默处理:pipelineState 保持为 nil,后续绘制会提前返回
}
}
/// 初始化 YUV 矩阵
private func setupYUVMatrix(isFullRange: Bool, colorSpace: CFTypeRef) {
guard let device = mtkView.device else {
return
}
// 选择正确的YUV转RGB矩阵
var matrix: matrix_float4x4
// 安全地检查颜色空间类型
let colorSpaceString = CFCopyDescription(colorSpace) as String
let is601 = colorSpaceString.contains("601")
let is709 = colorSpaceString.contains("709")
if is601 {
if isFullRange {
matrix = kFColorMatrix601FullRange
} else {
matrix = kFColorMatrix601VideoRange
}
} else if is709 {
if isFullRange {
matrix = kFColorMatrix709FullRange
} else {
matrix = kFColorMatrix709VideoRange
}
} else {
// 默认使用BT.601
if isFullRange {
matrix = kFColorMatrix601FullRange
} else {
matrix = kFColorMatrix601VideoRange
}
}
// 创建转换矩阵结构体,将Bool转换为UInt32
var convertMatrix = KFConvertMatrix(matrix: matrix, fullRange: isFullRange ? 1 : 0)
// 创建矩阵缓冲区
yuvMatrix = device.makeBuffer(bytes: &convertMatrix, length: MemoryLayout<KFConvertMatrix>.size, options: .storageModeShared)
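// Note (added): MemoryLayout<KFConvertMatrix>.size may be smaller than the padded size the
// shader expects for this struct; if Metal's validation layer complains about the buffer
// length, MemoryLayout<KFConvertMatrix>.stride is the safer choice here.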
}
/// 更新顶点数据
private func updateVertices() {
guard let device = mtkView.device else { return }
var widthScaling: Float = 1.0
var heightScaling: Float = 1.0
if !currentViewSize.equalTo(.zero) && !pixelBufferSize.equalTo(.zero) {
// 计算视图和纹理的宽高比
let viewAspect = Float(currentViewSize.width / currentViewSize.height)
let textureAspect = Float(pixelBufferSize.width / pixelBufferSize.height)
switch fillMode {
case .stretch:
// 拉伸模式,直接使用 1.0
widthScaling = 1.0
heightScaling = 1.0
case .fit:
// 适配模式,保持比例,可能有黑边
if textureAspect > viewAspect {
// 视频比例比视图宽,高度需要缩小
widthScaling = 1.0
heightScaling = viewAspect / textureAspect
} else {
// 视频比例比视图窄,宽度需要缩小
widthScaling = textureAspect / viewAspect
heightScaling = 1.0
}
case .fill:
// 填充模式,确保没有黑边,可能会裁剪部分内容
if textureAspect > viewAspect {
// 视频比例比视图宽,宽度需要裁剪
widthScaling = viewAspect / textureAspect
heightScaling = 1.0
} else {
// 视频比例比视图窄,高度需要裁剪
widthScaling = 1.0
heightScaling = textureAspect / viewAspect
}
// 填充模式要确保覆盖整个屏幕,所以这里要取倒数
widthScaling = 1.0 / widthScaling
heightScaling = 1.0 / heightScaling
}
}
// 创建顶点数组,与shader中的KFVertex结构一致
let quadVertices: [KFVertex] = [
KFVertex(position: vector_float4(-widthScaling, -heightScaling, 0.0, 1.0), textureCoordinate: vector_float2(0.0, 1.0)),
KFVertex(position: vector_float4(widthScaling, -heightScaling, 0.0, 1.0), textureCoordinate: vector_float2(1.0, 1.0)),
KFVertex(position: vector_float4(-widthScaling, heightScaling, 0.0, 1.0), textureCoordinate: vector_float2(0.0, 0.0)),
KFVertex(position: vector_float4(widthScaling, heightScaling, 0.0, 1.0), textureCoordinate: vector_float2(1.0, 0.0))
]
vertices = device.makeBuffer(bytes: quadVertices, length: MemoryLayout<KFVertex>.stride * quadVertices.count, options: .storageModeShared)
numVertices = quadVertices.count
}
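// Worked example (added for illustration): a 1920x1080 frame (aspect ≈ 1.78) shown in a
// 390x844 portrait view (aspect ≈ 0.46). With .fit the quad becomes width 1.0 and height
// ≈ 0.26 (letterboxed top and bottom); with .fill it becomes width ≈ 3.85 and height 1.0,
// so the frame fills the view and is cropped on the left and right.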
/// 判断 YUV 数据是否为 full range
private func pixelBufferIsFullRange(_ pixelBuffer: CVPixelBuffer) -> Bool {
// 根据像素格式类型判断
let pixelFormatType = CVPixelBufferGetPixelFormatType(pixelBuffer)
// 检查是否是已知的全范围格式
let isFullRange: Bool
switch pixelFormatType {
case kCVPixelFormatType_420YpCbCr8BiPlanarFullRange:
isFullRange = true
case kCVPixelFormatType_420YpCbCr8PlanarFullRange:
isFullRange = true
default:
isFullRange = false
}
return isFullRange
}
/// 渲染数据
private func drawInMTKView(_ view: MTKView) {
semaphore.wait()
defer { semaphore.signal() }
guard let pixelBuffer = self.pixelBuffer else {
return
}
self.pixelBuffer = nil
// 检查并初始化命令队列
if commandQueue == nil {
guard let device = mtkView.device else {
return
}
commandQueue = device.makeCommandQueue()
}
guard let commandQueue = self.commandQueue,
let renderPassDescriptor = view.currentRenderPassDescriptor,
let drawable = view.currentDrawable else {
return
}
let commandBuffer = commandQueue.makeCommandBuffer()
guard let renderEncoder = commandBuffer?.makeRenderCommandEncoder(descriptor: renderPassDescriptor) else {
return
}
renderEncoder.setViewport(MTLViewport(originX: 0.0, originY: 0.0,
width: Double(viewportSize.x),
height: Double(viewportSize.y),
znear: -1.0, zfar: 1.0))
let isRenderYUV = CVPixelBufferGetPlaneCount(pixelBuffer) > 1
// 确保创建管道状态
if pipelineState == nil {
setupPipeline(isYUV: isRenderYUV)
}
guard let pipelineState = pipelineState else {
renderEncoder.endEncoding()
return
}
renderEncoder.setRenderPipelineState(pipelineState)
if updateFillMode {
updateVertices()
updateFillMode = false
}
if let vertices = vertices {
// 使用常量值0作为顶点缓冲索引,与Metal着色器中定义一致
renderEncoder.setVertexBuffer(vertices, offset: 0, index: 0)
} else {
renderEncoder.endEncoding()
return
}
if isRenderYUV {
// 处理 YUV 纹理
var textureY: MTLTexture?
var textureUV: MTLTexture?
if let textureCache = textureCache {
// 确保pixelBuffer被锁定以便安全访问
CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
defer {
CVPixelBufferUnlockBaseAddress(pixelBuffer, .readOnly)
}
let width = CVPixelBufferGetWidthOfPlane(pixelBuffer, 0)
let height = CVPixelBufferGetHeightOfPlane(pixelBuffer, 0)
let pixelFormat = MTLPixelFormat.r8Unorm
var cvTextureY: CVMetalTexture?
let statusY = CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault, textureCache, pixelBuffer, nil, pixelFormat, width, height, 0, &cvTextureY)
if statusY == kCVReturnSuccess, let cvTextureY = cvTextureY {
textureY = CVMetalTextureGetTexture(cvTextureY)
}
let widthUV = CVPixelBufferGetWidthOfPlane(pixelBuffer, 1)
let heightUV = CVPixelBufferGetHeightOfPlane(pixelBuffer, 1)
let pixelFormatUV = MTLPixelFormat.rg8Unorm
var cvTextureUV: CVMetalTexture?
let statusUV = CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault, textureCache, pixelBuffer, nil, pixelFormatUV, widthUV, heightUV, 1, &cvTextureUV)
if statusUV == kCVReturnSuccess, let cvTextureUV = cvTextureUV {
textureUV = CVMetalTextureGetTexture(cvTextureUV)
}
}
if let textureY = textureY, let textureUV = textureUV {
// 设置纹理,使用shader中定义的索引
renderEncoder.setFragmentTexture(textureY, index: 0)
renderEncoder.setFragmentTexture(textureUV, index: 1)
} else {
renderEncoder.endEncoding()
return
}
if yuvMatrix == nil {
// 获取颜色空间信息,如果没有就使用默认值
let colorSpace: CFTypeRef
let isFullRange: Bool
// 尝试从pixelBuffer获取YUV颜色空间信息
if let matrixKey = CVBufferCopyAttachment(pixelBuffer, kCVImageBufferYCbCrMatrixKey, nil) {
colorSpace = matrixKey
isFullRange = pixelBufferIsFullRange(pixelBuffer)
} else {
// 如果pixelBuffer中没有颜色空间信息,使用默认值
// 对于相机视频,通常使用BT.601标准和full range
colorSpace = kCVImageBufferYCbCrMatrix_ITU_R_601_4
// 检查格式类型判断是否为full range
let format = CVPixelBufferGetPixelFormatType(pixelBuffer)
isFullRange = (format == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)
}
setupYUVMatrix(isFullRange: isFullRange, colorSpace: colorSpace)
}
// 设置矩阵缓冲区,使用shader中定义的索引
if let yuvMatrix = yuvMatrix {
renderEncoder.setFragmentBuffer(yuvMatrix, offset: 0, index: 0)
}
} else {
// 处理 RGB 纹理
var textureRGB: MTLTexture?
if let textureCache = textureCache {
let width = CVPixelBufferGetWidth(pixelBuffer)
let height = CVPixelBufferGetHeight(pixelBuffer)
let pixelFormat = MTLPixelFormat.bgra8Unorm
var cvTextureRGB: CVMetalTexture?
let status = CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault, textureCache, pixelBuffer, nil, pixelFormat, width, height, 0, &cvTextureRGB)
if status == kCVReturnSuccess, let cvTextureRGB = cvTextureRGB {
textureRGB = CVMetalTextureGetTexture(cvTextureRGB)
}
}
if let textureRGB = textureRGB {
// 设置RGB纹理,使用shader中定义的索引
renderEncoder.setFragmentTexture(textureRGB, index: 0)
} else {
renderEncoder.endEncoding()
return
}
}
renderEncoder.drawPrimitives(type: .triangleStrip, vertexStart: 0, vertexCount: numVertices)
renderEncoder.endEncoding()
commandBuffer?.present(drawable)
commandBuffer?.commit()
}
}
// MARK: - MTKViewDelegate
extension KFMetalView: MTKViewDelegate {
func mtkView(_ view: MTKView, drawableSizeWillChange size: CGSize) {
viewportSize = vector_uint2(UInt32(size.width), UInt32(size.height))
}
func draw(in view: MTKView) {
// Metal 视图回调,有数据情况下渲染视图
weak var weakSelf = self
renderQueue.async {
guard let self = weakSelf else { return }
self.drawInMTKView(view)
}
}
}