1.类文件调用 RTCMTLVideoView → RTCMTLRenderer → - (void)drawFrame {} → - (void)render
- (void)render {
  // Create a new command buffer to record the commands sent to the GPU
  // for this frame.
  id<MTLCommandBuffer> commandBuffer = [_commandQueue commandBuffer];
  // Label the command buffer so it is identifiable in GPU debugging tools.
  // (In the original text this assignment had been fused into the comment
  // line, which left the buffer unlabeled.)
  commandBuffer.label = commandBufferLabel;
  // Capture only the semaphore in the completion handler so the block does
  // not retain self.
  __block dispatch_semaphore_t block_semaphore = _inflight_semaphore;
  // Signal the CPU once the GPU has finished executing all commands.
  [commandBuffer addCompletedHandler:^(id<MTLCommandBuffer> _Nonnull cmdBuf) {
    dispatch_semaphore_signal(block_semaphore);
  }];
  // Render pass descriptor prepared by the MTKView for the current drawable.
  MTLRenderPassDescriptor *renderPassDescriptor = _view.currentRenderPassDescriptor;
  if (renderPassDescriptor) {  // Only encode when a valid render target exists.
    // Create the render command encoder and configure the draw.
    id<MTLRenderCommandEncoder> renderEncoder =
        [commandBuffer renderCommandEncoderWithDescriptor:renderPassDescriptor];
    renderEncoder.label = renderEncoderLabel;
    [renderEncoder pushDebugGroup:renderEncoderDebugGroup];
    [renderEncoder setRenderPipelineState:_pipelineState];
    [renderEncoder setVertexBuffer:_vertexBuffer offset:0 atIndex:0];
    [self uploadTexturesToRenderEncoder:renderEncoder];
    // Draw a 4-vertex triangle strip (a full-screen quad), one instance.
    [renderEncoder drawPrimitives:MTLPrimitiveTypeTriangleStrip
                      vertexStart:0
                      vertexCount:4
                    instanceCount:1];
    [renderEncoder popDebugGroup];
    [renderEncoder endEncoding];
    // Schedule presentation of the drawable once rendering completes.
    [commandBuffer presentDrawable:_view.currentDrawable];
  }
  // CPU-side encoding is done; submit the buffer so the GPU can start.
  [commandBuffer commit];
}
#pragma mark - RTCMTLRenderer
// Renders one incoming video frame.
- (void)drawFrame:(RTC_OBJC_TYPE(RTCVideoFrame) *)frame {
  @autoreleasepool {
    // Block until the command buffer currently in flight on the GPU has
    // completed, so at most kMaxInflightBuffers frames are processed at once.
    dispatch_semaphore_wait(_inflight_semaphore, DISPATCH_TIME_FOREVER);
    if (![self setupTexturesForFrame:frame]) {
      // Texture setup failed (e.g. unusable frame data); release the
      // semaphore immediately so the next drawFrame: call can proceed.
      dispatch_semaphore_signal(_inflight_semaphore);
      return;
    }
    // Textures are ready; encode and submit the render pass. Its GPU
    // completion handler signals the semaphore.
    [self render];
  }
}
2.RTCMTLRenderer其他部分
// 导入一个私有的头文件,这个文件可能包含了RTCMTLRenderer的私有扩展方法或属性
#import "RTCMTLRenderer+Private.h"
// 导入Metal框架,用于图形和计算操作
// 导入MetalKit框架,提供了一些便利的类和方法来使用Metal
#import <Metal/Metal.h>
#import <MetalKit/MetalKit.h>
// 导入其他辅助类
#import "base/RTCLogging.h"
#import "base/RTCVideoFrame.h"
#import "base/RTCVideoFrameBuffer.h"
// 包含视频旋转的定义,这些定义可能在WebRTC的api中
#include "api/video/video_rotation.h"
#include "rtc_base/checks.h"
// Entry-point names of the vertex and fragment shader functions looked up
// in the compiled Metal library (see loadAssets).
static NSString *const vertexFunctionName = @"vertexPassthrough";
static NSString *const fragmentFunctionName = @"fragmentColorConversion";
// Debug labels attached to Metal objects so they are identifiable in a GPU
// frame capture or in Instruments.
static NSString *const pipelineDescriptorLabel = @"RTCPipeline";
static NSString *const commandBufferLabel = @"RTCCommandBuffer";
static NSString *const renderEncoderLabel = @"RTCEncoder";
static NSString *const renderEncoderDebugGroup = @"RTCDrawFrame";
// Computes vertex data for rendering a cropped, rotated video frame as a
// full-screen quad. Writes 16 floats into `buffer`: 4 vertices, each as
// (clip-space x, clip-space y, texture u, texture v), in triangle-strip
// order bottom-left, bottom-right, top-left, top-right. The crop rectangle
// is given in pixels and normalized against the frame size to produce
// texture coordinates in [0, 1].
static inline void getCubeVertexData(int cropX,
                                     int cropY,
                                     int cropWidth,
                                     int cropHeight,
                                     size_t frameWidth,
                                     size_t frameHeight,
                                     RTCVideoRotation rotation,
                                     float *buffer) {
  // Normalized horizontal (u) and vertical (v) bounds of the crop rect.
  const float uMin = cropX / (float)frameWidth;
  const float uMax = (cropX + cropWidth) / (float)frameWidth;
  const float vMin = cropY / (float)frameHeight;
  const float vMax = (cropY + cropHeight) / (float)frameHeight;

  // Texture coordinates for the four quad corners (bottom-left,
  // bottom-right, top-left, top-right), permuted to realize the rotation.
  float texCoords[8];
  switch (rotation) {
    case RTCVideoRotation_0: {
      const float t[8] = {uMin, vMax, uMax, vMax, uMin, vMin, uMax, vMin};
      memcpy(texCoords, t, sizeof(t));
    } break;
    case RTCVideoRotation_90: {
      const float t[8] = {uMax, vMax, uMax, vMin, uMin, vMax, uMin, vMin};
      memcpy(texCoords, t, sizeof(t));
    } break;
    case RTCVideoRotation_180: {
      const float t[8] = {uMax, vMin, uMin, vMin, uMax, vMax, uMin, vMax};
      memcpy(texCoords, t, sizeof(t));
    } break;
    case RTCVideoRotation_270: {
      const float t[8] = {uMin, vMin, uMin, vMax, uMax, vMin, uMax, vMax};
      memcpy(texCoords, t, sizeof(t));
    } break;
    default:
      // Unknown rotation: leave `buffer` untouched, matching the behavior
      // of a switch with no matching case.
      return;
  }

  // Clip-space positions of the quad corners; fixed regardless of rotation.
  static const float positions[8] = {-1.0, -1.0, 1.0, -1.0, -1.0, 1.0, 1.0, 1.0};
  for (int i = 0; i < 4; i++) {
    buffer[4 * i + 0] = positions[2 * i + 0];
    buffer[4 * i + 1] = positions[2 * i + 1];
    buffer[4 * i + 2] = texCoords[2 * i + 0];
    buffer[4 * i + 3] = texCoords[2 * i + 1];
  }
}
// Maximum number of command buffers allowed in flight on the GPU at once.
// Currently 1; a triple-buffering scheme could raise this in the future.
static const NSInteger kMaxInflightBuffers = 1;
// Implementation of the base Metal renderer.
@implementation RTCMTLRenderer {
  // The MTKView this renderer draws into.
  __kindof MTKView *_view;
  // Throttles CPU frame submission to kMaxInflightBuffers in-flight buffers.
  dispatch_semaphore_t _inflight_semaphore;
  // Renderer.
  id<MTLDevice> _device;
  id<MTLCommandQueue> _commandQueue;
  id<MTLLibrary> _defaultLibrary;
  id<MTLRenderPipelineState> _pipelineState;
  // Buffers.
  id<MTLBuffer> _vertexBuffer;
  // Values affecting the vertex buffer. Stored for comparison to avoid unnecessary recreation.
  int _oldFrameWidth;
  int _oldFrameHeight;
  int _oldCropWidth;
  int _oldCropHeight;
  int _oldCropX;
  int _oldCropY;
  RTCVideoRotation _oldRotation;
}
// Backing ivar for the rotationOverride property declared in the header.
@synthesize rotationOverride = _rotationOverride;
// Designated initializer: creates the semaphore that limits how many
// command buffers may be in flight on the GPU simultaneously.
- (instancetype)init {
  self = [super init];
  if (self != nil) {
    _inflight_semaphore = dispatch_semaphore_create(kMaxInflightBuffers);
  }
  return self;
}
// Registers `view` as the rendering destination; returns whether the Metal
// environment could be set up for it.
- (BOOL)addRenderingDestination:(__kindof MTKView *)view {
  BOOL const didSetUp = [self setupWithView:view];
  return didSetUp;
}
#pragma mark - Private
// Attaches the renderer to `view`, initializing the Metal device, pipeline
// assets, and the vertex buffer. Returns NO when Metal is unavailable.
- (BOOL)setupWithView:(__kindof MTKView *)view {
  if (![self setupMetal]) {
    return NO;
  }
  _view = view;
  view.device = _device;
  view.preferredFramesPerSecond = 60;  // Target refresh rate for the view.
  view.autoResizeDrawable = NO;        // Drawable size is managed manually.
  // Compile shaders and build the pipeline state.
  [self loadAssets];

  // Allocate the vertex buffer (4 vertices x 4 floats), zero-initialized;
  // real coordinates are written later via setupTexturesForFrame:.
  float initialVertexData[16] = {0};
  _vertexBuffer = [_device newBufferWithBytes:initialVertexData
                                       length:sizeof(initialVertexData)
                                      options:MTLResourceCPUCacheModeWriteCombined];
  return YES;
}
#pragma mark - Inheritance
// Methods in this section are hooks intended for subclasses.
// Returns the Metal device currently used by this renderer.
- (id<MTLDevice>)currentMetalDevice {
  return _device;
}
// Returns the Metal shader source; must be overridden by the subclass
// (this base implementation asserts).
- (NSString *)shaderSource {
  RTC_DCHECK_NOTREACHED() << "Virtual method not implemented in subclass.";
  return nil;
}
// Binds the current frame's textures onto `renderEncoder`; must be
// overridden by the subclass (this base implementation asserts).
- (void)uploadTexturesToRenderEncoder:(id<MTLRenderCommandEncoder>)renderEncoder {
  RTC_DCHECK_NOTREACHED() << "Virtual method not implemented in subclass.";
}
// Writes the frame's pixel size and crop rectangle into the out-params;
// must be overridden by the subclass (this base implementation asserts).
- (void)getWidth:(int *)width
          height:(int *)height
       cropWidth:(int *)cropWidth
      cropHeight:(int *)cropHeight
           cropX:(int *)cropX
           cropY:(int *)cropY
         ofFrame:(nonnull RTC_OBJC_TYPE(RTCVideoFrame) *)frame {
  RTC_DCHECK_NOTREACHED() << "Virtual method not implemented in subclass.";
}
// Prepares per-frame GPU state: resolves the effective rotation, queries
// the frame geometry from the subclass, and rewrites the vertex buffer when
// any of crop/rotation/frame size changed since the previous frame.
// This base implementation always returns YES; subclasses override it to
// additionally create the textures, and may fail.
- (BOOL)setupTexturesForFrame:(nonnull RTC_OBJC_TYPE(RTCVideoFrame) *)frame {
  // Use the rotation override if set, otherwise the frame's own rotation.
  RTCVideoRotation rotation;
  NSValue *rotationOverride = self.rotationOverride;
  if (rotationOverride) {
#if defined(__IPHONE_11_0) && defined(__IPHONE_OS_VERSION_MAX_ALLOWED) && \
    (__IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_11_0)
    if (@available(iOS 11, *)) {
      // getValue:size: (iOS 11+) validates the size of the extracted value.
      [rotationOverride getValue:&rotation size:sizeof(rotation)];
    } else
#endif
    {
      [rotationOverride getValue:&rotation];
    }
  } else {
    rotation = frame.rotation;
  }
  // Frame geometry is supplied by the subclass override of getWidth:...
  int frameWidth, frameHeight, cropWidth, cropHeight, cropX, cropY;
  [self getWidth:&frameWidth
          height:&frameHeight
       cropWidth:&cropWidth
      cropHeight:&cropHeight
           cropX:&cropX
           cropY:&cropY
         ofFrame:frame];
  // Recompute the texture coordinates only when an input actually changed,
  // to avoid rewriting the vertex buffer on every frame.
  if (cropX != _oldCropX || cropY != _oldCropY || cropWidth != _oldCropWidth ||
      cropHeight != _oldCropHeight || rotation != _oldRotation || frameWidth != _oldFrameWidth ||
      frameHeight != _oldFrameHeight) {
    getCubeVertexData(cropX,
                      cropY,
                      cropWidth,
                      cropHeight,
                      frameWidth,
                      frameHeight,
                      rotation,
                      (float *)_vertexBuffer.contents);
    // Remember the inputs so the next identical frame can skip the rewrite.
    _oldCropX = cropX;
    _oldCropY = cropY;
    _oldCropWidth = cropWidth;
    _oldCropHeight = cropHeight;
    _oldRotation = rotation;
    _oldFrameWidth = frameWidth;
    _oldFrameHeight = frameHeight;
  }
  return YES;
}
#pragma mark - GPU methods
// 以下方法涉及到GPU操作
// setupMetal方法用于设置Metal环境
// Initializes the Metal device, command queue, and shader library.
// Returns NO if no Metal device is available or shader compilation fails.
- (BOOL)setupMetal {
  // Set the view to use the default device.
  _device = MTLCreateSystemDefaultDevice();
  if (!_device) {
    return NO;
  }
  // Command queue used to submit all rendering work for this renderer.
  _commandQueue = [_device newCommandQueue];
  // Compile the shader library from the subclass-provided source.
  NSError *libraryError = nil;
  NSString *shaderSource = [self shaderSource];
  id<MTLLibrary> sourceLibrary =
      [_device newLibraryWithSource:shaderSource options:nil error:&libraryError];
  // Per Cocoa conventions, check the returned object rather than the error:
  // Metal may populate `libraryError` with warnings even on success, and the
  // original error-first check would then wrongly abort setup.
  if (!sourceLibrary) {
    RTCLogError(@"Metal: Library with source failed\n%@", libraryError);
    return NO;
  }
  _defaultLibrary = sourceLibrary;
  return YES;
}
// Loads the shader entry points from the compiled library and builds the
// render pipeline state used by render.
- (void)loadAssets {
  id<MTLFunction> vertexFunction = [_defaultLibrary newFunctionWithName:vertexFunctionName];
  id<MTLFunction> fragmentFunction = [_defaultLibrary newFunctionWithName:fragmentFunctionName];
  // Fail fast with a clear log if either shader entry point is missing;
  // pipeline creation below would otherwise fail less descriptively.
  if (!vertexFunction || !fragmentFunction) {
    RTCLogError(@"Metal: Failed to load shader functions %@ / %@.",
                vertexFunctionName,
                fragmentFunctionName);
    return;
  }
  // Describe the pipeline: shaders, a color attachment matching the view's
  // pixel format, and no depth attachment (pure 2D draw of the video frame).
  MTLRenderPipelineDescriptor *pipelineDescriptor = [[MTLRenderPipelineDescriptor alloc] init];
  pipelineDescriptor.label = pipelineDescriptorLabel;
  pipelineDescriptor.vertexFunction = vertexFunction;
  pipelineDescriptor.fragmentFunction = fragmentFunction;
  pipelineDescriptor.colorAttachments[0].pixelFormat = _view.colorPixelFormat;
  pipelineDescriptor.depthAttachmentPixelFormat = MTLPixelFormatInvalid;
  NSError *error = nil;
  _pipelineState = [_device newRenderPipelineStateWithDescriptor:pipelineDescriptor error:&error];
  // Check the returned state, not `error`, per Cocoa error conventions.
  if (!_pipelineState) {
    RTCLogError(@"Metal: Failed to create pipeline state. %@", error);
  }
}
// render: this rendering pass was already walked through, with annotations, at the top of this article.
// Encodes and submits one render pass that draws the current textures into
// the MTKView's drawable.
- (void)render {
  id<MTLCommandBuffer> buffer = [_commandQueue commandBuffer];
  buffer.label = commandBufferLabel;

  // Capture only the semaphore (not self) in the completion handler; it is
  // signaled when the GPU finishes executing this command buffer.
  __block dispatch_semaphore_t semaphore = _inflight_semaphore;
  [buffer addCompletedHandler:^(id<MTLCommandBuffer> _Nonnull completed) {
    dispatch_semaphore_signal(semaphore);
  }];

  MTLRenderPassDescriptor *passDescriptor = _view.currentRenderPassDescriptor;
  if (passDescriptor != nil) {
    // A valid drawable exists; encode the full-screen quad draw.
    id<MTLRenderCommandEncoder> encoder =
        [buffer renderCommandEncoderWithDescriptor:passDescriptor];
    encoder.label = renderEncoderLabel;
    [encoder pushDebugGroup:renderEncoderDebugGroup];
    [encoder setRenderPipelineState:_pipelineState];
    [encoder setVertexBuffer:_vertexBuffer offset:0 atIndex:0];
    [self uploadTexturesToRenderEncoder:encoder];
    // 4-vertex triangle strip, one instance.
    [encoder drawPrimitives:MTLPrimitiveTypeTriangleStrip
                vertexStart:0
                vertexCount:4
              instanceCount:1];
    [encoder popDebugGroup];
    [encoder endEncoding];
    [buffer presentDrawable:_view.currentDrawable];
  }

  // CPU encoding is complete; hand the buffer to the GPU.
  [buffer commit];
}
#pragma mark - RTCMTLRenderer
// Draws the given video frame into the attached MTKView.
- (void)drawFrame:(RTC_OBJC_TYPE(RTCVideoFrame) *)frame {
  @autoreleasepool {
    // Wait for the in-flight command buffer to finish its GPU work before
    // encoding the next frame.
    dispatch_semaphore_wait(_inflight_semaphore, DISPATCH_TIME_FOREVER);
    BOOL texturesReady = [self setupTexturesForFrame:frame];
    if (texturesReady) {
      [self render];  // render's completion handler signals the semaphore.
    } else {
      // Nothing was submitted to the GPU; hand the slot back right away.
      dispatch_semaphore_signal(_inflight_semaphore);
    }
  }
}
@end