Code walkthrough:
import { makeSample, SampleInit } from '../../components/SampleLayout';
import fullscreenTexturedQuadWGSL from '../../shaders/fullscreenTexturedQuad.wgsl';
import sampleExternalTextureWGSL from '../../shaders/sampleExternalTexture.frag.wgsl';
const init: SampleInit = async ({ canvas, pageState, gui }) => {
// Set video element
const video = document.createElement('video');
video.loop = true;
video.autoplay = true;
video.muted = true;
video.src = '../assets/video/pano.webm';
await video.play();
const adapter = await navigator.gpu.requestAdapter();
const device = await adapter.requestDevice();
if (!pageState.active) return;
const context = canvas.getContext('webgpu') as GPUCanvasContext;
const devicePixelRatio = window.devicePixelRatio;
canvas.width = canvas.clientWidth * devicePixelRatio;
canvas.height = canvas.clientHeight * devicePixelRatio;
const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
context.configure({
device,
format: presentationFormat,
alphaMode: 'premultiplied',
});
const pipeline = device.createRenderPipeline({
layout: 'auto',
vertex: {
module: device.createShaderModule({
code: fullscreenTexturedQuadWGSL,
}),
entryPoint: 'vert_main',
},
fragment: {
module: device.createShaderModule({
code: sampleExternalTextureWGSL,
}),
entryPoint: 'main',
targets: [
{
format: presentationFormat,
},
],
},
primitive: {
topology: 'triangle-list',
},
});
const sampler = device.createSampler({
magFilter: 'linear',
minFilter: 'linear',
});
const settings = {
requestFrame: 'requestAnimationFrame',
};
gui.add(settings, 'requestFrame', [
'requestAnimationFrame',
'requestVideoFrameCallback',
]);
function frame() {
// Sample is no longer the active page.
if (!pageState.active) return;
// Create a VideoFrame from the current contents of the video element
const videoFrame = new VideoFrame(video);
const uniformBindGroup = device.createBindGroup({
layout: pipeline.getBindGroupLayout(0),
entries: [
{
binding: 1,
resource: sampler,
},
{
binding: 2,
resource: device.importExternalTexture({
// Pass the VideoFrame as the external texture source
source: videoFrame as any, // eslint-disable-line @typescript-eslint/no-explicit-any
}),
},
],
});
const commandEncoder = device.createCommandEncoder();
const textureView = context.getCurrentTexture().createView();
const renderPassDescriptor: GPURenderPassDescriptor = {
colorAttachments: [
{
view: textureView,
clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },
loadOp: 'clear',
storeOp: 'store',
},
],
};
const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
passEncoder.setPipeline(pipeline);
passEncoder.setBindGroup(0, uniformBindGroup);
passEncoder.draw(6);
passEncoder.end();
device.queue.submit([commandEncoder.finish()]);
if (settings.requestFrame === 'requestVideoFrameCallback') {
video.requestVideoFrameCallback(frame);
} else {
requestAnimationFrame(frame);
}
}
// Start the render loop; without this initial call, frame() would never run.
if (settings.requestFrame === 'requestVideoFrameCallback') {
video.requestVideoFrameCallback(frame);
} else {
requestAnimationFrame(frame);
}
};
Vertex shader (fullscreenTexturedQuad.wgsl):
@group(0) @binding(0) var mySampler : sampler;
@group(0) @binding(1) var myTexture : texture_2d<f32>;
struct VertexOutput {
@builtin(position) Position : vec4<f32>,
@location(0) fragUV : vec2<f32>,
}
@vertex
fn vert_main(@builtin(vertex_index) VertexIndex : u32) -> VertexOutput {
const pos = array(
vec2( 1.0, 1.0),
vec2( 1.0, -1.0),
vec2(-1.0, -1.0),
vec2( 1.0, 1.0),
vec2(-1.0, -1.0),
vec2(-1.0, 1.0),
);
const uv = array(
vec2(1.0, 0.0),
vec2(1.0, 1.0),
vec2(0.0, 1.0),
vec2(1.0, 0.0),
vec2(0.0, 1.0),
vec2(0.0, 0.0),
);
var output : VertexOutput;
output.Position = vec4(pos[VertexIndex], 0.0, 1.0);
output.fragUV = uv[VertexIndex];
return output;
}
@fragment
fn frag_main(@location(0) fragUV : vec2<f32>) -> @location(0) vec4<f32> {
return textureSample(myTexture, mySampler, fragUV);
}
Fragment shader (sampleExternalTexture.frag.wgsl). It binds the imported texture as texture_external and samples it with textureSampleBaseClampToEdge, the sampling function WGSL provides for external textures:
@group(0) @binding(1) var mySampler: sampler;
@group(0) @binding(2) var myTexture: texture_external;
@fragment
fn main(@location(0) fragUV : vec2<f32>) -> @location(0) vec4<f32> {
return textureSampleBaseClampToEdge(myTexture, mySampler, fragUV);
}
Summary
- When creating the bind group, the video resource can also be supplied as a VideoFrame (passed to device.importExternalTexture as the source); this type gives access to per-frame information such as the timestamp and coded dimensions (see the sketch below).
- Everything else is the same as in the previous video sample.
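As a rough sketch of what that per-frame information looks like, the body of frame() could read the VideoFrame metadata before importing it, and release the frame once the commands have been submitted. This is only an illustration built on the sample above (video and device are the variables from that code); the properties shown (timestamp, codedWidth, codedHeight, duration) and close() come from the WebCodecs VideoFrame interface, and whether an explicit close() is needed here depends on how eagerly you want to release decoder resources.

// Sketch: inspect per-frame metadata before importing the external texture.
const videoFrame = new VideoFrame(video);
console.log(
  videoFrame.timestamp,   // presentation timestamp in microseconds
  videoFrame.codedWidth,  // decoded frame width in pixels
  videoFrame.codedHeight, // decoded frame height in pixels
  videoFrame.duration     // frame duration in microseconds (may be null)
);
const externalTexture = device.importExternalTexture({
  source: videoFrame as any, // eslint-disable-line @typescript-eslint/no-explicit-any
});
// ...create the bind group with externalTexture and submit the render pass as above...
// Release the frame's underlying resources once it is no longer needed.
videoFrame.close();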