<meta name="viewport" content="width=1000">
<title>WebGPU Cube demo</title>
<script src="scripts/gl-matrix-min.js"></script>
<link rel="stylesheet" href="css/style.css"/>
</head>
<body>
<canvas></canvas>
<script>

const positionAttributeNum = 0;
const texCoordsAttributeNum = 1;

const transformBindingNum = 0;
const textureBindingNum = 1;
const samplerBindingNum = 2;

const vertexBufferIndex = 0;
const bindGroupIndex = 0;
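
// The attribute numbers above are interpolated into the shader as [[attribute(n)]]
// slots, the binding numbers as [[id(n)]] members of the Metal argument buffers, and
// vertexBufferIndex / bindGroupIndex name the vertex-buffer and bind-group slots used
// when encoding the render pass.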

const shader = `
#include <metal_stdlib>
using namespace metal;

struct Vertex {
    float4 position [[attribute(${positionAttributeNum})]];
    float2 texCoords [[attribute(${texCoordsAttributeNum})]];
};

struct FragmentData {
    float4 position [[position]];
    float2 texCoords;
};

struct Uniform {
    device float4x4* modelViewProjectionMatrix [[id(${transformBindingNum})]];
};

struct SampledTexture {
    texture2d<float> faceTexture [[id(${textureBindingNum})]];
    sampler faceSampler [[id(${samplerBindingNum})]];
};

vertex FragmentData vertex_main(Vertex vertexIn [[stage_in]],
                                const device Uniform& uniforms [[buffer(${bindGroupIndex})]])
{
    FragmentData output;
    output.position = uniforms.modelViewProjectionMatrix[0] * vertexIn.position;
    output.texCoords = vertexIn.texCoords;
    return output;
}

fragment float4 fragment_main(FragmentData data [[stage_in]],
                              const device SampledTexture& args [[buffer(${bindGroupIndex})]])
{
    float4 color = args.faceTexture.sample(args.faceSampler, data.texCoords);
    return color;
}
`;
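
// The transform, texture, and sampler bindings are declared as [[id(n)]] members of
// Uniform and SampledTexture (Metal argument buffers); the bind group created below
// supplies those resources and is bound at the buffer index given by bindGroupIndex.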

let device, swapChain, verticesBuffer, bindGroupLayout, pipeline, renderPassDescriptor, queue, textureViewBinding, samplerBinding;
let projectionMatrix = mat4.create();

const texCoordsOffset = 4 * 4; // texCoords follow a float4 position (16 bytes).
const vertexSize = 4 * 6;      // float4 position + float2 texCoords = 6 floats (24 bytes) per vertex.

function createVerticesArray() {
    return new Float32Array([
        // float4 position, float2 texCoords
        // ... 36 interleaved cube vertices elided ...
    ]);
}

async function init() {
    const adapter = await navigator.gpu.requestAdapter();
    device = await adapter.requestDevice();

    const canvas = document.querySelector('canvas');
    const canvasSize = canvas.getBoundingClientRect();
    canvas.width = canvasSize.width;
    canvas.height = canvasSize.height;

    const aspect = Math.abs(canvas.width / canvas.height);
    mat4.perspective(projectionMatrix, (2 * Math.PI) / 5, aspect, 1, 100.0);

    const context = canvas.getContext('gpu');

    const swapChainDescriptor = {
        device: device,
        format: "bgra8unorm" // Matches the color state format used by the pipeline below.
    };
    swapChain = context.configureSwapChain(swapChainDescriptor);

    // WebKit's WebGPU prototype accepts only Metal Shading Language (MSL) for now.
    const shaderModuleDescriptor = { code: shader };
    const shaderModule = device.createShaderModule(shaderModuleDescriptor);

    const verticesArray = createVerticesArray();
    const verticesBufferDescriptor = {
        size: verticesArray.byteLength,
        usage: GPUBufferUsage.VERTEX | GPUBufferUsage.TRANSFER_DST
    };
    verticesBuffer = device.createBuffer(verticesBufferDescriptor);
    verticesBuffer.setSubData(0, verticesArray.buffer);
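
    // setSubData() copies the interleaved vertex data into GPU memory; the buffer is
    // created with VERTEX | TRANSFER_DST so it can be filled here and then read as
    // vertex input during rendering.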

    // Input state. This model will change soon to adopt one of the ideas in https://github.com/kainino0x/gpuweb/pull/2/.
    const positionAttributeDescriptor = {
        shaderLocation: positionAttributeNum, // [[attribute(0)]]
        inputSlot: vertexBufferIndex,         // Used as the vertex buffer index in Metal.
        offset: 0,
        format: "float4"
    };
    const texCoordsAttributeDescriptor = {
        shaderLocation: texCoordsAttributeNum,
        inputSlot: vertexBufferIndex,
        offset: texCoordsOffset,
        format: "float2"
    };
    const vertexBufferDescriptor = {
        inputSlot: vertexBufferIndex,
        stride: vertexSize
    };
    const inputStateDescriptor = {
        indexFormat: "uint32",
        attributes: [positionAttributeDescriptor, texCoordsAttributeDescriptor],
        inputs: [vertexBufferDescriptor]
    };
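
    // Each vertex occupies vertexSize (24) bytes: a float4 position at offset 0 followed
    // by a float2 texCoords at texCoordsOffset (16), matching the layout of the array
    // returned by createVerticesArray().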

    // Load the texture image.
    const image = new Image();
    const imageLoadPromise = new Promise(resolve => {
        image.onload = () => resolve();
        image.src = "resources/safari-alpha.png";
    });
    await imageLoadPromise;

    const textureSize = {
        width: image.width,
        height: image.height,
        depth: 1
    };

    const textureDescriptor = {
        size: textureSize,
        format: "rgba8unorm",
        usage: GPUTextureUsage.TRANSFER_DST | GPUTextureUsage.SAMPLED
    };
    const texture = device.createTexture(textureDescriptor);

    // Draw the image into a 2D canvas to get at its RGBA pixel data.
    const canvas2d = document.createElement('canvas');
    canvas2d.width = image.width;
    canvas2d.height = image.height;
    const context2d = canvas2d.getContext('2d');
    context2d.drawImage(image, 0, 0);

    const imageData = context2d.getImageData(0, 0, image.width, image.height);

    const textureDataBufferDescriptor = {
        size: imageData.data.length,
        usage: GPUBufferUsage.TRANSFER_SRC | GPUBufferUsage.TRANSFER_DST
    };
    const textureDataBuffer = device.createBuffer(textureDataBufferDescriptor);
    textureDataBuffer.setSubData(0, imageData.data.buffer);

    const dataCopyView = {
        buffer: textureDataBuffer,
        offset: 0,
        rowPitch: image.width * 4 // Bytes per row of RGBA8 pixels.
    };
    const textureCopyView = {
        texture: texture,
        origin: { x: 0, y: 0, z: 0 }
    };

    const blitCommandEncoder = device.createCommandEncoder();
    blitCommandEncoder.copyBufferToTexture(dataCopyView, textureCopyView, textureSize);

    queue = device.getQueue();
    queue.submit([blitCommandEncoder.finish()]);
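
    // The decoded image takes a detour through a TRANSFER_SRC staging buffer: the pixels
    // are written into the buffer with setSubData(), then the blit command above copies
    // them into the sampled texture on the GPU queue.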

    // Bind group binding layout
    const transformBufferBindGroupLayoutBinding = {
        binding: transformBindingNum, // [[id(0)]]
        visibility: GPUShaderStageBit.VERTEX,
        type: "uniform-buffer"
    };

    const textureBindGroupLayoutBinding = {
        binding: textureBindingNum,
        visibility: GPUShaderStageBit.FRAGMENT,
        type: "sampled-texture"
    };
    textureViewBinding = {
        binding: textureBindingNum,
        resource: texture.createDefaultView()
    };

    const samplerBindGroupLayoutBinding = {
        binding: samplerBindingNum,
        visibility: GPUShaderStageBit.FRAGMENT,
        type: "sampler"
    };
    samplerBinding = {
        binding: samplerBindingNum,
        resource: device.createSampler({})
    };

    const bindGroupLayoutDescriptor = {
        bindings: [transformBufferBindGroupLayoutBinding, textureBindGroupLayoutBinding, samplerBindGroupLayoutBinding]
    };
    bindGroupLayout = device.createBindGroupLayout(bindGroupLayoutDescriptor);
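
    // The bind group layout describes the shape of the bind group: a uniform buffer for
    // the transform, a sampled texture, and a sampler. The texture and sampler bindings
    // created above are reused for every bind group; the uniform-buffer binding is filled
    // in by createBindGroupDescriptor() in the render loop.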

    const depthStateDescriptor = {
        depthWriteEnabled: true,
        depthCompare: "less"
    };

    const pipelineLayoutDescriptor = { bindGroupLayouts: [bindGroupLayout] };
    const pipelineLayout = device.createPipelineLayout(pipelineLayoutDescriptor);

    const vertexStageDescriptor = {
        module: shaderModule,
        entryPoint: "vertex_main"
    };
    const fragmentStageDescriptor = {
        module: shaderModule,
        entryPoint: "fragment_main"
    };

    const colorState = {
        format: "bgra8unorm",
        alphaBlend: {
            srcFactor: "src-alpha",
            dstFactor: "one-minus-src-alpha",
            operation: "add"
        },
        colorBlend: {
            srcFactor: "src-alpha",
            dstFactor: "one-minus-src-alpha",
            operation: "add"
        },
        writeMask: GPUColorWriteBits.ALL
    };

    const pipelineDescriptor = {
        layout: pipelineLayout,

        vertexStage: vertexStageDescriptor,
        fragmentStage: fragmentStageDescriptor,

        primitiveTopology: "triangle-list",
        colorStates: [colorState],
        depthStencilState: depthStateDescriptor,
        inputState: inputStateDescriptor
    };
    pipeline = device.createRenderPipeline(pipelineDescriptor);
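
    // The render pipeline ties together the shader stages, vertex input state, blend and
    // depth state, and the bind group layout; the per-frame work below is limited to
    // updating the transform buffer and encoding draw calls.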

    let colorAttachment = {
        // attachment is acquired in the render loop.
        loadOp: "clear",
        storeOp: "store",
        clearColor: { r: 0.5, g: 1.0, b: 1.0, a: 1.0 } // GPUColor
    };

    // Depth stencil texture
    const depthTextureDescriptor = {
        size: {
            width: canvas.width,
            height: canvas.height,
            depth: 1
        },
        format: "depth32float-stencil8",
        usage: GPUTextureUsage.OUTPUT_ATTACHMENT
    };
    const depthTexture = device.createTexture(depthTextureDescriptor);

    // GPURenderPassDepthStencilAttachmentDescriptor
    const depthAttachment = {
        attachment: depthTexture.createDefaultView(),
        depthLoadOp: "clear",
        depthStoreOp: "store",
        clearDepth: 1.0
    };

    // GPURenderPassDescriptor
    renderPassDescriptor = {
        colorAttachments: [colorAttachment],
        depthStencilAttachment: depthAttachment
    };

    render();
}

/* Transform Buffers and Bindings */

const transformSize = 4 * 16; // One 4x4 matrix of 32-bit floats (64 bytes).

function updateTransformArray(array) {
    const viewMatrix = mat4.create();
    mat4.translate(viewMatrix, viewMatrix, vec3.fromValues(0, 0, -5));
    const now = Date.now() / 1000;
    // Tumble the cube: rotate by a fixed angle around a time-varying axis.
    mat4.rotate(viewMatrix, viewMatrix, 1, vec3.fromValues(Math.sin(now), Math.cos(now), 0));
    const modelViewProjectionMatrix = mat4.create();
    mat4.multiply(modelViewProjectionMatrix, projectionMatrix, viewMatrix);
    for (let i = 0; i < 16; i++) {
        array[i] = modelViewProjectionMatrix[i];
    }
}
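
// The vertex shader reads this matrix as uniforms.modelViewProjectionMatrix[0], i.e. the
// first (and only) float4x4 stored in the uniform buffer.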

const transformBufferDescriptor = {
    size: transformSize,
    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.MAP_WRITE
};

function createBindGroupDescriptor(transformBuffer, textureViewBinding, samplerBinding) {
    const transformBufferBinding = {
        buffer: transformBuffer,
        offset: 0,
        size: transformSize
    };
    const transformBufferBindGroupBinding = {
        binding: transformBindingNum,
        resource: transformBufferBinding
    };
    return {
        layout: bindGroupLayout,
        bindings: [transformBufferBindGroupBinding, textureViewBinding, samplerBinding]
    };
}
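
// mappedGroups holds uniform buffers (and their bind groups) whose memory is currently
// mapped and ready to be written. render() takes one, writes the new transform, draws
// with it, and re-maps it once the GPU is done, so buffers are recycled rather than
// recreated every frame.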

let mappedGroups = [];

function render() {
    if (mappedGroups.length === 0) {
        // No recycled buffer is ready yet: create a new uniform buffer and bind group,
        // and draw once its memory is mapped for writing.
        const buffer = device.createBuffer(transformBufferDescriptor);
        buffer.mapWriteAsync().then(arrayBuffer => {
            const group = device.createBindGroup(createBindGroupDescriptor(buffer, textureViewBinding, samplerBinding));
            const mappedGroup = { buffer: buffer, arrayBuffer: arrayBuffer, bindGroup: group };
            drawCommands(mappedGroup);
        });
    } else
        drawCommands(mappedGroups.shift());
}

function drawCommands(mappedGroup) {
    // Write the latest model-view-projection matrix into the mapped buffer, then unmap
    // it so the GPU can read it.
    updateTransformArray(new Float32Array(mappedGroup.arrayBuffer));
    mappedGroup.buffer.unmap();

    const commandEncoder = device.createCommandEncoder();
    renderPassDescriptor.colorAttachments[0].attachment = swapChain.getCurrentTexture().createDefaultView();
    const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);

    // Encode drawing commands.
    passEncoder.setPipeline(pipeline);
    // Vertex attributes
    passEncoder.setVertexBuffers(vertexBufferIndex, [verticesBuffer], [0]);
    // Bind groups
    passEncoder.setBindGroup(bindGroupIndex, mappedGroup.bindGroup);
    passEncoder.draw(36, 1, 0, 0); // 36 vertices, 1 instance.
    passEncoder.endPass();

    queue.submit([commandEncoder.finish()]);

    // Ready the current buffer for update after the GPU is done with it.
    mappedGroup.buffer.mapWriteAsync().then((arrayBuffer) => {
        mappedGroup.arrayBuffer = arrayBuffer;
        mappedGroups.push(mappedGroup);
    });

    requestAnimationFrame(render);
}