[Web GPU] Indexed drawing and GPUCommandEncoder crash prevention
[WebKit-https.git] / Source / WebCore / platform / graphics / gpu / cocoa / GPURenderPassEncoderMetal.mm
1 /*
2  * Copyright (C) 2018 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
14  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
15  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
17  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
18  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
19  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
20  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
21  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
23  * THE POSSIBILITY OF SUCH DAMAGE.
24  */
25
26 #import "config.h"
27 #import "GPURenderPassEncoder.h"
28
29 #if ENABLE(WEBGPU)
30
31 #import "GPUBuffer.h"
32 #import "GPUColor.h"
33 #import "GPUCommandBuffer.h"
34 #import "GPURenderPassDescriptor.h"
35 #import "GPURenderPipeline.h"
36 #import "Logging.h"
37 #import "WHLSLVertexBufferIndexCalculator.h"
38 #import <Foundation/Foundation.h>
39 #import <Metal/Metal.h>
40 #import <wtf/BlockObjCExceptions.h>
41 #import <wtf/CheckedArithmetic.h>
42
43 namespace WebCore {
44
// Maps a WebGPU load operation to its Metal equivalent.
static MTLLoadAction loadActionForGPULoadOp(GPULoadOp op)
{
    switch (op) {
    case GPULoadOp::Clear:
        return MTLLoadActionClear;
    case GPULoadOp::Load:
        return MTLLoadActionLoad;
    }

    ASSERT_NOT_REACHED();
    // ASSERT_NOT_REACHED() is a no-op in release builds; without this return,
    // control would flow off the end of a non-void function (undefined behavior).
    return MTLLoadActionDontCare;
}
56
// Maps a WebGPU store operation to its Metal equivalent.
static MTLStoreAction storeActionForGPUStoreOp(GPUStoreOp op)
{
    switch (op) {
    case GPUStoreOp::Store:
        return MTLStoreActionStore;
    }

    ASSERT_NOT_REACHED();
    // ASSERT_NOT_REACHED() is a no-op in release builds; without this return,
    // control would flow off the end of a non-void function (undefined behavior).
    return MTLStoreActionDontCare;
}
66
// Copies each WebGPU color attachment's configuration onto the matching Metal
// color attachment descriptor. Returns false (after logging) if any attachment
// lacks a backing MTLTexture.
static bool populateMtlColorAttachmentsArray(MTLRenderPassColorAttachmentDescriptorArray *array, const Vector<GPURenderPassColorAttachmentDescriptor>& descriptors, const char* const functionName)
{
#if LOG_DISABLED
    UNUSED_PARAM(functionName);
#endif

    for (unsigned index = 0; index < descriptors.size(); ++index) {
        const auto& colorDescriptor = descriptors[index];
        auto texture = colorDescriptor.attachment->platformTexture();
        if (!texture) {
            LOG(WebGPU, "%s: Invalid MTLTexture for color attachment %u!", functionName, index);
            return false;
        }

        BEGIN_BLOCK_OBJC_EXCEPTIONS;

        auto mtlAttachment = retainPtr([array objectAtIndexedSubscript:index]);
        const auto& clearColor = colorDescriptor.clearColor;
        [mtlAttachment setTexture:texture];
        [mtlAttachment setClearColor:MTLClearColorMake(clearColor.r, clearColor.g, clearColor.b, clearColor.a)];
        [mtlAttachment setLoadAction:loadActionForGPULoadOp(colorDescriptor.loadOp)];
        [mtlAttachment setStoreAction:storeActionForGPUStoreOp(colorDescriptor.storeOp)];

        END_BLOCK_OBJC_EXCEPTIONS;
    }

    return true;
}
94
// Copies the WebGPU depth/stencil attachment's configuration onto the Metal
// depth attachment descriptor. Returns false (after logging) if the attachment
// lacks a backing MTLTexture.
static bool populateMtlDepthStencilAttachment(MTLRenderPassDepthAttachmentDescriptor *mtlAttachment, const GPURenderPassDepthStencilAttachmentDescriptor& descriptor, const char* const functionName)
{
#if LOG_DISABLED
    UNUSED_PARAM(functionName);
#endif

    auto texture = descriptor.attachment->platformTexture();
    if (!texture) {
        LOG(WebGPU, "%s: Invalid MTLTexture for depth attachment!", functionName);
        return false;
    }

    BEGIN_BLOCK_OBJC_EXCEPTIONS;

    [mtlAttachment setTexture:texture];
    [mtlAttachment setClearDepth:descriptor.clearDepth];
    [mtlAttachment setLoadAction:loadActionForGPULoadOp(descriptor.depthLoadOp)];
    [mtlAttachment setStoreAction:storeActionForGPUStoreOp(descriptor.depthStoreOp)];

    END_BLOCK_OBJC_EXCEPTIONS;

    return true;
}
117
// Hands every attachment texture over to the command buffer so each one is
// kept alive while the pass is in flight.
static void useAttachments(GPUCommandBuffer& buffer, GPURenderPassDescriptor&& descriptor)
{
    for (auto& colorAttachment : descriptor.colorAttachments)
        buffer.useTexture(WTFMove(colorAttachment.attachment));
    if (descriptor.depthStencilAttachment)
        buffer.useTexture(WTFMove(descriptor.depthStencilAttachment->attachment));
}
125
// Creates a render pass encoder on the given command buffer, or returns
// nullptr (after logging) if another encoder is active, the Metal objects
// cannot be created, or any attachment is invalid.
RefPtr<GPURenderPassEncoder> GPURenderPassEncoder::tryCreate(Ref<GPUCommandBuffer>&& buffer, GPURenderPassDescriptor&& descriptor)
{
    const char* const functionName = "GPURenderPassEncoder::tryCreate()";

    // Only one command encoder may be active at a time.
    if (buffer->isEncodingPass()) {
        // The format string expects an argument for %s; previously none was
        // passed, which is a format/argument mismatch.
        LOG(WebGPU, "%s: Existing pass encoder must be ended first!", functionName);
        return nullptr;
    }

    RetainPtr<MTLRenderPassDescriptor> mtlDescriptor;

    BEGIN_BLOCK_OBJC_EXCEPTIONS;

    mtlDescriptor = adoptNS([MTLRenderPassDescriptor new]);

    END_BLOCK_OBJC_EXCEPTIONS;

    if (!mtlDescriptor) {
        LOG(WebGPU, "%s: Unable to create MTLRenderPassDescriptor!", functionName);
        return nullptr;
    }

    if (!populateMtlColorAttachmentsArray(mtlDescriptor.get().colorAttachments, descriptor.colorAttachments, functionName))
        return nullptr;

    if (descriptor.depthStencilAttachment
        && !populateMtlDepthStencilAttachment(mtlDescriptor.get().depthAttachment, *descriptor.depthStencilAttachment, functionName))
        return nullptr;

    // Any in-progress blit encoding must be closed before a render encoder can start.
    buffer->endBlitEncoding();

    RetainPtr<MTLRenderCommandEncoder> mtlEncoder;

    BEGIN_BLOCK_OBJC_EXCEPTIONS;

    mtlEncoder = [buffer->platformCommandBuffer() renderCommandEncoderWithDescriptor:mtlDescriptor.get()];

    END_BLOCK_OBJC_EXCEPTIONS;

    if (!mtlEncoder) {
        LOG(WebGPU, "%s: Unable to create MTLRenderCommandEncoder!", functionName);
        return nullptr;
    }

    // All is well; ensure GPUCommandBuffer is aware of new attachments.
    useAttachments(buffer, WTFMove(descriptor));

    return adoptRef(new GPURenderPassEncoder(WTFMove(buffer), WTFMove(mtlEncoder)));
}
176
// Takes ownership of the native Metal encoder; the command buffer reference is
// held by the GPUProgrammablePassEncoder base class.
GPURenderPassEncoder::GPURenderPassEncoder(Ref<GPUCommandBuffer>&& commandBuffer, RetainPtr<MTLRenderCommandEncoder>&& encoder)
    : GPUProgrammablePassEncoder(WTFMove(commandBuffer))
    , m_platformRenderPassEncoder(WTFMove(encoder))
{
}
182
// Returns the underlying Metal encoder; the other member functions treat a
// null value here as "encoding has ended".
const MTLCommandEncoder *GPURenderPassEncoder::platformPassEncoder() const
{
    return m_platformRenderPassEncoder.get();
}
187
// Binds the render pipeline (and its optional depth/stencil state) on the
// Metal encoder, then caches the pipeline for later draw calls.
void GPURenderPassEncoder::setPipeline(Ref<const GPURenderPipeline>&& pipeline)
{
    if (!m_platformRenderPassEncoder) {
        LOG(WebGPU, "GPURenderPassEncoder::setPipeline(): Invalid operation: Encoding is ended!");
        return;
    }

    // FIXME: Metal throws an error if the MTLPipelineState's attachment formats do not match the MTLCommandEncoder's attachment formats.

    BEGIN_BLOCK_OBJC_EXCEPTIONS;

    auto depthStencilState = pipeline->depthStencilState();
    if (depthStencilState)
        [m_platformRenderPassEncoder setDepthStencilState:depthStencilState];

    [m_platformRenderPassEncoder setRenderPipelineState:pipeline->platformRenderPipeline()];

    END_BLOCK_OBJC_EXCEPTIONS;

    // draw()/drawIndexed() consult the cached pipeline for topology and index format.
    m_pipeline = WTFMove(pipeline);
}
208
// Sets the constant blend color used by blend operations on this encoder.
void GPURenderPassEncoder::setBlendColor(const GPUColor& color)
{
    if (!m_platformRenderPassEncoder) {
        LOG(WebGPU, "GPURenderPassEncoder::setBlendColor(): Invalid operation: Encoding is ended!");
        return;
    }

    BEGIN_BLOCK_OBJC_EXCEPTIONS;
    [m_platformRenderPassEncoder setBlendColorRed:color.r
        green:color.g
        blue:color.b
        alpha:color.a];
    END_BLOCK_OBJC_EXCEPTIONS;
}
220
// Sets the viewport transform for subsequent draws on this encoder.
void GPURenderPassEncoder::setViewport(float x, float y, float width, float height, float minDepth, float maxDepth)
{
    if (!m_platformRenderPassEncoder) {
        LOG(WebGPU, "GPURenderPassEncoder::setViewport(): Invalid operation: Encoding is ended!");
        return;
    }

    BEGIN_BLOCK_OBJC_EXCEPTIONS;
    MTLViewport viewport = { x, y, width, height, minDepth, maxDepth };
    [m_platformRenderPassEncoder setViewport:viewport];
    END_BLOCK_OBJC_EXCEPTIONS;
}
232
// Sets the scissor rectangle for subsequent draws on this encoder.
void GPURenderPassEncoder::setScissorRect(unsigned x, unsigned y, unsigned width, unsigned height)
{
    if (!m_platformRenderPassEncoder) {
        LOG(WebGPU, "GPURenderPassEncoder::setScissorRect(): Invalid operation: Encoding is ended!");
        return;
    }

    BEGIN_BLOCK_OBJC_EXCEPTIONS;
    MTLScissorRect scissorRect = { x, y, width, height };
    [m_platformRenderPassEncoder setScissorRect:scissorRect];
    END_BLOCK_OBJC_EXCEPTIONS;
}
244
// Validates and caches the index buffer and its byte offset for use by a
// later drawIndexed() call; nothing is sent to Metal here.
void GPURenderPassEncoder::setIndexBuffer(GPUBuffer& buffer, uint64_t offset)
{
    if (!m_platformRenderPassEncoder) {
        LOG(WebGPU, "GPURenderPassEncoder::setIndexBuffer(): Invalid operation: Encoding is ended!");
        return;
    }

    // The offset must lie inside the buffer and be 4-byte aligned.
    bool outOfBounds = offset >= buffer.byteLength();
    bool misaligned = offset % 4;
    if (outOfBounds || misaligned) {
        LOG(WebGPU, "GPURenderPassEncoder::setIndexBuffer(): Invalid offset!");
        return;
    }

    ASSERT(buffer.platformBuffer());
    // Buffer must be cached to provide it to Metal via drawIndexedPrimitives.
    m_indexBuffer = makeRefPtr(buffer);
    m_indexBufferOffset = offset;
}
260
// Binds a contiguous range of vertex buffers starting at the WHLSL-translated
// index, registering each buffer with the command buffer so it stays alive.
void GPURenderPassEncoder::setVertexBuffers(unsigned index, const Vector<Ref<GPUBuffer>>& buffers, const Vector<uint64_t>& offsets)
{
    if (!m_platformRenderPassEncoder) {
        LOG(WebGPU, "GPURenderPassEncoder::setVertexBuffers(): Invalid operation: Encoding is ended!");
        return;
    }

    ASSERT(buffers.size() && offsets.size() == buffers.size());

    BEGIN_BLOCK_OBJC_EXCEPTIONS;

    auto platformBuffers = buffers.map([this] (auto& vertexBuffer) {
        commandBuffer().useBuffer(vertexBuffer.copyRef());
        ASSERT(vertexBuffer->platformBuffer());
        return vertexBuffer->platformBuffer();
    });

    auto bufferRange = NSMakeRange(WHLSL::Metal::calculateVertexBufferIndex(index), buffers.size());

    // NOTE(review): the cast assumes NSUInteger is 64-bit (true on the Metal
    // platforms this file targets) so the uint64_t offsets reinterpret cleanly.
    [m_platformRenderPassEncoder setVertexBuffers:platformBuffers.data() offsets:(const NSUInteger *)offsets.data() withRange:bufferRange];

    END_BLOCK_OBJC_EXCEPTIONS;
}
284
// Maps a WebGPU primitive topology to its Metal primitive type.
static MTLPrimitiveType mtlPrimitiveTypeForGPUPrimitiveTopology(GPUPrimitiveTopology type)
{
    switch (type) {
    case GPUPrimitiveTopology::PointList:
        return MTLPrimitiveTypePoint;
    case GPUPrimitiveTopology::LineList:
        return MTLPrimitiveTypeLine;
    case GPUPrimitiveTopology::LineStrip:
        return MTLPrimitiveTypeLineStrip;
    case GPUPrimitiveTopology::TriangleList:
        return MTLPrimitiveTypeTriangle;
    case GPUPrimitiveTopology::TriangleStrip:
        return MTLPrimitiveTypeTriangleStrip;
    }

    ASSERT_NOT_REACHED();
    // ASSERT_NOT_REACHED() is a no-op in release builds; without this return,
    // control would flow off the end of a non-void function (undefined behavior).
    return MTLPrimitiveTypePoint;
}
302
// Encodes a non-indexed draw using the topology of the currently-bound pipeline.
void GPURenderPassEncoder::draw(unsigned vertexCount, unsigned instanceCount, unsigned firstVertex, unsigned firstInstance)
{
    if (!m_platformRenderPassEncoder) {
        LOG(WebGPU, "GPURenderPassEncoder::draw(): Invalid operation: Encoding is ended!");
        return;
    }
    if (!m_pipeline) {
        LOG(WebGPU, "GPURenderPassEncoder::draw(): No valid GPURenderPipeline found!");
        return;
    }

    auto primitiveType = mtlPrimitiveTypeForGPUPrimitiveTopology(m_pipeline->primitiveTopology());

    BEGIN_BLOCK_OBJC_EXCEPTIONS;
    [m_platformRenderPassEncoder drawPrimitives:primitiveType vertexStart:firstVertex vertexCount:vertexCount instanceCount:instanceCount baseInstance:firstInstance];
    END_BLOCK_OBJC_EXCEPTIONS;
}
323
// Maps a WebGPU index format to its Metal index type.
static MTLIndexType mtlIndexTypeForGPUIndexFormat(GPUIndexFormat format)
{
    switch (format) {
    case GPUIndexFormat::Uint16:
        return MTLIndexTypeUInt16;
    case GPUIndexFormat::Uint32:
        return MTLIndexTypeUInt32;
    }

    ASSERT_NOT_REACHED();
    // ASSERT_NOT_REACHED() is a no-op in release builds; without this return,
    // control would flow off the end of a non-void function (undefined behavior).
    return MTLIndexTypeUInt32;
}
335
// Encodes an indexed draw using the cached index buffer/offset from
// setIndexBuffer() and the index format declared by the bound pipeline.
// Bails out (with a log message) rather than crashing when any prerequisite
// is missing or the computed buffer offset would be out of range.
void GPURenderPassEncoder::drawIndexed(unsigned indexCount, unsigned instanceCount, unsigned firstIndex, int baseVertex, unsigned firstInstance)
{
#if !LOG_DISABLED
    const char* const functionName = "GPURenderPassEncoder::drawIndexed()";
#endif
    // Encoding must still be active.
    if (!m_platformRenderPassEncoder) {
        LOG(WebGPU, "%s: Invalid operation: Encoding is ended!", functionName);
        return;
    }
    // A pipeline must be bound, and it must declare an index format.
    if (!m_pipeline) {
        LOG(WebGPU, "%s: No valid GPURenderPipeline found!", functionName);
        return;
    }
    if (!m_pipeline->indexFormat()) {
        LOG(WebGPU, "%s: No GPUIndexFormat specified!", functionName);
        return;
    }
    // setIndexBuffer() must have cached a buffer with a live MTLBuffer.
    if (!m_indexBuffer || !m_indexBuffer->platformBuffer()) {
        LOG(WebGPU, "%s: No valid index buffer set!", functionName);
        return;
    }

    // Convert firstIndex to a byte offset, then add the base offset with
    // overflow checking; the result must stay inside the index buffer.
    auto indexByteSize = (m_pipeline->indexFormat() == GPUIndexFormat::Uint16) ? sizeof(uint16_t) : sizeof(uint32_t);
    uint64_t firstIndexOffset = firstIndex * indexByteSize;
    auto totalOffset = checkedSum<uint64_t>(firstIndexOffset, m_indexBufferOffset);
    if (totalOffset.hasOverflowed() || totalOffset >= m_indexBuffer->byteLength()) {
        LOG(WebGPU, "%s: Invalid firstIndex!", functionName);
        return;
    }

    // Keep the index buffer alive for the lifetime of the command buffer.
    commandBuffer().useBuffer(makeRef(*m_indexBuffer));

    BEGIN_BLOCK_OBJC_EXCEPTIONS;
    [m_platformRenderPassEncoder
        drawIndexedPrimitives:mtlPrimitiveTypeForGPUPrimitiveTopology(m_pipeline->primitiveTopology())
        indexCount:indexCount
        indexType:mtlIndexTypeForGPUIndexFormat(*m_pipeline->indexFormat())
        indexBuffer:m_indexBuffer->platformBuffer()
        indexBufferOffset:totalOffset.unsafeGet()
        instanceCount:instanceCount
        baseVertex:baseVertex
        baseInstance:firstInstance];
    END_BLOCK_OBJC_EXCEPTIONS;
}
380
381 #if USE(METAL)
382
// Marks a Metal resource as used by this pass with the given usage flags.
// Callers are expected to ensure encoding is still active (asserted below).
void GPURenderPassEncoder::useResource(const MTLResource *resource, unsigned usage)
{
    ASSERT(m_platformRenderPassEncoder);

    BEGIN_BLOCK_OBJC_EXCEPTIONS;
    [m_platformRenderPassEncoder useResource:resource usage:usage];
    END_BLOCK_OBJC_EXCEPTIONS;
}
391
// Binds a single raw MTLBuffer to the vertex stage at the given index.
// Callers are expected to ensure encoding is still active (asserted below).
void GPURenderPassEncoder::setVertexBuffer(const MTLBuffer *buffer, unsigned offset, unsigned index)
{
    ASSERT(m_platformRenderPassEncoder);

    BEGIN_BLOCK_OBJC_EXCEPTIONS;
    [m_platformRenderPassEncoder setVertexBuffer:buffer offset:offset atIndex:index];
    END_BLOCK_OBJC_EXCEPTIONS;
}
400
// Binds a single raw MTLBuffer to the fragment stage at the given index.
// Callers are expected to ensure encoding is still active (asserted below).
void GPURenderPassEncoder::setFragmentBuffer(const MTLBuffer *buffer, unsigned offset, unsigned index)
{
    ASSERT(m_platformRenderPassEncoder);

    BEGIN_BLOCK_OBJC_EXCEPTIONS;
    [m_platformRenderPassEncoder setFragmentBuffer:buffer offset:offset atIndex:index];
    END_BLOCK_OBJC_EXCEPTIONS;
}
409
410 #endif // USE(METAL)
411
412 } // namespace WebCore
413
414 #endif // ENABLE(WEBGPU)