[osx, canvas]

- Pass buffer lengths to kernels and bound check when allocating from buffers
- Dynamically compute/guess size of intermediate buffers and grow if needed
Martin Fouilleul 2023-07-11 20:03:17 +02:00
parent 98a516ff0a
commit 025ebd91d5
2 changed files with 282 additions and 177 deletions
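Both changes follow a common pattern: the host estimates how large each intermediate buffer must be for the batch and grows it up front, while every kernel that bump-allocates from one of those buffers now receives its capacity and skips its writes when an allocation would run past the end. A minimal sketch of the kernel-side check in plain C (the helper name and the use of stdatomic are illustrative, not code from this commit):

#include <stdatomic.h>

// Reserve `count` slots from a shared buffer guarded by an atomic counter.
// Returns the base index, or -1 when the reservation would exceed `capacity`,
// in which case the caller skips its writes for this batch.
static int bounded_alloc(atomic_int* counter, int count, int capacity)
{
    int index = atomic_fetch_add_explicit(counter, count, memory_order_relaxed);
    if(index + count > capacity)
    {
        return -1;
    }
    return index;
}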

View File

@@ -68,6 +68,9 @@ typedef struct mg_mtl_canvas_backend
vec4 pathScreenExtents;
vec4 pathUserExtents;
int maxTileQueueCount;
int maxSegmentCount;
} mg_mtl_canvas_backend;
typedef struct mg_mtl_image_data
@@ -145,16 +148,19 @@ void mg_mtl_canvas_encode_element(mg_mtl_canvas_backend* backend, mg_path_elt_ty
switch(kind)
{
case MG_PATH_LINE:
backend->maxSegmentCount += 1;
elt->kind = MG_MTL_LINE;
count = 2;
break;
case MG_PATH_QUADRATIC:
backend->maxSegmentCount += 3;
elt->kind = MG_MTL_QUADRATIC;
count = 3;
break;
case MG_PATH_CUBIC:
backend->maxSegmentCount += 7;
elt->kind = MG_MTL_CUBIC;
count = 4;
break;
@@ -244,6 +250,10 @@ void mg_mtl_encode_path(mg_mtl_canvas_backend* backend, mg_primitive* primitive,
simd_make_float3(uvTransform.m[1]/scale, uvTransform.m[4]/scale, 0),
simd_make_float3(uvTransform.m[2], uvTransform.m[5], 1));
}
int nTilesX = ((path->box.z - path->box.x)*scale - 1) / MG_MTL_TILE_SIZE + 1;
int nTilesY = ((path->box.w - path->box.y)*scale - 1) / MG_MTL_TILE_SIZE + 1;
backend->maxTileQueueCount += (nTilesX * nTilesY);
}
bool mg_intersect_hull_legs(vec2 p0, vec2 p1, vec2 p2, vec2 p3, vec2* intersection)
@@ -899,6 +909,27 @@ void mg_mtl_render_stroke(mg_mtl_canvas_backend* backend,
}
void mg_mtl_grow_buffer_if_needed(mg_mtl_canvas_backend* backend, id<MTLBuffer>* buffer, u64 wantedSize)
{
u64 bufferSize = [(*buffer) length];
if(bufferSize < wantedSize)
{
u64 newSize = wantedSize * 1.2;
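// The 1.2x factor presumably leaves some headroom over the requested size, so a
// slowly growing per-batch estimate doesn't force a reallocation on every batch;
// the old contents are not copied, which is fine for these per-batch scratch buffers.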
@autoreleasepool
{
//NOTE: MTLBuffers are retained by the command buffer, so we don't risk deallocating while the buffer is in use
[*buffer release];
*buffer = nil;
id<MTLDevice> device = backend->surface->device;
MTLResourceOptions bufferOptions = MTLResourceStorageModePrivate;
*buffer = [device newBufferWithLength: newSize options: bufferOptions];
}
}
}
void mg_mtl_render_batch(mg_mtl_canvas_backend* backend,
mg_mtl_surface* surface,
mg_image_data* image,
@@ -913,10 +944,18 @@ void mg_mtl_render_batch(mg_mtl_canvas_backend* backend,
int pathCount = backend->pathCount - backend->pathBatchStart;
int eltCount = backend->eltCount - backend->eltBatchStart;
//NOTE: update intermediate buffer sizes if needed
mg_mtl_grow_buffer_if_needed(backend, &backend->pathQueueBuffer, pathCount * sizeof(mg_mtl_path_queue));
mg_mtl_grow_buffer_if_needed(backend, &backend->tileQueueBuffer, backend->maxTileQueueCount * sizeof(mg_mtl_tile_queue));
mg_mtl_grow_buffer_if_needed(backend, &backend->segmentBuffer, backend->maxSegmentCount * sizeof(mg_mtl_segment));
mg_mtl_grow_buffer_if_needed(backend, &backend->screenTilesBuffer, nTilesX * nTilesY * sizeof(mg_mtl_screen_tile));
mg_mtl_grow_buffer_if_needed(backend, &backend->tileOpBuffer, backend->maxSegmentCount * 30 * sizeof(mg_mtl_tile_op));
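// Sizing rationale for the estimates above: one path queue per path in the batch,
// one tile queue per tile covered by a path's bounding box summed over the batch
// (maxTileQueueCount), a worst-case segment count per path element (maxSegmentCount),
// one screen tile entry per tile of the render target, and, as a guessed upper bound,
// 30 tile ops per potential segment.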
//NOTE: encode GPU commands
@autoreleasepool
{
//NOTE: create output texture
//NOTE: clear output texture
MTLRenderPassDescriptor* clearDescriptor = [MTLRenderPassDescriptor renderPassDescriptor];
clearDescriptor.colorAttachments[0].texture = backend->outTexture;
clearDescriptor.colorAttachments[0].loadAction = MTLLoadActionClear;
@@ -941,13 +980,16 @@ void mg_mtl_render_batch(mg_mtl_canvas_backend* backend,
pathEncoder.label = @"path pass";
[pathEncoder setComputePipelineState: backend->pathPipeline];
int tileQueueMax = [backend->tileQueueBuffer length] / sizeof(mg_mtl_tile_queue);
[pathEncoder setBytes:&pathCount length:sizeof(int) atIndex:0];
[pathEncoder setBuffer:backend->pathBuffer[backend->bufferIndex] offset:pathBufferOffset atIndex:1];
[pathEncoder setBuffer:backend->pathQueueBuffer offset:0 atIndex:2];
[pathEncoder setBuffer:backend->tileQueueBuffer offset:0 atIndex:3];
[pathEncoder setBuffer:backend->tileQueueCountBuffer offset:0 atIndex:4];
[pathEncoder setBytes:&tileSize length:sizeof(int) atIndex:5];
[pathEncoder setBytes:&scale length:sizeof(int) atIndex:6];
[pathEncoder setBytes:&tileQueueMax length:sizeof(int) atIndex:5];
[pathEncoder setBytes:&tileSize length:sizeof(int) atIndex:6];
[pathEncoder setBytes:&scale length:sizeof(int) atIndex:7];
MTLSize pathGridSize = MTLSizeMake(pathCount, 1, 1);
MTLSize pathGroupSize = MTLSizeMake([backend->pathPipeline maxTotalThreadsPerThreadgroup], 1, 1);
@@ -960,6 +1002,9 @@ void mg_mtl_render_batch(mg_mtl_canvas_backend* backend,
segmentEncoder.label = @"segment pass";
[segmentEncoder setComputePipelineState: backend->segmentPipeline];
int tileOpMax = [backend->tileOpBuffer length] / sizeof(mg_mtl_tile_op);
int segmentMax = [backend->segmentBuffer length] / sizeof(mg_mtl_segment);
[segmentEncoder setBytes:&eltCount length:sizeof(int) atIndex:0];
[segmentEncoder setBuffer:backend->elementBuffer[backend->bufferIndex] offset:elementBufferOffset atIndex:1];
[segmentEncoder setBuffer:backend->segmentCountBuffer offset:0 atIndex:2];
@@ -968,10 +1013,12 @@ void mg_mtl_render_batch(mg_mtl_canvas_backend* backend,
[segmentEncoder setBuffer:backend->tileQueueBuffer offset:0 atIndex:5];
[segmentEncoder setBuffer:backend->tileOpBuffer offset:0 atIndex:6];
[segmentEncoder setBuffer:backend->tileOpCountBuffer offset:0 atIndex:7];
[segmentEncoder setBytes:&tileSize length:sizeof(int) atIndex:8];
[segmentEncoder setBytes:&scale length:sizeof(int) atIndex:9];
[segmentEncoder setBuffer:backend->logBuffer[backend->bufferIndex] offset:0 atIndex:10];
[segmentEncoder setBuffer:backend->logOffsetBuffer[backend->bufferIndex] offset:0 atIndex:11];
[segmentEncoder setBytes:&segmentMax length:sizeof(int) atIndex:8];
[segmentEncoder setBytes:&tileOpMax length:sizeof(int) atIndex:9];
[segmentEncoder setBytes:&tileSize length:sizeof(int) atIndex:10];
[segmentEncoder setBytes:&scale length:sizeof(int) atIndex:11];
[segmentEncoder setBuffer:backend->logBuffer[backend->bufferIndex] offset:0 atIndex:12];
[segmentEncoder setBuffer:backend->logOffsetBuffer[backend->bufferIndex] offset:0 atIndex:13];
MTLSize segmentGridSize = MTLSizeMake(eltCount, 1, 1);
MTLSize segmentGroupSize = MTLSizeMake([backend->segmentPipeline maxTotalThreadsPerThreadgroup], 1, 1);
@@ -1008,10 +1055,11 @@ void mg_mtl_render_batch(mg_mtl_canvas_backend* backend,
[mergeEncoder setBuffer:backend->tileOpCountBuffer offset:0 atIndex:5];
[mergeEncoder setBuffer:backend->rasterDispatchBuffer offset:0 atIndex:6];
[mergeEncoder setBuffer:backend->screenTilesBuffer offset:0 atIndex:7];
[mergeEncoder setBytes:&tileSize length:sizeof(int) atIndex:8];
[mergeEncoder setBytes:&scale length:sizeof(float) atIndex:9];
[mergeEncoder setBuffer:backend->logBuffer[backend->bufferIndex] offset:0 atIndex:10];
[mergeEncoder setBuffer:backend->logOffsetBuffer[backend->bufferIndex] offset:0 atIndex:11];
[mergeEncoder setBytes:&tileOpMax length:sizeof(int) atIndex:8];
[mergeEncoder setBytes:&tileSize length:sizeof(int) atIndex:9];
[mergeEncoder setBytes:&scale length:sizeof(float) atIndex:10];
[mergeEncoder setBuffer:backend->logBuffer[backend->bufferIndex] offset:0 atIndex:11];
[mergeEncoder setBuffer:backend->logOffsetBuffer[backend->bufferIndex] offset:0 atIndex:12];
MTLSize mergeGridSize = MTLSizeMake(nTilesX, nTilesY, 1);
MTLSize mergeGroupSize = MTLSizeMake(MG_MTL_TILE_SIZE, MG_MTL_TILE_SIZE, 1);
@@ -1075,6 +1123,9 @@ void mg_mtl_render_batch(mg_mtl_canvas_backend* backend,
backend->pathBatchStart = backend->pathCount;
backend->eltBatchStart = backend->eltCount;
backend->maxSegmentCount = 0;
backend->maxTileQueueCount = 0;
}
void mg_mtl_canvas_resize(mg_mtl_canvas_backend* backend, vec2 size)
@@ -1168,6 +1219,8 @@ void mg_mtl_canvas_render(mg_canvas_backend* interface,
backend->pathBatchStart = 0;
backend->eltCount = 0;
backend->eltBatchStart = 0;
backend->maxSegmentCount = 0;
backend->maxTileQueueCount = 0;
//NOTE: encode and render batches
vec2 currentPos = {0};
@@ -1352,10 +1405,11 @@ void mg_mtl_canvas_image_upload_region(mg_canvas_backend* backendInterface, mg_i
const u32 MG_MTL_DEFAULT_PATH_BUFFER_LEN = (4<<10),
MG_MTL_DEFAULT_ELT_BUFFER_LEN = (4<<10),
MG_MTL_SEGMENT_BUFFER_SIZE = (4<<20)*sizeof(mg_mtl_segment),
MG_MTL_PATH_QUEUE_BUFFER_SIZE = (4<<20)*sizeof(mg_mtl_path_queue),
MG_MTL_TILE_QUEUE_BUFFER_SIZE = (4<<20)*sizeof(mg_mtl_tile_queue),
MG_MTL_TILE_OP_BUFFER_SIZE = (4<<20)*sizeof(mg_mtl_tile_op);
MG_MTL_DEFAULT_SEGMENT_BUFFER_LEN = (4<<10),
MG_MTL_DEFAULT_PATH_QUEUE_BUFFER_LEN = (4<<10),
MG_MTL_DEFAULT_TILE_QUEUE_BUFFER_LEN = (4<<10),
MG_MTL_DEFAULT_TILE_OP_BUFFER_LEN = (4<<14);
mg_canvas_backend* mtl_canvas_backend_create(mg_mtl_surface* surface)
{
@@ -1461,22 +1515,22 @@ mg_canvas_backend* mtl_canvas_backend_create(mg_mtl_surface* surface)
}
bufferOptions = MTLResourceStorageModePrivate;
backend->segmentBuffer = [surface->device newBufferWithLength: MG_MTL_SEGMENT_BUFFER_SIZE
backend->segmentBuffer = [surface->device newBufferWithLength: MG_MTL_DEFAULT_SEGMENT_BUFFER_LEN * sizeof(mg_mtl_segment)
options: bufferOptions];
backend->segmentCountBuffer = [surface->device newBufferWithLength: sizeof(int)
options: bufferOptions];
backend->pathQueueBuffer = [surface->device newBufferWithLength: MG_MTL_PATH_QUEUE_BUFFER_SIZE
backend->pathQueueBuffer = [surface->device newBufferWithLength: MG_MTL_DEFAULT_PATH_QUEUE_BUFFER_LEN * sizeof(mg_mtl_path_queue)
options: bufferOptions];
backend->tileQueueBuffer = [surface->device newBufferWithLength: MG_MTL_TILE_QUEUE_BUFFER_SIZE
backend->tileQueueBuffer = [surface->device newBufferWithLength: MG_MTL_DEFAULT_TILE_QUEUE_BUFFER_LEN * sizeof(mg_mtl_tile_queue)
options: bufferOptions];
backend->tileQueueCountBuffer = [surface->device newBufferWithLength: sizeof(int)
options: bufferOptions];
backend->tileOpBuffer = [surface->device newBufferWithLength: MG_MTL_TILE_OP_BUFFER_SIZE
backend->tileOpBuffer = [surface->device newBufferWithLength: MG_MTL_DEFAULT_TILE_OP_BUFFER_LEN * sizeof(mg_mtl_tile_op)
options: bufferOptions];
backend->tileOpCountBuffer = [surface->device newBufferWithLength: sizeof(int)
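The sizes passed to mg_mtl_grow_buffer_if_needed above come from worst-case bookkeeping done while encoding the batch: a line may contribute 1 segment, a quadratic 3, a cubic 7, and a path may touch every tile its scaled bounding box covers. A condensed sketch of that estimate in plain C, with illustrative stand-in names for the backend's fields and element kinds (the actual logic lives in mg_mtl_canvas_encode_element and mg_mtl_encode_path above):

typedef enum { ELT_LINE, ELT_QUADRATIC, ELT_CUBIC } elt_kind; // stand-ins for the MG_PATH_* kinds

// Worst-case number of segments one path element can produce (used to size the segment buffer).
static int worst_case_segments(elt_kind kind)
{
    switch(kind)
    {
        case ELT_LINE:      return 1; // a line yields a single segment
        case ELT_QUADRATIC: return 3; // at most 3 monotonic pieces (estimate)
        case ELT_CUBIC:     return 7; // at most 7 pieces after splitting (estimate)
    }
    return 0;
}

// Number of tiles overlapped by a path's scaled bounding box (used to size the tile queues).
static int tiles_covered(float x0, float y0, float x1, float y1, float scale, int tileSize)
{
    int nTilesX = (int)(((x1 - x0)*scale - 1)/tileSize) + 1;
    int nTilesY = (int)(((y1 - y0)*scale - 1)/tileSize) + 1;
    return nTilesX*nTilesY;
}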

View File

@@ -231,8 +231,9 @@ kernel void mtl_path_setup(constant int* pathCount [[buffer(0)]],
device mg_mtl_path_queue* pathQueueBuffer [[buffer(2)]],
device mg_mtl_tile_queue* tileQueueBuffer [[buffer(3)]],
device atomic_int* tileQueueCount [[buffer(4)]],
constant int* tileSize [[buffer(5)]],
constant float* scale [[buffer(6)]],
constant int* tileQueueMax [[buffer(5)]],
constant int* tileSize [[buffer(6)]],
constant float* scale [[buffer(7)]],
uint pathIndex [[thread_position_in_grid]])
{
const device mg_mtl_path* path = &pathBuffer[pathIndex];
@@ -254,6 +255,13 @@ kernel void mtl_path_setup(constant int* pathCount [[buffer(0)]],
int tileQueuesIndex = atomic_fetch_add_explicit(tileQueueCount, tileCount, memory_order_relaxed);
if(tileQueuesIndex + tileCount >= tileQueueMax[0])
{
pathQueueBuffer[pathIndex].area = int4(0);
pathQueueBuffer[pathIndex].tileQueues = 0;
}
else
{
pathQueueBuffer[pathIndex].area = int4(firstTile.x, firstTile.y, nTilesX, nTilesY);
pathQueueBuffer[pathIndex].tileQueues = tileQueuesIndex;
@@ -266,6 +274,7 @@ kernel void mtl_path_setup(constant int* pathCount [[buffer(0)]],
atomic_store_explicit(&tileQueues[i].windingOffset, 0, memory_order_relaxed);
}
}
}
float ccw(float2 a, float2 b, float2 c)
{
@@ -376,6 +385,9 @@ typedef struct mtl_segment_setup_context
int pathIndex;
int tileOpMax;
int segmentMax;
} mtl_segment_setup_context;
void mtl_segment_bin_to_tiles(thread mtl_segment_setup_context* context, device mg_mtl_segment* seg)
@@ -439,6 +451,9 @@ void mtl_segment_bin_to_tiles(thread mtl_segment_setup_context* context, device
if(crossL || crossR || crossT || crossB || s0Inside || s1Inside)
{
int tileOpIndex = atomic_fetch_add_explicit(context->tileOpCount, 1, memory_order_relaxed);
if(tileOpIndex < context->tileOpMax)
{
device mg_mtl_tile_op* op = &context->tileOpBuffer[tileOpIndex];
op->kind = MG_MTL_OP_SEGMENT;
@@ -472,6 +487,7 @@ void mtl_segment_bin_to_tiles(thread mtl_segment_setup_context* context, device
}
}
}
}
device mg_mtl_segment* mtl_segment_push(thread mtl_segment_setup_context* context, float2 p[4], mg_mtl_seg_kind kind)
{
@@ -508,8 +524,13 @@ device mg_mtl_segment* mtl_segment_push(thread mtl_segment_setup_context* contex
} break;
}
device mg_mtl_segment* seg = 0;
int segIndex = atomic_fetch_add_explicit(context->segmentCount, 1, memory_order_relaxed);
device mg_mtl_segment* seg = &context->segmentBuffer[segIndex];
if(segIndex < context->segmentMax)
{
seg = &context->segmentBuffer[segIndex];
bool goingUp = e.y >= s.y;
bool goingRight = e.x >= s.x;
@@ -558,6 +579,7 @@ device mg_mtl_segment* mtl_segment_push(thread mtl_segment_setup_context* contex
seg->config = MG_MTL_TR;
}
}
}
return(seg);
}
@@ -567,9 +589,12 @@ device mg_mtl_segment* mtl_segment_push(thread mtl_segment_setup_context* contex
void mtl_line_setup(thread mtl_segment_setup_context* context, float2 p[2])
{
device mg_mtl_segment* seg = mtl_segment_push(context, p, MG_MTL_LINE);
if(seg)
{
seg->hullVertex = p[0];
mtl_segment_bin_to_tiles(context, seg);
}
}
float2 mtl_quadratic_blossom(float2 p[3], float u, float v)
{
@@ -636,6 +661,8 @@ void mtl_quadratic_emit(thread mtl_segment_setup_context* context,
{
device mg_mtl_segment* seg = mtl_segment_push(context, p, MG_MTL_QUADRATIC);
if(seg)
{
//NOTE: compute implicit equation matrix
float det = p[0].x*(p[1].y-p[2].y) + p[1].x*(p[2].y-p[0].y) + p[2].x*(p[0].y - p[1].y);
@@ -657,6 +684,7 @@ void mtl_quadratic_emit(thread mtl_segment_setup_context* context,
mtl_segment_bin_to_tiles(context, seg);
}
}
void mtl_quadratic_setup(thread mtl_segment_setup_context* context, thread float2* p)
{
@@ -1044,6 +1072,8 @@ void mtl_cubic_emit(thread mtl_segment_setup_context* context, mtl_cubic_info cu
{
device mg_mtl_segment* seg = mtl_segment_push(context, sp, MG_MTL_CUBIC);
if(seg)
{
float2 v0 = p[0];
float2 v1 = p[3];
float2 v2;
@@ -1109,6 +1139,7 @@ void mtl_cubic_emit(thread mtl_segment_setup_context* context, mtl_cubic_info cu
//NOTE: bin to tiles
mtl_segment_bin_to_tiles(context, seg);
}
}
void mtl_cubic_setup(thread mtl_segment_setup_context* context, float2 p[4])
{
@@ -1229,11 +1260,13 @@ kernel void mtl_segment_setup(constant int* elementCount [[buffer(0)]],
device mg_mtl_tile_queue* tileQueueBuffer [[buffer(5)]],
device mg_mtl_tile_op* tileOpBuffer [[buffer(6)]],
device atomic_int* tileOpCount [[buffer(7)]],
constant int* tileSize [[buffer(8)]],
constant float* scale [[buffer(9)]],
constant int* segmentMax [[buffer(8)]],
constant int* tileOpMax [[buffer(9)]],
constant int* tileSize [[buffer(10)]],
constant float* scale [[buffer(11)]],
device char* logBuffer [[buffer(10)]],
device atomic_int* logOffsetBuffer [[buffer(11)]],
device char* logBuffer [[buffer(12)]],
device atomic_int* logOffsetBuffer [[buffer(13)]],
uint eltIndex [[thread_position_in_grid]])
{
const device mg_mtl_path_elt* elt = &elementBuffer[eltIndex];
@@ -1247,10 +1280,12 @@ kernel void mtl_segment_setup(constant int* elementCount [[buffer(0)]],
.tileQueues = tileQueues,
.tileOpBuffer = tileOpBuffer,
.tileOpCount = tileOpCount,
.tileOpMax = tileOpMax[0],
.segmentMax = segmentMax[0],
.tileSize = tileSize[0],
.log.buffer = logBuffer,
.log.offset = logOffsetBuffer,
.log.enabled = false};
.log.enabled = false,};
switch(elt->kind)
{
@@ -1327,10 +1362,11 @@ kernel void mtl_merge(constant int* pathCount [[buffer(0)]],
device atomic_int* tileOpCount [[buffer(5)]],
device MTLDispatchThreadgroupsIndirectArguments* dispatchBuffer [[buffer(6)]],
device mg_mtl_screen_tile* screenTilesBuffer [[buffer(7)]],
constant int* tileSize [[buffer(8)]],
constant float* scale [[buffer(9)]],
device char* logBuffer [[buffer(10)]],
device atomic_int* logOffsetBuffer [[buffer(11)]],
constant int* tileOpMax [[buffer(8)]],
constant int* tileSize [[buffer(9)]],
constant float* scale [[buffer(10)]],
device char* logBuffer [[buffer(11)]],
device atomic_int* logOffsetBuffer [[buffer(12)]],
uint2 threadCoord [[thread_position_in_grid]],
uint2 gridSize [[threads_per_grid]])
{
@@ -1393,6 +1429,12 @@ kernel void mtl_merge(constant int* pathCount [[buffer(0)]],
//NOTE: tile is fully covered. Add path start op (with winding offset).
// Additionally if color is opaque and tile is fully inside clip, trim tile list.
int pathOpIndex = atomic_fetch_add_explicit(tileOpCount, 1, memory_order_relaxed);
if(pathOpIndex >= tileOpMax[0])
{
return;
}
device mg_mtl_tile_op* pathOp = &tileOpBuffer[pathOpIndex];
pathOp->kind = MG_MTL_OP_CLIP_FILL;
pathOp->next = -1;
@@ -1421,6 +1463,11 @@ kernel void mtl_merge(constant int* pathCount [[buffer(0)]],
{
//NOTE: add path start op (with winding offset)
int startOpIndex = atomic_fetch_add_explicit(tileOpCount, 1, memory_order_relaxed);
if(startOpIndex >= tileOpMax[0])
{
return;
}
device mg_mtl_tile_op* startOp = &tileOpBuffer[startOpIndex];
startOp->kind = MG_MTL_OP_START;
startOp->next = -1;
@@ -1439,6 +1486,11 @@ kernel void mtl_merge(constant int* pathCount [[buffer(0)]],
//NOTE: add path end op
int endOpIndex = atomic_fetch_add_explicit(tileOpCount, 1, memory_order_relaxed);
if(endOpIndex >= tileOpMax[0])
{
return;
}
device mg_mtl_tile_op* endOp = &tileOpBuffer[endOpIndex];
endOp->kind = MG_MTL_OP_END;
endOp->next = -1;
@@ -1446,7 +1498,6 @@ kernel void mtl_merge(constant int* pathCount [[buffer(0)]],
*nextLink = endOpIndex;
nextLink = &endOp->next;
}
}
}