SBgl 0.1.0
A graphics framework in C99
sbgl_graphics_hal.h File Reference
#include <stdbool.h>
#include <stddef.h>
#include "sbgl_types.h"

Go to the source code of this file.

Data Structures

struct  sbgl_GfxTransientAllocation
 Represents a slice of a persistent GPU buffer used for transient data. More...
 

Typedefs

typedef struct sbgl_GfxContext sbgl_GfxContext
 Opaque handle for the graphics backend context.
 

Functions

sbgl_GfxContext * sbgl_gfx_Init (sbgl_Window *window, struct SblArena *arena, const sbgl_ResourceLimits *limits, bool enableValidation)
 Initializes the graphics backend with configurable resource limits.
 
void sbgl_gfx_Shutdown (sbgl_GfxContext *ctx)
 
bool sbgl_gfx_BeginFrame (sbgl_GfxContext *ctx)
 Starts a new frame, acquiring a swapchain image and beginning command-buffer recording.
 
void sbgl_gfx_EndFrame (sbgl_GfxContext *ctx)
 Submits the current frame's commands and presents the image.
 
void sbgl_gfx_BeginRenderPass (sbgl_GfxContext *ctx, float r, float g, float b, float a)
 Starts a graphics rendering pass.
 
void sbgl_gfx_EndRenderPass (sbgl_GfxContext *ctx)
 Ends the current graphics rendering pass.
 
void sbgl_gfx_DeviceWaitIdle (sbgl_GfxContext *ctx)
 
sbgl_Buffer sbgl_gfx_CreateBuffer (sbgl_GfxContext *ctx, sbgl_BufferUsage usage, size_t size, const void *data)
 
void sbgl_gfx_DestroyBuffer (sbgl_GfxContext *ctx, sbgl_Buffer buffer)
 
void sbgl_gfx_FillBuffer (sbgl_GfxContext *ctx, sbgl_Buffer buffer, size_t offset, size_t size, uint32_t value)
 Performs a hardware-accelerated buffer fill.
 
uint32_t sbgl_gfx_GetFrameIndex (sbgl_GfxContext *ctx)
 Retrieves the current backend frame index.
 
void * sbgl_gfx_MapBuffer (sbgl_GfxContext *ctx, sbgl_Buffer buffer)
 
void sbgl_gfx_UnmapBuffer (sbgl_GfxContext *ctx, sbgl_Buffer buffer)
 
sbgl_Shader sbgl_gfx_LoadShader (sbgl_GfxContext *ctx, sbgl_ShaderStage stage, const uint32_t *bytecode, size_t size)
 
void sbgl_gfx_DestroyShader (sbgl_GfxContext *ctx, sbgl_Shader shader)
 
sbgl_Pipeline sbgl_gfx_CreatePipeline (sbgl_GfxContext *ctx, const sbgl_PipelineConfig *config)
 
void sbgl_gfx_DestroyPipeline (sbgl_GfxContext *ctx, sbgl_Pipeline pipeline)
 
sbgl_ComputePipeline sbgl_gfx_CreateComputePipeline (sbgl_GfxContext *ctx, sbgl_Shader shader)
 
void sbgl_gfx_DestroyComputePipeline (sbgl_GfxContext *ctx, sbgl_ComputePipeline pipeline)
 
void sbgl_gfx_BindComputePipeline (sbgl_GfxContext *ctx, sbgl_ComputePipeline pipeline)
 
void sbgl_gfx_DispatchCompute (sbgl_GfxContext *ctx, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ)
 
void sbgl_gfx_MemoryBarrier (sbgl_GfxContext *ctx, sbgl_BarrierType type)
 
void sbgl_gfx_BindPipeline (sbgl_GfxContext *ctx, sbgl_Pipeline pipeline)
 
void sbgl_gfx_BindBuffer (sbgl_GfxContext *ctx, sbgl_Buffer buffer, sbgl_BufferUsage usage)
 
void sbgl_gfx_Draw (sbgl_GfxContext *ctx, uint32_t vertexCount, uint32_t firstVertex, uint32_t instanceCount)
 
void sbgl_gfx_DrawIndexed (sbgl_GfxContext *ctx, uint32_t indexCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t instanceCount)
 
void sbgl_gfx_DrawIndirect (sbgl_GfxContext *ctx, sbgl_Buffer buffer, size_t offset, uint32_t drawCount)
 Submits a batch of draw calls stored in a GPU buffer.
 
sbgl_GfxTransientAllocation sbgl_gfx_AllocateTransient (sbgl_GfxContext *ctx, size_t size, uint32_t alignment)
 Allocates a slice of GPU-visible memory for transient per-frame data.
 
uint64_t sbgl_gfx_GetBufferDeviceAddress (sbgl_GfxContext *ctx, sbgl_Buffer buffer)
 Retrieves the 64-bit GPU virtual address for a buffer.
 
void sbgl_gfx_DestroyBufferDeferred (sbgl_GfxContext *ctx, sbgl_Buffer buffer)
 Marks a buffer for destruction after current frames complete.
 
void sbgl_gfx_PushConstants (sbgl_GfxContext *ctx, size_t size, const void *data)
 
float sbgl_gfx_GetGpuTime (sbgl_GfxContext *ctx)
 Retrieves the elapsed GPU time for the previous frame in milliseconds.
 
int32_t sbgl_gfx_GetLastVkResult (sbgl_GfxContext *ctx)
 Retrieves the last VkResult from the backend for error inspection.
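
The functions above compose into a fixed per-frame sequence. The following is a minimal sketch of that loop; window, arena, pipeline, and vertexBuffer are placeholders assumed to have been created elsewhere, and the limit values are illustrative:

    sbgl_ResourceLimits limits = { .maxBuffers = 256, .maxShaders = 32, .maxPipelines = 32 };
    sbgl_GfxContext *gfx = sbgl_gfx_Init(window, arena, &limits, true /* validation */);
    if (!gfx)
        return; /* initialization failed */

    while (running) {
        if (!sbgl_gfx_BeginFrame(gfx))
            continue; /* swapchain was out of date; skip this frame */

        sbgl_gfx_BeginRenderPass(gfx, 0.1f, 0.1f, 0.1f, 1.0f);
        sbgl_gfx_BindPipeline(gfx, pipeline);
        sbgl_gfx_BindBuffer(gfx, vertexBuffer, SBGL_BUFFER_USAGE_VERTEX);
        sbgl_gfx_Draw(gfx, 3, 0, 1); /* one triangle, one instance */
        sbgl_gfx_EndRenderPass(gfx);
        sbgl_gfx_EndFrame(gfx);
    }

    sbgl_gfx_Shutdown(gfx);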
 

Typedef Documentation

◆ sbgl_GfxContext

typedef struct sbgl_GfxContext sbgl_GfxContext

Opaque handle for the graphics backend context.

Definition at line 21 of file sbgl_graphics_hal.h.

Function Documentation

◆ sbgl_gfx_AllocateTransient()

sbgl_GfxTransientAllocation sbgl_gfx_AllocateTransient ( sbgl_GfxContext * ctx,
size_t size,
uint32_t alignment )

Allocates a slice of GPU-visible memory for transient per-frame data.

This memory is managed by the backend's internal per-frame ring buffers and does not require manual destruction.

Parameters
ctx  The graphics context.
size  The number of bytes to allocate.
alignment  The required byte alignment for the allocation.
Returns
A structure containing the allocation metadata and mapped pointer.

Definition at line 2039 of file sbgl_backend_vulkan.c.

2039 {
2040 /* The system sub-allocates from the current frame's persistent buffer, respecting
2041 the requested alignment to ensure compatibility with Vulkan requirements. */
2042 uint32_t frame = ctx->currentFrame;
2043 uint32_t offset = ctx->transientOffsets[frame];
2044
2045 if (alignment > 0) {
2046 offset = (offset + alignment - 1) & ~(alignment - 1);
2047 }
2048
2049 if (offset + size > SBGL_TRANSIENT_BUFFER_SIZE) {
2050 fprintf(stderr, "[Vulkan] Transient buffer overflow for frame %u!\n", frame);
2051 return (sbgl_GfxTransientAllocation){ 0 };
2052 }
2053
2054 sbgl_GfxTransientAllocation alloc = {
2055 .buffer = ctx->transientBuffers[frame],
2056 .offset = offset,
2057 .size = (uint32_t)size,
2058 .mapped = (char*)ctx->transientMapped[frame] + offset,
2059 .deviceAddress = sbgl_gfx_GetBufferDeviceAddress(ctx, ctx->transientBuffers[frame]) + offset
2060 };
2061
2062 ctx->transientOffsets[frame] = offset + (uint32_t)size;
2063 return alloc;
2064}
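
A usage sketch: stream per-frame data through a transient slice and forward its GPU address via push constants. The FrameUniforms struct and its consumption on the shader side are assumptions, not part of this API:

    /* Allocate a per-frame slice and copy this frame's uniforms into it. */
    sbgl_GfxTransientAllocation alloc =
        sbgl_gfx_AllocateTransient(ctx, sizeof(FrameUniforms), 64); /* 64-byte alignment */
    if (alloc.mapped) {
        memcpy(alloc.mapped, &uniforms, sizeof(FrameUniforms));
        /* The shader reads the data through Buffer Device Address. */
        sbgl_gfx_PushConstants(ctx, sizeof(alloc.deviceAddress), &alloc.deviceAddress);
    }

No corresponding free is required; the slice is recycled when the frame's transient offset is reset in sbgl_gfx_BeginFrame.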

◆ sbgl_gfx_BeginFrame()

bool sbgl_gfx_BeginFrame ( sbgl_GfxContext * ctx)

Starts a new frame, acquiring a swapchain image and beginning command-buffer recording.

This must be called before any GPU commands (Compute or Graphics) are recorded.

Definition at line 1105 of file sbgl_backend_vulkan.c.

1105 {
1106 ctx->vk.vkWaitForFences(
1107 ctx->device,
1108 1,
1109 &ctx->inFlightFences[ctx->currentFrame],
1110 VK_TRUE,
1111 UINT64_MAX
1112 );
1113
1114 /* The system processes the deferred destruction queue for the current frame slot,
1115 releasing GPU resources that are no longer in flight. */
1116 for (uint32_t i = 0; i < ctx->deferredCount[ctx->currentFrame]; i++) {
1117 sbgl_gfx_DestroyBuffer(ctx, ctx->deferredBuffers[ctx->currentFrame][i]);
1118 }
1119 ctx->deferredCount[ctx->currentFrame] = 0;
1120
1121 /* The transient allocation offset is reset for the current frame, effectively
1122 recycling the GPU memory for new data while ensuring it does not overlap with
1123 memory currently in use by other frames in flight. */
1124 ctx->transientOffsets[ctx->currentFrame] = 0;
1125 ctx->dynamicHeap.offset[ctx->currentFrame] = 0;
1126
1127 if (sbgl_os_WasWindowResized(ctx->window)) {
1128 recreate_swapchain(ctx);
1129 }
1130
1131 VkResult result = ctx->vk.vkAcquireNextImageKHR(
1132 ctx->device,
1133 ctx->swapchain,
1134 UINT64_MAX,
1135 ctx->imageAvailableSemaphores[ctx->semaphoreIndex],
1136 VK_NULL_HANDLE,
1137 &ctx->currentImageIndex
1138 );
1139
1140 ctx->backendResult = result;
1141
1142 if (result == VK_ERROR_OUT_OF_DATE_KHR) {
1143 recreate_swapchain(ctx);
1144 return false;
1145 } else if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR) {
1146 return false;
1147 }
1148
1149 ctx->vk.vkResetFences(ctx->device, 1, &ctx->inFlightFences[ctx->currentFrame]);
1150 ctx->vk.vkResetCommandBuffer(ctx->commandBuffers[ctx->currentFrame], 0);
1151 VkCommandBufferBeginInfo beginInfo = { .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO };
1152 ctx->vk.vkBeginCommandBuffer(ctx->commandBuffers[ctx->currentFrame], &beginInfo);
1153
1154 /* The system resets the query pool for the current frame to prepare for new
1155 timestamp recordings. */
1156 ctx->vk.vkCmdResetQueryPool(
1157 ctx->commandBuffers[ctx->currentFrame],
1158 ctx->queryPool,
1159 ctx->currentFrame * 2,
1160 2
1161 );
1162
1163 return true;
1164}
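
A false return is not fatal: it signals that the swapchain was recreated (or image acquisition failed) and that no commands were recorded. A minimal sketch of the caller-side handling:

    if (!sbgl_gfx_BeginFrame(ctx)) {
        /* Nothing was recorded; optionally inspect the backend result and retry next frame. */
        int32_t vr = sbgl_gfx_GetLastVkResult(ctx);
        (void)vr;
        return;
    }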

◆ sbgl_gfx_BeginRenderPass()

void sbgl_gfx_BeginRenderPass ( sbgl_GfxContext * ctx,
float r,
float g,
float b,
float a )

Starts a graphics rendering pass.

This must be called before any draw commands are recorded. It handles clearing the attachments if requested.

Definition at line 1166 of file sbgl_backend_vulkan.c.

1166 {
1167 /* The system records the starting timestamp at the beginning of the graphics pass. */
1168 ctx->vk.vkCmdWriteTimestamp(
1169 ctx->commandBuffers[ctx->currentFrame],
1170 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
1171 ctx->queryPool,
1172 ctx->currentFrame * 2
1173 );
1174
1175 VkImageMemoryBarrier barriers[2] = { 0 };
1176 barriers[0].sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
1177 barriers[0].oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
1178 barriers[0].newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
1179 barriers[0].image = ctx->images[ctx->currentImageIndex];
1180 barriers[0].subresourceRange =
1181 (VkImageSubresourceRange){ .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
1182 .levelCount = 1,
1183 .layerCount = 1 };
1184 barriers[0].dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
1185
1186 barriers[1].sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
1187 barriers[1].oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
1188 barriers[1].newLayout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL;
1189 barriers[1].image = ctx->depthImage;
1190 barriers[1].subresourceRange =
1191 (VkImageSubresourceRange){ .aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT,
1192 .levelCount = 1,
1193 .layerCount = 1 };
1194 barriers[1].dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
1195
1196 ctx->vk.vkCmdPipelineBarrier(
1197 ctx->commandBuffers[ctx->currentFrame],
1198 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
1199 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
1200 0,
1201 0,
1202 NULL,
1203 0,
1204 NULL,
1205 2,
1206 barriers
1207 );
1208
1209 VkRenderingAttachmentInfo colorAttachment = {
1210 .sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
1211 .imageView = ctx->imageViews[ctx->currentImageIndex],
1212 .imageLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
1213 .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
1214 .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
1215 .clearValue = { { { r, g, b, a } } },
1216 };
1217
1218 VkRenderingAttachmentInfo depthAttachment = {
1219 .sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
1220 .imageView = ctx->depthImageView,
1221 .imageLayout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL,
1222 .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
1223 .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
1224 .clearValue = { .depthStencil = { 1.0f, 0 } },
1225 };
1226
1227 VkRenderingInfo renderingInfo = {
1228 .sType = VK_STRUCTURE_TYPE_RENDERING_INFO,
1229 .renderArea = { .extent = ctx->swapchainExtent },
1230 .layerCount = 1,
1231 .colorAttachmentCount = 1,
1232 .pColorAttachments = &colorAttachment,
1233 .pDepthAttachment = &depthAttachment,
1234 };
1235
1236 ctx->vk.vkCmdBeginRendering(ctx->commandBuffers[ctx->currentFrame], &renderingInfo);
1237}

◆ sbgl_gfx_BindBuffer()

void sbgl_gfx_BindBuffer ( sbgl_GfxContext * ctx,
sbgl_Buffer buffer,
sbgl_BufferUsage usage )

Definition at line 1970 of file sbgl_backend_vulkan.c.

1970 {
1971 if (handle == SBGL_INVALID_HANDLE)
1972 return;
1973 uint32_t index = (uint32_t)handle - 1;
1974 if (index >= ctx->limits.maxBuffers || !ctx->bufferActive[index])
1975 return;
1976
1977 if (usage == SBGL_BUFFER_USAGE_VERTEX) {
1978 VkDeviceSize offsets[] = { 0 };
1979 ctx->vk.vkCmdBindVertexBuffers(
1980 ctx->commandBuffers[ctx->currentFrame],
1981 0,
1982 1,
1983 &ctx->buffers[index].handle,
1984 offsets
1985 );
1986 } else if (usage == SBGL_BUFFER_USAGE_INDEX) {
1987 ctx->vk.vkCmdBindIndexBuffer(
1988 ctx->commandBuffers[ctx->currentFrame],
1989 ctx->buffers[index].handle,
1990 0,
1991 VK_INDEX_TYPE_UINT32
1992 );
1993 }
1994}

◆ sbgl_gfx_BindComputePipeline()

void sbgl_gfx_BindComputePipeline ( sbgl_GfxContext * ctx,
sbgl_ComputePipeline pipeline )

Definition at line 1850 of file sbgl_backend_vulkan.c.

1850 {
1851 /* The currently active command buffer is updated to utilize the specified compute
1852 pipeline for all subsequent dispatch operations. */
1853 if (handle == SBGL_INVALID_HANDLE) {
1854 ctx->boundComputePipeline = SBGL_INVALID_HANDLE;
1855 return;
1856 }
1857 uint32_t index = (uint32_t)handle - 1;
1858 if (index >= ctx->limits.maxPipelines || !ctx->computePipelines[index].active)
1859 return;
1860
1861 ctx->vk.vkCmdBindPipeline(
1862 ctx->commandBuffers[ctx->currentFrame],
1863 VK_PIPELINE_BIND_POINT_COMPUTE,
1864 ctx->computePipelines[index].handle
1865 );
1866 ctx->boundComputePipeline = handle;
1867}

◆ sbgl_gfx_BindPipeline()

void sbgl_gfx_BindPipeline ( sbgl_GfxContext * ctx,
sbgl_Pipeline pipeline )

Definition at line 1942 of file sbgl_backend_vulkan.c.

1942 {
1943 if (handle == SBGL_INVALID_HANDLE)
1944 return;
1945 uint32_t index = (uint32_t)handle - 1;
1946 if (index >= ctx->limits.maxPipelines || !ctx->pipelines[index].active)
1947 return;
1948
1949 ctx->vk.vkCmdBindPipeline(
1950 ctx->commandBuffers[ctx->currentFrame],
1951 VK_PIPELINE_BIND_POINT_GRAPHICS,
1952 ctx->pipelines[index].handle
1953 );
1954 ctx->boundPipeline = handle;
1955
1956 VkViewport viewport = {
1957 .x = 0.0f,
1958 .y = (float)ctx->swapchainExtent.height,
1959 .width = (float)ctx->swapchainExtent.width,
1960 .height = -(float)ctx->swapchainExtent.height,
1961 .minDepth = 0.0f,
1962 .maxDepth = 1.0f,
1963 };
1964 ctx->vk.vkCmdSetViewport(ctx->commandBuffers[ctx->currentFrame], 0, 1, &viewport);
1965
1966 VkRect2D scissor = { .offset = { 0, 0 }, .extent = ctx->swapchainExtent };
1967 ctx->vk.vkCmdSetScissor(ctx->commandBuffers[ctx->currentFrame], 0, 1, &scissor);
1968}

◆ sbgl_gfx_CreateBuffer()

sbgl_Buffer sbgl_gfx_CreateBuffer ( sbgl_GfxContext * ctx,
sbgl_BufferUsage usage,
size_t size,
const void * data )

Definition at line 1323 of file sbgl_backend_vulkan.c.

1323 {
1324 /* Search for an available buffer slot in the internal tracking arrays. */
1325 uint32_t index = 0;
1326 for (; index < ctx->limits.maxBuffers; index++) {
1327 if (!ctx->bufferActive[index])
1328 break;
1329 }
1330 if (index == ctx->limits.maxBuffers)
1331 return SBGL_INVALID_HANDLE;
1332
1333 /* Identify the target memory heap based on the buffer's intended usage.
1334 Vertex and index buffers are assigned to the static heap, while storage
1335 buffers utilize the managed heap for persistence. */
1336 sbgl_HeapType heapType = SBGL_HEAP_TYPE_DYNAMIC;
1337 if (usage & (SBGL_BUFFER_USAGE_VERTEX | SBGL_BUFFER_USAGE_INDEX)) {
1338 heapType = SBGL_HEAP_TYPE_STATIC;
1339 } else if (usage & SBGL_BUFFER_USAGE_STORAGE) {
1340 heapType = SBGL_HEAP_TYPE_MANAGED;
1341 }
1342
1343 VkBufferCreateInfo bufferInfo = {
1344 .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
1345 .size = size,
1346 .usage = (usage & SBGL_BUFFER_USAGE_VERTEX ? VK_BUFFER_USAGE_VERTEX_BUFFER_BIT : 0) |
1347 (usage & SBGL_BUFFER_USAGE_INDEX ? VK_BUFFER_USAGE_INDEX_BUFFER_BIT : 0) |
1348 (usage & SBGL_BUFFER_USAGE_STORAGE ? VK_BUFFER_USAGE_STORAGE_BUFFER_BIT : 0) |
1349 (usage & SBGL_BUFFER_USAGE_INDIRECT ? VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT : 0) |
1350 (usage & SBGL_BUFFER_USAGE_TRANSFER_DST ? VK_BUFFER_USAGE_TRANSFER_DST_BIT : 0) |
1351 VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT,
1352 .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
1353 };
1354
1355 SBGL_VulkanBuffer* buffer = &ctx->buffers[index];
1356 if (ctx->vk.vkCreateBuffer(ctx->device, &bufferInfo, NULL, &buffer->handle) != VK_SUCCESS) {
1357 return SBGL_INVALID_HANDLE;
1358 }
1359
1360 VkMemoryRequirements memRequirements;
1361 ctx->vk.vkGetBufferMemoryRequirements(ctx->device, buffer->handle, &memRequirements);
1362
1363 /* Sub-allocate the required memory range from the selected hybrid heap. */
1364 uint32_t offset = SBGL_INVALID_OFFSET;
1365 VkDeviceMemory heapMemory = VK_NULL_HANDLE;
1366 void* heapMappedBase = NULL;
1367
1368 switch (heapType) {
1369 case SBGL_HEAP_TYPE_STATIC:
1370 offset = static_heap_alloc(ctx, memRequirements.size);
1371 heapMemory = ctx->staticHeap.memory;
1372 heapMappedBase = ctx->staticHeap.mapped;
1373 break;
1374 case SBGL_HEAP_TYPE_DYNAMIC:
1375 offset = dynamic_heap_alloc(ctx, memRequirements.size);
1376 heapMemory = ctx->dynamicHeap.memory;
1377 heapMappedBase = ctx->dynamicHeap.mapped[ctx->currentFrame];
1378 break;
1379 case SBGL_HEAP_TYPE_MANAGED:
1380 offset = managed_heap_alloc(ctx, memRequirements.size);
1381 heapMemory = ctx->managedHeap.memory;
1382 heapMappedBase = ctx->managedHeap.mapped;
1383 break;
1384 }
1385
1386 if (offset == SBGL_INVALID_OFFSET) {
1387 ctx->vk.vkDestroyBuffer(ctx->device, buffer->handle, NULL);
1388 return SBGL_INVALID_HANDLE;
1389 }
1390
1391 /* Bind the buffer handle to the sub-allocated memory region within the heap. */
1392 ctx->vk.vkBindBufferMemory(ctx->device, buffer->handle, heapMemory, offset);
1393
1394 buffer->size = size;
1395 buffer->offset = offset;
1396 buffer->heapType = heapType;
1397 buffer->mapped = (char*)heapMappedBase + offset;
1398 ctx->bufferActive[index] = true;
1399
1400 /* If initial data is provided, perform an immediate memory copy to the
1401 persistently mapped buffer address. */
1402 if (data && buffer->mapped) {
1403 memcpy(buffer->mapped, data, size);
1404 }
1405
1406 return (sbgl_Buffer)(index + 1);
1407}
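
A short sketch of creating an immutable triangle vertex buffer with initial data (the layout is illustrative, positions only):

    const float verts[] = {
         0.0f,  0.5f, 0.0f,   /* top */
        -0.5f, -0.5f, 0.0f,   /* bottom left */
         0.5f, -0.5f, 0.0f,   /* bottom right */
    };
    sbgl_Buffer vbo = sbgl_gfx_CreateBuffer(ctx, SBGL_BUFFER_USAGE_VERTEX, sizeof(verts), verts);
    if (vbo == SBGL_INVALID_HANDLE) {
        /* No free slot or heap space; maxBuffers in sbgl_ResourceLimits may need to be raised. */
    }

Because the heaps are persistently mapped, the initial data is copied immediately with memcpy rather than through a staging pass.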

◆ sbgl_gfx_CreateComputePipeline()

sbgl_ComputePipeline sbgl_gfx_CreateComputePipeline ( sbgl_GfxContext * ctx,
sbgl_Shader shader )

Definition at line 1763 of file sbgl_backend_vulkan.c.

1763 {
1764 /* The system scans the internal pipeline storage for an available slot to allocate
1765 the new compute pipeline state. */
1766 uint32_t index = 0;
1767 for (; index < ctx->limits.maxPipelines; index++) {
1768 if (!ctx->computePipelines[index].active)
1769 break;
1770 }
1771 if (index == ctx->limits.maxPipelines)
1772 return SBGL_INVALID_HANDLE;
1773
1774 if (handle == SBGL_INVALID_HANDLE || handle > ctx->limits.maxShaders) {
1775 fprintf(stderr, "[Vulkan] Invalid compute shader handle\n");
1776 return SBGL_INVALID_HANDLE;
1777 }
1778 uint32_t shaderIndex = handle - 1;
1779 if (!ctx->shaders[shaderIndex].active || ctx->shaders[shaderIndex].stage != SBGL_SHADER_STAGE_COMPUTE) {
1780 fprintf(stderr, "[Vulkan] Invalid compute shader stage or inactive shader\n");
1781 return SBGL_INVALID_HANDLE;
1782 }
1783
1784 VkPipelineShaderStageCreateInfo stageInfo = {
1785 .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
1786 .stage = VK_SHADER_STAGE_COMPUTE_BIT,
1787 .module = ctx->shaders[shaderIndex].module,
1788 .pName = "main",
1789 };
1790
1791 VkPipelineLayoutCreateInfo layoutInfo = {
1792 .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
1793 };
1794
1795 /* The system utilizes a standardized push constant block across both graphics
1796 and compute pipelines to maintain architectural consistency. */
1797 VkPushConstantRange pushConstantRange = {
1798 .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
1799 .offset = 0,
1800 .size = SBGL_VK_PUSH_CONSTANT_SIZE,
1801 };
1802 layoutInfo.pushConstantRangeCount = 1;
1803 layoutInfo.pPushConstantRanges = &pushConstantRange;
1804
1805 if (ctx->vk.vkCreatePipelineLayout(
1806 ctx->device,
1807 &layoutInfo,
1808 NULL,
1809 &ctx->computePipelines[index].layout
1810 ) != VK_SUCCESS) {
1811 return SBGL_INVALID_HANDLE;
1812 }
1813
1814 VkComputePipelineCreateInfo pipelineInfo = {
1815 .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
1816 .stage = stageInfo,
1817 .layout = ctx->computePipelines[index].layout,
1818 };
1819
1820 if (ctx->vk.vkCreateComputePipelines(
1821 ctx->device,
1822 VK_NULL_HANDLE,
1823 1,
1824 &pipelineInfo,
1825 NULL,
1826 &ctx->computePipelines[index].handle
1827 ) != VK_SUCCESS) {
1828 ctx->vk.vkDestroyPipelineLayout(ctx->device, ctx->computePipelines[index].layout, NULL);
1829 return SBGL_INVALID_HANDLE;
1830 }
1831
1832 ctx->computePipelines[index].active = true;
1833 return (sbgl_ComputePipeline)(index + 1);
1834}
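
A sketch of the compute path end to end. spirvWords/spirvSize hold precompiled SPIR-V, particleCount is a placeholder, and the workgroup size of 64 is an assumption that must match the shader's local_size_x:

    sbgl_Shader cs = sbgl_gfx_LoadShader(ctx, SBGL_SHADER_STAGE_COMPUTE, spirvWords, spirvSize);
    sbgl_ComputePipeline simPipe = sbgl_gfx_CreateComputePipeline(ctx, cs);

    /* Inside a frame (after sbgl_gfx_BeginFrame): */
    sbgl_gfx_BindComputePipeline(ctx, simPipe);
    sbgl_gfx_DispatchCompute(ctx, (particleCount + 63) / 64, 1, 1);
    /* Make the writes visible to a later dependent dispatch. */
    sbgl_gfx_MemoryBarrier(ctx, SBGL_BARRIER_COMPUTE_TO_COMPUTE);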

◆ sbgl_gfx_CreatePipeline()

sbgl_Pipeline sbgl_gfx_CreatePipeline ( sbgl_GfxContext * ctx,
const sbgl_PipelineConfig * config )

Definition at line 1550 of file sbgl_backend_vulkan.c.

1550 {
1551 uint32_t index = 0;
1552 for (; index < ctx->limits.maxPipelines; index++) {
1553 if (!ctx->pipelines[index].active)
1554 break;
1555 }
1556 if (index == ctx->limits.maxPipelines)
1557 return SBGL_INVALID_HANDLE;
1558
1559 VkPipelineShaderStageCreateInfo shaderStages[2] = { 0 };
1560
1561 // Vertex Shader
1562 if (config->vertexShader == SBGL_INVALID_HANDLE || config->vertexShader > ctx->limits.maxShaders) {
1563 fprintf(stderr, "[Vulkan] Invalid vertex shader handle\n");
1564 return SBGL_INVALID_HANDLE;
1565 }
1566 uint32_t vsIndex = config->vertexShader - 1;
1567 if (!ctx->shaders[vsIndex].active || ctx->shaders[vsIndex].stage != SBGL_SHADER_STAGE_VERTEX) {
1568 fprintf(stderr, "[Vulkan] Invalid vertex shader stage or inactive shader\n");
1569 return SBGL_INVALID_HANDLE;
1570 }
1571 shaderStages[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
1572 shaderStages[0].stage = VK_SHADER_STAGE_VERTEX_BIT;
1573 shaderStages[0].module = ctx->shaders[vsIndex].module;
1574 shaderStages[0].pName = "main";
1575
1576 // Fragment Shader
1577 if (config->fragmentShader == SBGL_INVALID_HANDLE || config->fragmentShader > ctx->limits.maxShaders) {
1578 fprintf(stderr, "[Vulkan] Invalid fragment shader handle\n");
1579 return SBGL_INVALID_HANDLE;
1580 }
1581 uint32_t fsIndex = config->fragmentShader - 1;
1582 if (!ctx->shaders[fsIndex].active || ctx->shaders[fsIndex].stage != SBGL_SHADER_STAGE_FRAGMENT) {
1583 fprintf(stderr, "[Vulkan] Invalid fragment shader stage or inactive shader\n");
1584 return SBGL_INVALID_HANDLE;
1585 }
1586 shaderStages[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
1587 shaderStages[1].stage = VK_SHADER_STAGE_FRAGMENT_BIT;
1588 shaderStages[1].module = ctx->shaders[fsIndex].module;
1589 shaderStages[1].pName = "main";
1590
1591 VkVertexInputBindingDescription bindingDescription = {
1592 .binding = 0,
1593 .stride = config->vertexLayout.stride,
1594 .inputRate = VK_VERTEX_INPUT_RATE_VERTEX,
1595 };
1596
1597 SblArenaMark mark = sbl_arena_mark(ctx->arena);
1598 VkVertexInputAttributeDescription* attributeDescriptions = SBL_ARENA_PUSH_ARRAY(
1599 ctx->arena,
1600 VkVertexInputAttributeDescription,
1601 config->vertexLayout.attributeCount
1602 );
1603 if (!attributeDescriptions && config->vertexLayout.attributeCount > 0) {
1604 return SBGL_INVALID_HANDLE;
1605 }
1606 for (uint32_t i = 0; i < config->vertexLayout.attributeCount; i++) {
1607 attributeDescriptions[i].binding = 0;
1608 attributeDescriptions[i].location = config->vertexLayout.attributes[i].location;
1609 attributeDescriptions[i].format =
1610 sbgl_to_vk_format(config->vertexLayout.attributes[i].format);
1611 attributeDescriptions[i].offset = config->vertexLayout.attributes[i].offset;
1612 }
1613
1614 VkPipelineVertexInputStateCreateInfo vertexInputInfo = {
1615 .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
1616 .vertexBindingDescriptionCount = 1,
1617 .pVertexBindingDescriptions = &bindingDescription,
1618 .vertexAttributeDescriptionCount = config->vertexLayout.attributeCount,
1619 .pVertexAttributeDescriptions = attributeDescriptions,
1620 };
1621
1622 VkPipelineInputAssemblyStateCreateInfo inputAssembly = {
1623 .sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
1624 .topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
1625 .primitiveRestartEnable = VK_FALSE,
1626 };
1627
1628 VkPipelineViewportStateCreateInfo viewportState = {
1629 .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
1630 .viewportCount = 1,
1631 .scissorCount = 1,
1632 };
1633
1634 VkPipelineRasterizationStateCreateInfo rasterizer = {
1635 .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
1636 .depthClampEnable = VK_FALSE,
1637 .rasterizerDiscardEnable = VK_FALSE,
1638 .polygonMode = VK_POLYGON_MODE_FILL,
1639 .lineWidth = 1.0f,
1640 .cullMode = VK_CULL_MODE_BACK_BIT,
1641 .frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE,
1642 .depthBiasEnable = VK_FALSE,
1643 };
1644
1645 VkPipelineMultisampleStateCreateInfo multisampling = {
1646 .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
1647 .sampleShadingEnable = VK_FALSE,
1648 .rasterizationSamples = VK_SAMPLE_COUNT_1_BIT,
1649 };
1650
1651 VkPipelineDepthStencilStateCreateInfo depthStencil = {
1652 .sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
1653 .depthTestEnable = VK_TRUE,
1654 .depthWriteEnable = VK_TRUE,
1655 .depthCompareOp = VK_COMPARE_OP_LESS,
1656 .depthBoundsTestEnable = VK_FALSE,
1657 .stencilTestEnable = VK_FALSE,
1658 };
1659
1660 VkPipelineColorBlendAttachmentState colorBlendAttachment = {
1661 .colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
1662 VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT,
1663 .blendEnable = (config->blendMode != SBGL_BLEND_MODE_NONE) ? VK_TRUE : VK_FALSE,
1664 .srcColorBlendFactor = (config->blendMode == SBGL_BLEND_MODE_ADDITIVE) ? VK_BLEND_FACTOR_ONE : VK_BLEND_FACTOR_SRC_ALPHA,
1665 .dstColorBlendFactor = (config->blendMode == SBGL_BLEND_MODE_ADDITIVE) ? VK_BLEND_FACTOR_ONE : VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA,
1666 .colorBlendOp = VK_BLEND_OP_ADD,
1667 .srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE,
1668 .dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA,
1669 .alphaBlendOp = VK_BLEND_OP_ADD,
1670 };
1671
1672 VkPipelineColorBlendStateCreateInfo colorBlending = {
1673 .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
1674 .logicOpEnable = VK_FALSE,
1675 .attachmentCount = 1,
1676 .pAttachments = &colorBlendAttachment,
1677 };
1678
1679 VkDynamicState dynamicStates[] = { VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR };
1680 VkPipelineDynamicStateCreateInfo dynamicState = {
1681 .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
1682 .dynamicStateCount = 2,
1683 .pDynamicStates = dynamicStates,
1684 };
1685
1686 VkPipelineLayoutCreateInfo pipelineLayoutInfo = {
1687 .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
1688 };
1689
1690 VkPushConstantRange pushConstantRange = {
1691 .stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT,
1692 .offset = 0,
1693 .size = SBGL_VK_PUSH_CONSTANT_SIZE,
1694 };
1695 pipelineLayoutInfo.pushConstantRangeCount = 1;
1696 pipelineLayoutInfo.pPushConstantRanges = &pushConstantRange;
1697
1698 if (ctx->vk.vkCreatePipelineLayout(
1699 ctx->device,
1700 &pipelineLayoutInfo,
1701 NULL,
1702 &ctx->pipelines[index].layout
1703 ) != VK_SUCCESS) {
1704 sbl_arena_rewind(ctx->arena, mark);
1705 return SBGL_INVALID_HANDLE;
1706 }
1707
1708 VkPipelineRenderingCreateInfo renderingCreateInfo = {
1709 .sType = VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO,
1710 .colorAttachmentCount = 1,
1711 .pColorAttachmentFormats = &ctx->swapchainFormat,
1712 .depthAttachmentFormat = ctx->depthFormat,
1713 };
1714
1715 VkGraphicsPipelineCreateInfo pipelineInfo = {
1716 .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
1717 .pNext = &renderingCreateInfo,
1718 .stageCount = 2,
1719 .pStages = shaderStages,
1720 .pVertexInputState = &vertexInputInfo,
1721 .pInputAssemblyState = &inputAssembly,
1722 .pViewportState = &viewportState,
1723 .pRasterizationState = &rasterizer,
1724 .pMultisampleState = &multisampling,
1725 .pDepthStencilState = &depthStencil,
1726 .pColorBlendState = &colorBlending,
1727 .pDynamicState = &dynamicState,
1728 .layout = ctx->pipelines[index].layout,
1729 .renderPass = VK_NULL_HANDLE,
1730 .subpass = 0,
1731 };
1732
1733 if (ctx->vk.vkCreateGraphicsPipelines(
1734 ctx->device,
1735 VK_NULL_HANDLE,
1736 1,
1737 &pipelineInfo,
1738 NULL,
1739 &ctx->pipelines[index].handle
1740 ) != VK_SUCCESS) {
1741 ctx->vk.vkDestroyPipelineLayout(ctx->device, ctx->pipelines[index].layout, NULL);
1742 sbl_arena_rewind(ctx->arena, mark);
1743 return SBGL_INVALID_HANDLE;
1744 }
1745
1746 sbl_arena_rewind(ctx->arena, mark);
1747 ctx->pipelines[index].active = true;
1748 return (sbgl_Pipeline)(index + 1);
1749}

◆ sbgl_gfx_DestroyBuffer()

void sbgl_gfx_DestroyBuffer ( sbgl_GfxContext * ctx,
sbgl_Buffer buffer )

Definition at line 1409 of file sbgl_backend_vulkan.c.

1409 {
1410 /* The system releases the GPU-side buffer handle and, if the memory was
1411 allocated from the managed heap, returns the range to the sub-allocator
1412 to mitigate memory fragmentation. Static and Dynamic allocations are
1413 reclaimed automatically or persist until shutdown. */
1414 if (handle == SBGL_INVALID_HANDLE)
1415 return;
1416 uint32_t index = (uint32_t)handle - 1;
1417 if (index >= ctx->limits.maxBuffers || !ctx->bufferActive[index])
1418 return;
1419
1420 SBGL_VulkanBuffer* buffer = &ctx->buffers[index];
1421 ctx->vk.vkDestroyBuffer(ctx->device, buffer->handle, NULL);
1422
1423 if (buffer->heapType == SBGL_HEAP_TYPE_MANAGED) {
1424 managed_heap_free(ctx, buffer->offset);
1425 }
1426
1427 ctx->bufferActive[index] = false;
1428}

◆ sbgl_gfx_DestroyBufferDeferred()

void sbgl_gfx_DestroyBufferDeferred ( sbgl_GfxContext * ctx,
sbgl_Buffer buffer )

Marks a buffer for destruction after current frames complete.

This function should be used for temporary buffers that are submitted for GPU execution in the current frame and must not be destroyed until the GPU has finished using them.

Parameters
ctx  The graphics context.
buffer  Handle to the buffer to destroy.

Definition at line 1479 of file sbgl_backend_vulkan.c.

1479 {
1480 /* The system queues the buffer for destruction after the current frame's GPU work
1481 is guaranteed to be complete, preventing premature release of in-flight resources. */
1482 if (ctx->deferredCount[ctx->currentFrame] < 64) {
1483 ctx->deferredBuffers[ctx->currentFrame][ctx->deferredCount[ctx->currentFrame]++] = handle;
1484 } else {
1485 /* If the deferred queue is full, the system falls back to immediate destruction
1486 after a device idle wait to maintain safety at the cost of performance. */
1487 sbgl_gfx_DeviceWaitIdle(ctx);
1488 sbgl_gfx_DestroyBuffer(ctx, handle);
1489 }
1490}
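
A sketch of the intended pattern: a buffer built for this frame only is handed to the deferred queue immediately after its last use is recorded (bytes is a placeholder size):

    sbgl_Buffer scratch = sbgl_gfx_CreateBuffer(ctx, SBGL_BUFFER_USAGE_STORAGE, bytes, NULL);
    /* ... record compute/draw commands this frame that read or write scratch ... */
    sbgl_gfx_DestroyBufferDeferred(ctx, scratch); /* destroyed once this frame slot's fence signals */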

◆ sbgl_gfx_DestroyComputePipeline()

void sbgl_gfx_DestroyComputePipeline ( sbgl_GfxContext * ctx,
sbgl_ComputePipeline pipeline )

Definition at line 1836 of file sbgl_backend_vulkan.c.

1836 {
1837 /* The system releases the GPU-side pipeline and layout resources and marks
1838 the internal slot as inactive for future reuse. */
1839 if (handle == SBGL_INVALID_HANDLE)
1840 return;
1841 uint32_t index = (uint32_t)handle - 1;
1842 if (index >= ctx->limits.maxPipelines || !ctx->computePipelines[index].active)
1843 return;
1844
1845 ctx->vk.vkDestroyPipeline(ctx->device, ctx->computePipelines[index].handle, NULL);
1846 ctx->vk.vkDestroyPipelineLayout(ctx->device, ctx->computePipelines[index].layout, NULL);
1847 ctx->computePipelines[index].active = false;
1848}

◆ sbgl_gfx_DestroyPipeline()

void sbgl_gfx_DestroyPipeline ( sbgl_GfxContext * ctx,
sbgl_Pipeline pipeline )

Definition at line 1751 of file sbgl_backend_vulkan.c.

1751 {
1752 if (handle == SBGL_INVALID_HANDLE)
1753 return;
1754 uint32_t index = (uint32_t)handle - 1;
1755 if (index >= ctx->limits.maxPipelines || !ctx->pipelines[index].active)
1756 return;
1757
1758 ctx->vk.vkDestroyPipeline(ctx->device, ctx->pipelines[index].handle, NULL);
1759 ctx->vk.vkDestroyPipelineLayout(ctx->device, ctx->pipelines[index].layout, NULL);
1760 ctx->pipelines[index].active = false;
1761}

◆ sbgl_gfx_DestroyShader()

void sbgl_gfx_DestroyShader ( sbgl_GfxContext * ctx,
sbgl_Shader shader )

Definition at line 1539 of file sbgl_backend_vulkan.c.

1539 {
1540 if (handle == SBGL_INVALID_HANDLE)
1541 return;
1542 uint32_t index = (uint32_t)handle - 1;
1543 if (index >= ctx->limits.maxShaders || !ctx->shaders[index].active)
1544 return;
1545
1546 ctx->vk.vkDestroyShaderModule(ctx->device, ctx->shaders[index].module, NULL);
1547 ctx->shaders[index].active = false;
1548}

◆ sbgl_gfx_DeviceWaitIdle()

void sbgl_gfx_DeviceWaitIdle ( sbgl_GfxContext * ctx)

Definition at line 1316 of file sbgl_backend_vulkan.c.

1316 {
1317 if (ctx && ctx->device) {
1318 ctx->vk.vkDeviceWaitIdle(ctx->device);
1319 }
1320}

◆ sbgl_gfx_DispatchCompute()

void sbgl_gfx_DispatchCompute ( sbgl_GfxContext * ctx,
uint32_t groupCountX,
uint32_t groupCountY,
uint32_t groupCountZ )

Definition at line 1869 of file sbgl_backend_vulkan.c.

1869 {
1870 /* A compute dispatch command is recorded into the current frame's command buffer,
1871 triggering parallel execution across the specified workgroup dimensions. */
1872 ctx->vk.vkCmdDispatch(ctx->commandBuffers[ctx->currentFrame], x, y, z);
1873}

◆ sbgl_gfx_Draw()

void sbgl_gfx_Draw ( sbgl_GfxContext * ctx,
uint32_t vertexCount,
uint32_t firstVertex,
uint32_t instanceCount )

Definition at line 1996 of file sbgl_backend_vulkan.c.

1996 {
1997 ctx->vk.vkCmdDraw(ctx->commandBuffers[ctx->currentFrame], vertexCount, instanceCount, firstVertex, 0);
1998}

◆ sbgl_gfx_DrawIndexed()

void sbgl_gfx_DrawIndexed ( sbgl_GfxContext * ctx,
uint32_t indexCount,
uint32_t firstIndex,
int32_t vertexOffset,
uint32_t instanceCount )

Definition at line 2000 of file sbgl_backend_vulkan.c.

2006 {
2007 ctx->vk.vkCmdDrawIndexed(
2008 ctx->commandBuffers[ctx->currentFrame],
2009 indexCount,
2010 instanceCount,
2011 firstIndex,
2012 vertexOffset,
2013 0
2014 );
2015}

◆ sbgl_gfx_DrawIndirect()

void sbgl_gfx_DrawIndirect ( sbgl_GfxContext * ctx,
sbgl_Buffer buffer,
size_t offset,
uint32_t drawCount )

Submits a batch of draw calls stored in a GPU buffer.

Parameters
ctx  The graphics context.
buffer  Handle to the buffer containing an array of sbgl_IndirectCommand.
offset  The byte offset into the buffer where the commands begin.
drawCount  The number of commands to execute from the buffer.

Definition at line 2017 of file sbgl_backend_vulkan.c.

2022 {
2023 if (handle == SBGL_INVALID_HANDLE)
2024 return;
2025 uint32_t index = (uint32_t)handle - 1;
2026 if (index >= ctx->limits.maxBuffers || !ctx->bufferActive[index])
2027 return;
2028
2029 ctx->vk.vkCmdDrawIndexedIndirect(
2030 ctx->commandBuffers[ctx->currentFrame],
2031 ctx->buffers[index].handle,
2032 (VkDeviceSize)offset,
2033 drawCount,
2034 sizeof(sbgl_IndirectCommand)
2035 );
2036}
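
Because the backend records vkCmdDrawIndexedIndirect with a stride of sizeof(sbgl_IndirectCommand), the buffer must hold tightly packed records. A sketch, assuming the field names mirror the standard VkDrawIndexedIndirectCommand layout that the type's description references, and that an index buffer (ibo, a placeholder) is already created:

    sbgl_IndirectCommand cmds[2] = {
        { .indexCount = 36, .instanceCount = 1, .firstIndex = 0,  .vertexOffset = 0, .firstInstance = 0 },
        { .indexCount = 36, .instanceCount = 4, .firstIndex = 36, .vertexOffset = 0, .firstInstance = 0 },
    };
    sbgl_Buffer indirect = sbgl_gfx_CreateBuffer(ctx, SBGL_BUFFER_USAGE_INDIRECT, sizeof(cmds), cmds);

    sbgl_gfx_BindBuffer(ctx, ibo, SBGL_BUFFER_USAGE_INDEX); /* the draws are indexed */
    sbgl_gfx_DrawIndirect(ctx, indirect, 0, 2);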

◆ sbgl_gfx_EndFrame()

void sbgl_gfx_EndFrame ( sbgl_GfxContext * ctx)

Submits the current frame's commands and presents the image.

Definition at line 1276 of file sbgl_backend_vulkan.c.

1276 {
1277 ctx->vk.vkEndCommandBuffer(ctx->commandBuffers[ctx->currentFrame]);
1278
1279 VkPipelineStageFlags waitStages[] = { VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT };
1280
1281 /* The system utilizes a semaphore indexed by the current image to signal completion
1282 to the presentation engine, preventing reuse conflicts during high-frequency updates. */
1283 VkSemaphore signalSemaphore = ctx->renderFinishedSemaphores[ctx->currentImageIndex];
1284
1285 VkSubmitInfo submitInfo = {
1286 .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
1287 .waitSemaphoreCount = 1,
1288 .pWaitSemaphores = &ctx->imageAvailableSemaphores[ctx->semaphoreIndex],
1289 .pWaitDstStageMask = waitStages,
1290 .commandBufferCount = 1,
1291 .pCommandBuffers = &ctx->commandBuffers[ctx->currentFrame],
1292 .signalSemaphoreCount = 1,
1293 .pSignalSemaphores = &signalSemaphore,
1294 };
1295 ctx->vk
1296 .vkQueueSubmit(ctx->graphicsQueue, 1, &submitInfo, ctx->inFlightFences[ctx->currentFrame]);
1297
1298 VkPresentInfoKHR presentInfo = {
1299 .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
1300 .waitSemaphoreCount = 1,
1301 .pWaitSemaphores = &signalSemaphore,
1302 .swapchainCount = 1,
1303 .pSwapchains = &ctx->swapchain,
1304 .pImageIndices = &ctx->currentImageIndex,
1305 };
1306 VkResult result = ctx->vk.vkQueuePresentKHR(ctx->graphicsQueue, &presentInfo);
1307
1308 if (result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_SUBOPTIMAL_KHR) {
1309 recreate_swapchain(ctx);
1310 }
1311
1312 ctx->semaphoreIndex = (ctx->semaphoreIndex + 1) % SBGL_MAX_SWAPCHAIN_IMAGES;
1313 ctx->currentFrame = (ctx->currentFrame + 1) % SBGL_MAX_FRAMES_IN_FLIGHT;
1314}

◆ sbgl_gfx_EndRenderPass()

void sbgl_gfx_EndRenderPass ( sbgl_GfxContext * ctx)

Ends the current graphics rendering pass.

Definition at line 1239 of file sbgl_backend_vulkan.c.

1239 {
1240 /* The system records the ending timestamp at the conclusion of the frame's rendering commands.
1241 */
1242 ctx->vk.vkCmdWriteTimestamp(
1243 ctx->commandBuffers[ctx->currentFrame],
1244 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
1245 ctx->queryPool,
1246 ctx->currentFrame * 2 + 1
1247 );
1248
1249 ctx->vk.vkCmdEndRendering(ctx->commandBuffers[ctx->currentFrame]);
1250
1251 VkImageMemoryBarrier barrier = {
1252 .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
1253 .oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
1254 .newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
1255 .image = ctx->images[ctx->currentImageIndex],
1256 .subresourceRange = { .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
1257 .levelCount = 1,
1258 .layerCount = 1 },
1259 .srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
1260 .dstAccessMask = 0,
1261 };
1262 ctx->vk.vkCmdPipelineBarrier(
1263 ctx->commandBuffers[ctx->currentFrame],
1264 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
1265 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
1266 0,
1267 0,
1268 NULL,
1269 0,
1270 NULL,
1271 1,
1272 &barrier
1273 );
1274}

◆ sbgl_gfx_FillBuffer()

void sbgl_gfx_FillBuffer ( sbgl_GfxContext * ctx,
sbgl_Buffer buffer,
size_t offset,
size_t size,
uint32_t value )

Performs a hardware-accelerated buffer fill.

Definition at line 1430 of file sbgl_backend_vulkan.c.

1436 {
1437 /* A hardware-accelerated fill operation is recorded into the current frame's
1438 command buffer, utilizing the GPU's DMA engine for maximum performance. */
1439 if (handle == SBGL_INVALID_HANDLE)
1440 return;
1441 uint32_t index = (uint32_t)handle - 1;
1442 if (index >= ctx->limits.maxBuffers || !ctx->bufferActive[index])
1443 return;
1444
1445 ctx->vk.vkCmdFillBuffer(
1446 ctx->commandBuffers[ctx->currentFrame],
1447 ctx->buffers[index].handle,
1448 (VkDeviceSize)offset,
1449 (VkDeviceSize)size,
1450 value
1451 );
1452}

◆ sbgl_gfx_GetBufferDeviceAddress()

uint64_t sbgl_gfx_GetBufferDeviceAddress ( sbgl_GfxContext * ctx,
sbgl_Buffer buffer )

Retrieves the 64-bit GPU virtual address for a buffer.

Used primarily for passing buffer pointers to shaders via push constants or storage buffers when using VK_KHR_buffer_device_address.

Parameters
ctx  The graphics context.
buffer  The buffer to query.
Returns
The 64-bit device address, or 0 if retrieval failed.

Definition at line 1492 of file sbgl_backend_vulkan.c.

1492 {
1493 /* The system retrieves the 64-bit GPU virtual address for the specified buffer,
1494 enabling direct memory access within shaders via Buffer Device Address. */
1495 if (handle == SBGL_INVALID_HANDLE)
1496 return 0;
1497 uint32_t index = (uint32_t)handle - 1;
1498 if (index >= ctx->limits.maxBuffers || !ctx->bufferActive[index])
1499 return 0;
1500
1501 VkBufferDeviceAddressInfo info = {
1502 .sType = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO,
1503 .buffer = ctx->buffers[index].handle,
1504 };
1505
1506 return ctx->vk.vkGetBufferDeviceAddress(ctx->device, &info);
1507}

◆ sbgl_gfx_GetFrameIndex()

uint32_t sbgl_gfx_GetFrameIndex ( sbgl_GfxContext * ctx)

Retrieves the current backend frame index.

Definition at line 1454 of file sbgl_backend_vulkan.c.

1454 {
1455 /* Returns the current frame index, which is used by the core engine to
1456 manage multi-buffered resources. */
1457 return ctx->currentFrame;
1458}

◆ sbgl_gfx_GetGpuTime()

float sbgl_gfx_GetGpuTime ( sbgl_GfxContext * ctx)

Retrieves the elapsed GPU time for the previous frame in milliseconds.

Parameters
ctx  The graphics context.
Returns
The duration in milliseconds.

Definition at line 2099 of file sbgl_backend_vulkan.c.

2099 {
2100 /* The system retrieves the recorded timestamps from the GPU and calculates the elapsed time
2101 in milliseconds, providing a non-blocking performance measurement. */
2102 uint64_t results[2] = { 0 };
2103 VkResult res = ctx->vk.vkGetQueryPoolResults(
2104 ctx->device,
2105 ctx->queryPool,
2106 ctx->currentFrame * 2,
2107 2,
2108 sizeof(results),
2109 results,
2110 sizeof(uint64_t),
2111 VK_QUERY_RESULT_64_BIT
2112 );
2113
2114 if (res == VK_SUCCESS) {
2115 uint64_t start = results[0];
2116 uint64_t end = results[1];
2117 return (float)(end - start) * ctx->timestampPeriod / 1e6f;
2118 }
2119
2120 return 0.0f;
2121}

◆ sbgl_gfx_GetLastVkResult()

int32_t sbgl_gfx_GetLastVkResult ( sbgl_GfxContext * ctx)

Retrieves the last VkResult from the backend for error inspection.

Parameters
ctx  The graphics context.
Returns
The last VkResult code, or 0 if no error occurred.

Definition at line 2123 of file sbgl_backend_vulkan.c.

2123 {
2124 if (!ctx) return 0;
2125 return ctx->backendResult;
2126}

◆ sbgl_gfx_Init()

sbgl_GfxContext * sbgl_gfx_Init ( sbgl_Window * window,
struct SblArena * arena,
const sbgl_ResourceLimits * limits,
bool enableValidation )

Initializes the graphics backend with configurable resource limits.

Parameters
window  The platform window handle.
arena  The arena for persistent allocations.
limits  Pointer to resource limits, or NULL to fall back to the built-in defaults.
enableValidation  Whether to enable Vulkan validation layers.
Returns
A pointer to the graphics context, or NULL on failure.

Definition at line 985 of file sbgl_backend_vulkan.c.

985 {
986 if (volkInitialize() != VK_SUCCESS) {
987 fprintf(stderr, "[Vulkan] Failed to initialize volk\n");
988 return NULL;
989 }
990
991 sbgl_GfxContext* ctx = SBL_ARENA_PUSH_STRUCT_ZERO(arena, sbgl_GfxContext);
992 if (!ctx)
993 return NULL;
994
995 ctx->window = window;
996 ctx->arena = arena;
997
998 // Apply resource limits (use defaults if not provided)
999 if (limits) {
1000 ctx->limits = *limits;
1001 // Enforce minimums to prevent crashes
1002 if (ctx->limits.maxBuffers < 64) ctx->limits.maxBuffers = 64;
1003 if (ctx->limits.maxShaders < 16) ctx->limits.maxShaders = 16;
1004 if (ctx->limits.maxPipelines < 16) ctx->limits.maxPipelines = 16;
1005 } else {
1006 ctx->limits = sbgl_DefaultResourceLimits;
1007 }
1008
1009 // Dynamically allocate resource arrays from the arena
1010 // Use raw byte allocation since SBGL_Vulkan* types are defined later in this file
1011 ctx->bufferActive = (bool*)sbl_arena_alloc_zero(arena, sizeof(bool) * ctx->limits.maxBuffers);
1016
1017 if (!ctx->bufferActive || !ctx->buffers || !ctx->shaders || !ctx->pipelines || !ctx->computePipelines) {
1018 fprintf(stderr, "[Vulkan] Failed to allocate resource arrays\n");
1019 sbgl_gfx_Shutdown(ctx);
1020 return NULL;
1021 }
1022
1023 if (!create_instance(ctx, enableValidation) || !create_surface(ctx, window) || !select_physical_device(ctx) ||
1024 !create_logical_device(ctx) || !create_heaps(ctx) || !create_swapchain(ctx, window) ||
1025 !create_sync_and_command(ctx) || !create_transient_resources(ctx) ||
1026 !create_telemetry_resources(ctx)) {
1027 sbgl_gfx_Shutdown(ctx);
1028 return NULL;
1029 }
1030
1031 /* The query pool is reset on the host immediately after creation to ensure that all
1032 queries are in a valid state before the first attempt to retrieve results. */
1033 ctx->vk.vkResetQueryPool(ctx->device, ctx->queryPool, 0, SBGL_MAX_FRAMES_IN_FLIGHT * 2);
1034
1035 return ctx;
1036}

◆ sbgl_gfx_LoadShader()

sbgl_Shader sbgl_gfx_LoadShader ( sbgl_GfxContext * ctx,
sbgl_ShaderStage stage,
const uint32_t * bytecode,
size_t size )

Definition at line 1509 of file sbgl_backend_vulkan.c.

1514 {
1515 uint32_t index = 0;
1516 for (; index < ctx->limits.maxShaders; index++) {
1517 if (!ctx->shaders[index].active)
1518 break;
1519 }
1520 if (index == ctx->limits.maxShaders)
1521 return SBGL_INVALID_HANDLE;
1522
1523 VkShaderModuleCreateInfo createInfo = {
1524 .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
1525 .codeSize = size,
1526 .pCode = bytecode,
1527 };
1528
1529 if (ctx->vk.vkCreateShaderModule(ctx->device, &createInfo, NULL, &ctx->shaders[index].module) !=
1530 VK_SUCCESS) {
1531 return SBGL_INVALID_HANDLE;
1532 }
1533
1534 ctx->shaders[index].stage = stage;
1535 ctx->shaders[index].active = true;
1536 return (sbgl_Shader)(index + 1);
1537}

◆ sbgl_gfx_MapBuffer()

void * sbgl_gfx_MapBuffer ( sbgl_GfxContext * ctx,
sbgl_Buffer buffer )

Definition at line 1460 of file sbgl_backend_vulkan.c.

1460 {
1461 /* The system returns the persistently mapped pointer for the specified buffer,
1462 enabling high-performance data updates without the overhead of repeated mapping. */
1463 if (handle == SBGL_INVALID_HANDLE)
1464 return NULL;
1465 uint32_t index = (uint32_t)handle - 1;
1466 if (index >= ctx->limits.maxBuffers || !ctx->bufferActive[index])
1467 return NULL;
1468
1469 return ctx->buffers[index].mapped;
1470}

◆ sbgl_gfx_MemoryBarrier()

void sbgl_gfx_MemoryBarrier ( sbgl_GfxContext * ctx,
sbgl_BarrierType type )

Definition at line 1875 of file sbgl_backend_vulkan.c.

1875 {
1876 /* The system injects a pipeline barrier into the command stream to synchronize
1877 memory access between different execution stages, preventing race conditions. */
1878 VkMemoryBarrier barrier = { .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER };
1879 VkPipelineStageFlags srcStage = 0;
1880 VkPipelineStageFlags dstStage = 0;
1881
1882 switch (type) {
1883 case SBGL_BARRIER_COMPUTE_TO_COMPUTE:
1884 /* Synchronizes compute and transfer (fill) writes to be visible to
1885 subsequent compute operations. */
1886 barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT;
1887 barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
1888 srcStage = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT;
1889 dstStage = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
1890 break;
1891 case SBGL_BARRIER_COMPUTE_TO_INDIRECT:
1892 /* Synchronizes compute writes to SSBOs for use in indirect draw command buffers. */
1893 barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
1894 barrier.dstAccessMask = VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
1895 srcStage = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
1896 dstStage = VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
1897 break;
1898 case SBGL_BARRIER_COMPUTE_TO_GRAPHICS:
1899 /* Synchronizes compute writes to be visible to vertex input and shader stages. */
1900 barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
1901 barrier.dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_SHADER_READ_BIT;
1902 srcStage = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
1903 dstStage = VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
1904 break;
1905 case SBGL_BARRIER_GRAPHICS_TO_COMPUTE:
1906 /* Synchronizes graphics writes to be visible to subsequent compute operations. */
1907 barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
1908 barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
1909 srcStage = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
1910 dstStage = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
1911 break;
1912 case SBGL_BARRIER_HOST_TO_COMPUTE:
1913 /* Synchronizes host writes to be visible to subsequent compute operations. */
1914 barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
1915 barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
1916 srcStage = VK_PIPELINE_STAGE_HOST_BIT;
1917 dstStage = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
1918 break;
1919 case SBGL_BARRIER_HOST_TO_GRAPHICS:
1920 /* Synchronizes host writes to be visible to subsequent graphics (vertex) operations. */
1921 barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
1922 barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
1923 srcStage = VK_PIPELINE_STAGE_HOST_BIT;
1924 dstStage = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
1925 break;
1926 }
1927
1928 ctx->vk.vkCmdPipelineBarrier(
1929 ctx->commandBuffers[ctx->currentFrame],
1930 srcStage,
1931 dstStage,
1932 0,
1933 1,
1934 &barrier,
1935 0,
1936 NULL,
1937 0,
1938 NULL
1939 );
1940}

◆ sbgl_gfx_PushConstants()

void sbgl_gfx_PushConstants ( sbgl_GfxContext * ctx,
size_t size,
const void * data )

Definition at line 2066 of file sbgl_backend_vulkan.c.

2066 {
2067 /* Push constants are submitted to both the currently bound graphics and compute
2068 pipelines to ensure that metadata is available across all execution stages. */
2069 if (size > SBGL_VK_PUSH_CONSTANT_SIZE) {
2070 fprintf(stderr, "[Vulkan] Push constant size (%zu) exceeds maximum (%d)\n", size, SBGL_VK_PUSH_CONSTANT_SIZE);
2071 return;
2072 }
2073
2074 if (ctx->boundPipeline != SBGL_INVALID_HANDLE) {
2075 uint32_t index = (uint32_t)ctx->boundPipeline - 1;
2076 ctx->vk.vkCmdPushConstants(
2077 ctx->commandBuffers[ctx->currentFrame],
2078 ctx->pipelines[index].layout,
2079 VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT,
2080 0,
2081 (uint32_t)size,
2082 data
2083 );
2084 }
2085
2086 if (ctx->boundComputePipeline != SBGL_INVALID_HANDLE) {
2087 uint32_t index = (uint32_t)ctx->boundComputePipeline - 1;
2088 ctx->vk.vkCmdPushConstants(
2089 ctx->commandBuffers[ctx->currentFrame],
2090 ctx->computePipelines[index].layout,
2091 VK_SHADER_STAGE_COMPUTE_BIT,
2092 0,
2093 (uint32_t)size,
2094 data
2095 );
2096 }
2097}

◆ sbgl_gfx_Shutdown()

void sbgl_gfx_Shutdown ( sbgl_GfxContext * ctx)

Definition at line 1038 of file sbgl_backend_vulkan.c.

1038 {
1039 if (!ctx)
1040 return;
1041
1042 if (ctx->device) {
1043 ctx->vk.vkDeviceWaitIdle(ctx->device);
1044
1045 // Clean up all active buffers
1046 for (uint32_t i = 0; i < ctx->limits.maxBuffers; i++) {
1047 if (ctx->bufferActive[i]) {
1048 sbgl_gfx_DestroyBuffer(ctx, (sbgl_Buffer)(i + 1));
1049 }
1050 }
1051
1052 // Clean up all active shaders
1053 for (uint32_t i = 0; i < ctx->limits.maxShaders; i++) {
1054 if (ctx->shaders[i].active) {
1055 sbgl_gfx_DestroyShader(ctx, (sbgl_Shader)(i + 1));
1056 }
1057 }
1058
1059 // Clean up all active pipelines
1060 for (uint32_t i = 0; i < ctx->limits.maxPipelines; i++) {
1061 if (ctx->pipelines[i].active) {
1062 sbgl_gfx_DestroyPipeline(ctx, (sbgl_Pipeline)(i + 1));
1063 }
1064 if (ctx->computePipelines[i].active) {
1065 sbgl_gfx_DestroyComputePipeline(ctx, (sbgl_ComputePipeline)(i + 1));
1066 }
1067 }
1068
1069 // Process any remaining deferred buffers
1070 for (uint32_t f = 0; f < SBGL_MAX_FRAMES_IN_FLIGHT; f++) {
1071 for (uint32_t i = 0; i < ctx->deferredCount[f]; i++) {
1072 sbgl_gfx_DestroyBuffer(ctx, ctx->deferredBuffers[f][i]);
1073 }
1074 ctx->deferredCount[f] = 0;
1075 }
1076
1077 for (uint32_t i = 0; i < SBGL_MAX_FRAMES_IN_FLIGHT; i++) {
1078 ctx->vk.vkDestroyFence(ctx->device, ctx->inFlightFences[i], NULL);
1079 }
1080
1081 for (uint32_t i = 0; i < SBGL_MAX_SWAPCHAIN_IMAGES; i++) {
1082 if (ctx->imageAvailableSemaphores[i] != VK_NULL_HANDLE) {
1083 ctx->vk.vkDestroySemaphore(ctx->device, ctx->imageAvailableSemaphores[i], NULL);
1084 }
1085 if (ctx->renderFinishedSemaphores[i] != VK_NULL_HANDLE) {
1086 ctx->vk.vkDestroySemaphore(ctx->device, ctx->renderFinishedSemaphores[i], NULL);
1087 }
1088 }
1089 ctx->vk.vkDestroyQueryPool(ctx->device, ctx->queryPool, NULL);
1090 ctx->vk.vkDestroyCommandPool(ctx->device, ctx->commandPool, NULL);
1091
1092 ctx->vk.vkFreeMemory(ctx->device, ctx->staticHeap.memory, NULL);
1093 ctx->vk.vkFreeMemory(ctx->device, ctx->dynamicHeap.memory, NULL);
1094 ctx->vk.vkFreeMemory(ctx->device, ctx->managedHeap.memory, NULL);
1095
1096 cleanup_swapchain(ctx);
1097 ctx->vk.vkDestroyDevice(ctx->device, NULL);
1098 }
1099 if (ctx->instance) {
1100 vkDestroySurfaceKHR(ctx->instance, ctx->surface, NULL);
1101 vkDestroyInstance(ctx->instance, NULL);
1102 }
1103}

◆ sbgl_gfx_UnmapBuffer()

void sbgl_gfx_UnmapBuffer ( sbgl_GfxContext * ctx,
sbgl_Buffer buffer )

Definition at line 1472 of file sbgl_backend_vulkan.c.

1472 {
1473 /* Persistent mapping remains active for the buffer's lifecycle, so unmapping
1474 is a no-op to maintain API compatibility while maximizing performance. */
1475 (void)ctx;
1476 (void)handle;
1477}