SBgl 0.1.0
A graphics framework in C99
Loading...
Searching...
No Matches
sbgl_backend_vulkan.c File Reference
#include "core/sbgl_platform.h"
#include "core/sbl_arena.h"
#include "core/sbgl_internal_log.h"
#include "sbgl_graphics_hal.h"
#include <volk.h>
#include <stdio.h>
#include <string.h>
Include dependency graph for sbgl_backend_vulkan.c:

Go to the source code of this file.

Data Structures

struct  SBGL_VulkanBuffer
 
struct  SBGL_VulkanShader
 
struct  SBGL_VulkanPipeline
 
struct  SBGL_VulkanComputePipeline
 
struct  sbgl_GfxMemoryRange
 
struct  sbgl_GfxStaticHeap
 
struct  sbgl_GfxDynamicHeap
 
struct  sbgl_GfxManagedHeap
 
struct  sbgl_GfxContext
 

Macros

#define VK_NO_PROTOTYPES
 
#define SBGL_MAX_FRAMES_IN_FLIGHT   2
 
#define SBGL_MAX_SWAPCHAIN_IMAGES   8
 
#define SBGL_TRANSIENT_BUFFER_SIZE   (16 * 1024 * 1024)
 
#define SBGL_STATIC_HEAP_SIZE   (128 * 1024 * 1024)
 
#define SBGL_DYNAMIC_HEAP_SIZE   (128 * 1024 * 1024)
 
#define SBGL_MANAGED_HEAP_SIZE   (512 * 1024 * 1024)
 
#define SBGL_VK_PUSH_CONSTANT_SIZE   128
 

Enumerations

enum  SBGL_HeapType { SBGL_HEAP_TYPE_STATIC , SBGL_HEAP_TYPE_DYNAMIC , SBGL_HEAP_TYPE_MANAGED }
 

Functions

static void cleanup_swapchain (sbgl_GfxContext *ctx)
 
static bool create_swapchain (sbgl_GfxContext *ctx, sbgl_Window *window)
 
static void recreate_swapchain (sbgl_GfxContext *ctx)
 
static VKAPI_ATTR VkBool32 VKAPI_CALL sbgl_vk_debug_callback (VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VkDebugUtilsMessageTypeFlagsEXT messageType, const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData, void *pUserData)
 
static void sbgl_setup_debug_utils (sbgl_GfxContext *ctx)
 
static VkFormat sbgl_to_vk_format (sbgl_Format format)
 
static uint32_t find_memory_type (sbgl_GfxContext *ctx, uint32_t typeFilter, VkMemoryPropertyFlags properties)
 
static uint32_t static_heap_alloc (sbgl_GfxContext *ctx, size_t size)
 
static uint32_t dynamic_heap_alloc (sbgl_GfxContext *ctx, size_t size)
 
static uint32_t managed_heap_alloc (sbgl_GfxContext *ctx, size_t size)
 
static void managed_heap_free (sbgl_GfxContext *ctx, uint32_t offset)
 
static bool create_heaps (sbgl_GfxContext *ctx)
 
static bool create_instance (sbgl_GfxContext *ctx, bool enableValidation)
 
static bool create_surface (sbgl_GfxContext *ctx, sbgl_Window *window)
 
static bool select_physical_device (sbgl_GfxContext *ctx)
 
static bool create_logical_device (sbgl_GfxContext *ctx)
 
static bool find_depth_format (sbgl_GfxContext *ctx)
 
static bool create_depth_resources (sbgl_GfxContext *ctx)
 
static bool create_sync_and_command (sbgl_GfxContext *ctx)
 
static bool create_telemetry_resources (sbgl_GfxContext *ctx)
 
static bool create_transient_resources (sbgl_GfxContext *ctx)
 
sbgl_GfxContext * sbgl_gfx_Init (sbgl_Window *window, struct SblArena *arena, const sbgl_ResourceLimits *limits, bool enableValidation)
 Initializes the graphics backend with configurable resource limits.
 
void sbgl_gfx_Shutdown (sbgl_GfxContext *ctx)
 
bool sbgl_gfx_BeginFrame (sbgl_GfxContext *ctx)
 Starts a new frame, acquiring an image and starting the command buffer.
 
void sbgl_gfx_BeginRenderPass (sbgl_GfxContext *ctx, float r, float g, float b, float a)
 Starts a graphics rendering pass.
 
void sbgl_gfx_EndRenderPass (sbgl_GfxContext *ctx)
 Ends the current graphics rendering pass.
 
void sbgl_gfx_EndFrame (sbgl_GfxContext *ctx)
 Submits the current frame's commands and presents the image.
 
void sbgl_gfx_DeviceWaitIdle (sbgl_GfxContext *ctx)
 
sbgl_Buffer sbgl_gfx_CreateBuffer (sbgl_GfxContext *ctx, sbgl_BufferUsage usage, size_t size, const void *data)
 
void sbgl_gfx_DestroyBuffer (sbgl_GfxContext *ctx, sbgl_Buffer handle)
 
void sbgl_gfx_FillBuffer (sbgl_GfxContext *ctx, sbgl_Buffer handle, size_t offset, size_t size, uint32_t value)
 Performs a hardware-accelerated buffer fill.
 
uint32_t sbgl_gfx_GetFrameIndex (sbgl_GfxContext *ctx)
 Retrieves the current backend frame index.
 
void * sbgl_gfx_MapBuffer (sbgl_GfxContext *ctx, sbgl_Buffer handle)
 
void sbgl_gfx_UnmapBuffer (sbgl_GfxContext *ctx, sbgl_Buffer handle)
 
void sbgl_gfx_DestroyBufferDeferred (sbgl_GfxContext *ctx, sbgl_Buffer handle)
 Marks a buffer for destruction after current frames complete.
 
uint64_t sbgl_gfx_GetBufferDeviceAddress (sbgl_GfxContext *ctx, sbgl_Buffer handle)
 Retrieves the 64-bit GPU virtual address for a buffer.
 
sbgl_Shader sbgl_gfx_LoadShader (sbgl_GfxContext *ctx, sbgl_ShaderStage stage, const uint32_t *bytecode, size_t size)
 
void sbgl_gfx_DestroyShader (sbgl_GfxContext *ctx, sbgl_Shader handle)
 
sbgl_Pipeline sbgl_gfx_CreatePipeline (sbgl_GfxContext *ctx, const sbgl_PipelineConfig *config)
 
void sbgl_gfx_DestroyPipeline (sbgl_GfxContext *ctx, sbgl_Pipeline handle)
 
sbgl_ComputePipeline sbgl_gfx_CreateComputePipeline (sbgl_GfxContext *ctx, sbgl_Shader handle)
 
void sbgl_gfx_DestroyComputePipeline (sbgl_GfxContext *ctx, sbgl_ComputePipeline handle)
 
void sbgl_gfx_BindComputePipeline (sbgl_GfxContext *ctx, sbgl_ComputePipeline handle)
 
void sbgl_gfx_DispatchCompute (sbgl_GfxContext *ctx, uint32_t x, uint32_t y, uint32_t z)
 
void sbgl_gfx_MemoryBarrier (sbgl_GfxContext *ctx, sbgl_BarrierType type)
 
void sbgl_gfx_BindPipeline (sbgl_GfxContext *ctx, sbgl_Pipeline handle)
 
void sbgl_gfx_BindBuffer (sbgl_GfxContext *ctx, sbgl_Buffer handle, sbgl_BufferUsage usage)
 
void sbgl_gfx_Draw (sbgl_GfxContext *ctx, uint32_t vertexCount, uint32_t firstVertex, uint32_t instanceCount)
 
void sbgl_gfx_DrawIndexed (sbgl_GfxContext *ctx, uint32_t indexCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t instanceCount)
 
void sbgl_gfx_DrawIndirect (sbgl_GfxContext *ctx, sbgl_Buffer handle, size_t offset, uint32_t drawCount)
 Submits a batch of draw calls stored in a GPU buffer.
 
sbgl_GfxTransientAllocation sbgl_gfx_AllocateTransient (sbgl_GfxContext *ctx, size_t size, uint32_t alignment)
 Allocates a slice of GPU-visible memory for transient per-frame data.
 
void sbgl_gfx_PushConstants (sbgl_GfxContext *ctx, size_t size, const void *data)
 
float sbgl_gfx_GetGpuTime (sbgl_GfxContext *ctx)
 Retrieves the elapsed GPU time for the previous frame in milliseconds.
 
int32_t sbgl_gfx_GetLastVkResult (sbgl_GfxContext *ctx)
 Retrieves the last VkResult from the backend for error inspection.
 

Variables

static const sbgl_ResourceLimits sbgl_DefaultResourceLimits
 

Macro Definition Documentation

◆ SBGL_DYNAMIC_HEAP_SIZE

#define SBGL_DYNAMIC_HEAP_SIZE   (128 * 1024 * 1024)

Definition at line 22 of file sbgl_backend_vulkan.c.

◆ SBGL_MANAGED_HEAP_SIZE

#define SBGL_MANAGED_HEAP_SIZE   (512 * 1024 * 1024)

Definition at line 23 of file sbgl_backend_vulkan.c.

◆ SBGL_MAX_FRAMES_IN_FLIGHT

#define SBGL_MAX_FRAMES_IN_FLIGHT   2

Definition at line 18 of file sbgl_backend_vulkan.c.

◆ SBGL_MAX_SWAPCHAIN_IMAGES

#define SBGL_MAX_SWAPCHAIN_IMAGES   8

Definition at line 19 of file sbgl_backend_vulkan.c.

◆ SBGL_STATIC_HEAP_SIZE

#define SBGL_STATIC_HEAP_SIZE   (128 * 1024 * 1024)

Definition at line 21 of file sbgl_backend_vulkan.c.

◆ SBGL_TRANSIENT_BUFFER_SIZE

#define SBGL_TRANSIENT_BUFFER_SIZE   (16 * 1024 * 1024)

Definition at line 20 of file sbgl_backend_vulkan.c.

◆ SBGL_VK_PUSH_CONSTANT_SIZE

#define SBGL_VK_PUSH_CONSTANT_SIZE   128

Definition at line 197 of file sbgl_backend_vulkan.c.

◆ VK_NO_PROTOTYPES

#define VK_NO_PROTOTYPES

Definition at line 6 of file sbgl_backend_vulkan.c.

Enumeration Type Documentation

◆ SBGL_HeapType

Enumerator
SBGL_HEAP_TYPE_STATIC 
SBGL_HEAP_TYPE_DYNAMIC 
SBGL_HEAP_TYPE_MANAGED 

Definition at line 32 of file sbgl_backend_vulkan.c.

Function Documentation

◆ cleanup_swapchain()

static void cleanup_swapchain ( sbgl_GfxContext * ctx)
static

Definition at line 169 of file sbgl_backend_vulkan.c.

169 {
170 ctx->vk.vkDestroyImageView(ctx->device, ctx->depthImageView, NULL);
171 ctx->vk.vkDestroyImage(ctx->device, ctx->depthImage, NULL);
172 ctx->vk.vkFreeMemory(ctx->device, ctx->depthMemory, NULL);
173
174 for (uint32_t i = 0; i < ctx->imageCount; i++) {
175 ctx->vk.vkDestroyImageView(ctx->device, ctx->imageViews[i], NULL);
176 }
177 ctx->vk.vkDestroySwapchainKHR(ctx->device, ctx->swapchain, NULL);
178 sbl_arena_rewind(ctx->arena, ctx->swapchainMark);
179}
SBL_ARENA_DEF void sbl_arena_rewind(SblArena *arena, SblArenaMark mark)
SblArenaMark swapchainMark
VkSwapchainKHR swapchain
struct VolkDeviceTable vk
VkImageView * imageViews
VkDeviceMemory depthMemory

◆ create_depth_resources()

static bool create_depth_resources ( sbgl_GfxContext * ctx)
static

Definition at line 686 of file sbgl_backend_vulkan.c.

686 {
687 if (!find_depth_format(ctx))
688 return false;
689
690 VkImageCreateInfo imageInfo = {
691 .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
692 .imageType = VK_IMAGE_TYPE_2D,
693 .extent = { .width = ctx->swapchainExtent.width,
694 .height = ctx->swapchainExtent.height,
695 .depth = 1 },
696 .mipLevels = 1,
697 .arrayLayers = 1,
698 .format = ctx->depthFormat,
699 .tiling = VK_IMAGE_TILING_OPTIMAL,
700 .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
701 .usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
702 .samples = VK_SAMPLE_COUNT_1_BIT,
703 .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
704 };
705
706 if (ctx->vk.vkCreateImage(ctx->device, &imageInfo, NULL, &ctx->depthImage) != VK_SUCCESS)
707 return false;
708
709 VkMemoryRequirements memRequirements;
710 ctx->vk.vkGetImageMemoryRequirements(ctx->device, ctx->depthImage, &memRequirements);
711
712 VkMemoryAllocateInfo allocInfo = {
713 .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
714 .allocationSize = memRequirements.size,
715 .memoryTypeIndex = find_memory_type(
716 ctx,
717 memRequirements.memoryTypeBits,
718 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT
719 ),
720 };
721
722 if (ctx->vk.vkAllocateMemory(ctx->device, &allocInfo, NULL, &ctx->depthMemory) != VK_SUCCESS)
723 return false;
724
725 ctx->vk.vkBindImageMemory(ctx->device, ctx->depthImage, ctx->depthMemory, 0);
726
727 VkImageViewCreateInfo viewInfo = {
728 .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
729 .image = ctx->depthImage,
730 .viewType = VK_IMAGE_VIEW_TYPE_2D,
731 .format = ctx->depthFormat,
732 .subresourceRange = { .aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT,
733 .levelCount = 1,
734 .layerCount = 1 },
735 };
736
737 if (ctx->vk.vkCreateImageView(ctx->device, &viewInfo, NULL, &ctx->depthImageView) != VK_SUCCESS)
738 return false;
739
740 return true;
741}
static bool find_depth_format(sbgl_GfxContext *ctx)
static uint32_t find_memory_type(sbgl_GfxContext *ctx, uint32_t typeFilter, VkMemoryPropertyFlags properties)

◆ create_heaps()

static bool create_heaps ( sbgl_GfxContext * ctx)
static

Definition at line 401 of file sbgl_backend_vulkan.c.

401 {
402 /* The system initializes three distinct memory heaps to support diverse resource
403 allocation patterns: Static for long-lived assets, Dynamic for per-frame data,
404 and Managed for flexible sub-allocations. All heaps are host-visible and
405 persistently mapped for zero-copy access. */
406
407 uint32_t memoryTypeIndex = find_memory_type(
408 ctx,
409 0xFFFFFFFF,
410 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT
411 );
412
413 if (memoryTypeIndex == SBGL_INVALID_OFFSET) {
414 return false;
415 }
416
417 VkMemoryAllocateFlagsInfo flagsInfo = {
418 .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO,
419 .flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT,
420 };
421
422 VkMemoryAllocateInfo allocInfo = {
423 .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
424 .pNext = &flagsInfo,
425 .memoryTypeIndex = memoryTypeIndex,
426 };
427
428 // Static Heap
430 allocInfo.allocationSize = ctx->staticHeap.size;
431 if (ctx->vk.vkAllocateMemory(ctx->device, &allocInfo, NULL, &ctx->staticHeap.memory) != VK_SUCCESS) {
432 return false;
433 }
434 ctx->vk.vkMapMemory(ctx->device, ctx->staticHeap.memory, 0, ctx->staticHeap.size, 0, &ctx->staticHeap.mapped);
435 ctx->staticHeap.offset = 0;
436
437 // Dynamic Heap (split into two 64MB buffers for double buffering)
439 allocInfo.allocationSize = ctx->dynamicHeap.size;
440 if (ctx->vk.vkAllocateMemory(ctx->device, &allocInfo, NULL, &ctx->dynamicHeap.memory) != VK_SUCCESS) {
441 return false;
442 }
443 void* dynamicBase;
444 ctx->vk.vkMapMemory(ctx->device, ctx->dynamicHeap.memory, 0, ctx->dynamicHeap.size, 0, &dynamicBase);
445 ctx->dynamicHeap.mapped[0] = dynamicBase;
446 ctx->dynamicHeap.mapped[1] = (char*)dynamicBase + (SBGL_DYNAMIC_HEAP_SIZE / 2);
447 ctx->dynamicHeap.offset[0] = 0;
448 ctx->dynamicHeap.offset[1] = 0;
449
450 // Managed Heap
452 allocInfo.allocationSize = ctx->managedHeap.size;
453 if (ctx->vk.vkAllocateMemory(ctx->device, &allocInfo, NULL, &ctx->managedHeap.memory) != VK_SUCCESS) {
454 return false;
455 }
456 ctx->vk.vkMapMemory(ctx->device, ctx->managedHeap.memory, 0, ctx->managedHeap.size, 0, &ctx->managedHeap.mapped);
457 ctx->managedHeap.rangeCount = 1;
458 ctx->managedHeap.ranges[0] = (sbgl_GfxMemoryRange){ .offset = 0, .size = SBGL_MANAGED_HEAP_SIZE, .handle_index = 0 };
459
460 return true;
461}
#define SBGL_STATIC_HEAP_SIZE
#define SBGL_DYNAMIC_HEAP_SIZE
#define SBGL_MANAGED_HEAP_SIZE
#define SBGL_INVALID_OFFSET
Definition sbgl_types.h:9
sbgl_GfxDynamicHeap dynamicHeap
sbgl_GfxManagedHeap managedHeap
sbgl_GfxStaticHeap staticHeap
sbgl_GfxMemoryRange ranges[1024]

◆ create_instance()

static bool create_instance ( sbgl_GfxContext * ctx,
bool enableValidation )
static

Definition at line 463 of file sbgl_backend_vulkan.c.

463 {
464 VkApplicationInfo appInfo = {
465 .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
466 .pApplicationName = "SBgl Application",
467 .applicationVersion = VK_MAKE_VERSION(1, 0, 0),
468 .pEngineName = "SBgl",
469 .engineVersion = VK_MAKE_VERSION(1, 0, 0),
470 .apiVersion = VK_API_VERSION_1_3,
471 };
472
473 const char* extensions[] = {
474 VK_KHR_SURFACE_EXTENSION_NAME,
475#ifdef SBGL_PLATFORM_WAYLAND
476 VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME,
477#elif defined(SBGL_PLATFORM_X11)
478 VK_KHR_XLIB_SURFACE_EXTENSION_NAME,
479#elif defined(_WIN32)
480 VK_KHR_WIN32_SURFACE_EXTENSION_NAME,
481#endif
482 };
483
484 VkInstanceCreateInfo createInfo = {
485 .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
486 .pApplicationInfo = &appInfo,
487 .enabledExtensionCount = sizeof(extensions) / sizeof(extensions[0]),
488 .ppEnabledExtensionNames = extensions,
489 };
490
491 if (enableValidation) {
492 const char* layers[] = { "VK_LAYER_KHRONOS_validation" };
493 createInfo.enabledLayerCount = 1;
494 createInfo.ppEnabledLayerNames = layers;
495 }
496
497 VkResult result = vkCreateInstance(&createInfo, NULL, &ctx->instance);
498 ctx->backendResult = result;
499
500 if (result != VK_SUCCESS) {
501 return false;
502 }
503
504 volkLoadInstance(ctx->instance);
505
 506 if (enableValidation) {
 507 sbgl_setup_debug_utils(ctx);
 508 }
509
510 return true;
511}
static void sbgl_setup_debug_utils(sbgl_GfxContext *ctx)

◆ create_logical_device()

static bool create_logical_device ( sbgl_GfxContext * ctx)
static

Definition at line 585 of file sbgl_backend_vulkan.c.

585 {
586 uint32_t queueFamilyCount = 0;
587 vkGetPhysicalDeviceQueueFamilyProperties(ctx->physicalDevice, &queueFamilyCount, NULL);
588
589 SblArenaMark mark = sbl_arena_mark(ctx->arena);
590 VkQueueFamilyProperties* queueFamilies =
591 SBL_ARENA_PUSH_ARRAY(ctx->arena, VkQueueFamilyProperties, queueFamilyCount);
592 if (!queueFamilies)
593 return false;
594 vkGetPhysicalDeviceQueueFamilyProperties(ctx->physicalDevice, &queueFamilyCount, queueFamilies);
595
596 int graphicsFamily = -1;
597 for (uint32_t i = 0; i < queueFamilyCount; i++) {
598 VkBool32 presentSupport = false;
599 vkGetPhysicalDeviceSurfaceSupportKHR(ctx->physicalDevice, i, ctx->surface, &presentSupport);
600 if ((queueFamilies[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) && presentSupport) {
601 graphicsFamily = i;
602 break;
603 }
604 }
605 sbl_arena_rewind(ctx->arena, mark);
606
607 if (graphicsFamily == -1) {
608 fprintf(stderr, "[Vulkan] No suitable queue family found\n");
609 return false;
610 }
611 ctx->graphicsQueueFamily = (uint32_t)graphicsFamily;
612
613 float queuePriority = 1.0f;
614 VkDeviceQueueCreateInfo queueCreateInfo = {
615 .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
616 .queueFamilyIndex = ctx->graphicsQueueFamily,
617 .queueCount = 1,
618 .pQueuePriorities = &queuePriority,
619 };
620
621 VkPhysicalDeviceFeatures deviceFeatures = {
622 .multiDrawIndirect = VK_TRUE,
623 .shaderInt64 = VK_TRUE,
624 };
625
626 const char* deviceExtensions[] = { VK_KHR_SWAPCHAIN_EXTENSION_NAME,
627 VK_KHR_DYNAMIC_RENDERING_EXTENSION_NAME,
628 VK_KHR_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME };
629
630 VkPhysicalDeviceVulkan11Features features11 = {
631 .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES,
632 .shaderDrawParameters = VK_TRUE,
633 };
634
635 VkPhysicalDeviceVulkan12Features features12 = {
636 .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES,
637 .pNext = &features11,
638 .bufferDeviceAddress = VK_TRUE,
639 .hostQueryReset = VK_TRUE,
640 };
641
642 VkPhysicalDeviceDynamicRenderingFeatures dynamicRenderingFeatures = {
643 .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES,
644 .pNext = &features12,
645 .dynamicRendering = VK_TRUE,
646 };
647
648 VkDeviceCreateInfo createInfo = {
649 .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
650 .pNext = &dynamicRenderingFeatures,
651 .queueCreateInfoCount = 1,
652 .pQueueCreateInfos = &queueCreateInfo,
653 .pEnabledFeatures = &deviceFeatures,
654 .enabledExtensionCount = 3,
655 .ppEnabledExtensionNames = deviceExtensions,
656 };
657
658 if (vkCreateDevice(ctx->physicalDevice, &createInfo, NULL, &ctx->device) != VK_SUCCESS) {
659 fprintf(stderr, "[Vulkan] Failed to create logical device\n");
660 return false;
661 }
662
663 volkLoadDeviceTable(&ctx->vk, ctx->device);
664
665 ctx->vk.vkGetDeviceQueue(ctx->device, ctx->graphicsQueueFamily, 0, &ctx->graphicsQueue);
666
667 printf("[Vulkan] Logical Device created (Dynamic Rendering enabled)\n");
668 return true;
669}
SBL_ARENA_DEF SblArenaMark sbl_arena_mark(SblArena *arena)
#define SBL_ARENA_PUSH_ARRAY(arena, type, count)
Definition sbl_arena.h:21
Bookmark for arena state.
Definition sbl_arena.h:57
VkPhysicalDevice physicalDevice

◆ create_surface()

static bool create_surface ( sbgl_GfxContext * ctx,
sbgl_Window * window )
static

Definition at line 513 of file sbgl_backend_vulkan.c.

513 {
514#ifdef SBGL_PLATFORM_WAYLAND
515 VkWaylandSurfaceCreateInfoKHR createInfo = {
516 .sType = VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR,
517 .display = (struct wl_display*)sbgl_os_GetNativeDisplayHandle(window),
518 .surface = (struct wl_surface*)sbgl_os_GetNativeWindowHandle(window),
519 };
520 if (vkCreateWaylandSurfaceKHR(ctx->instance, &createInfo, NULL, &ctx->surface) != VK_SUCCESS) {
521 fprintf(stderr, "[Vulkan] Failed to create Wayland surface\n");
522 return false;
523 }
524#elif defined(SBGL_PLATFORM_X11)
525 VkXlibSurfaceCreateInfoKHR createInfo = {
526 .sType = VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR,
527 .dpy = (Display*)sbgl_os_GetNativeDisplayHandle(window),
528 .window = (Window)(uintptr_t)sbgl_os_GetNativeWindowHandle(window),
529 };
530 if (vkCreateXlibSurfaceKHR(ctx->instance, &createInfo, NULL, &ctx->surface) != VK_SUCCESS) {
531 fprintf(stderr, "[Vulkan] Failed to create Xlib surface\n");
532 return false;
533 }
534#elif defined(_WIN32)
535 VkWin32SurfaceCreateInfoKHR createInfo = {
536 .sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR,
537 .hinstance = (HINSTANCE)sbgl_os_GetNativeInstanceHandle(window),
538 .hwnd = (HWND)sbgl_os_GetNativeWindowHandle(window),
539 };
540 if (vkCreateWin32SurfaceKHR(ctx->instance, &createInfo, NULL, &ctx->surface) != VK_SUCCESS) {
541 fprintf(stderr, "[Vulkan] Failed to create Win32 surface\n");
542 return false;
543 }
544#endif
545
546 printf("[Vulkan] Surface created successfully\n");
547 return true;
548}
void * sbgl_os_GetNativeWindowHandle(sbgl_Window *window)
Retrieves the raw window handle for Vulkan surface creation.
void * sbgl_os_GetNativeDisplayHandle(sbgl_Window *window)
Retrieves the native display handle (Linux specific).
void * sbgl_os_GetNativeInstanceHandle(sbgl_Window *window)
Retrieves the native instance handle (Win32 specific).
Definition window_x11.c:152

◆ create_swapchain()

static bool create_swapchain ( sbgl_GfxContext * ctx,
sbgl_Window * window )
static

Definition at line 743 of file sbgl_backend_vulkan.c.

743 {
744 int w, h;
745 sbgl_os_GetWindowSize(window, &w, &h);
746
747 VkSurfaceCapabilitiesKHR capabilities;
748 vkGetPhysicalDeviceSurfaceCapabilitiesKHR(ctx->physicalDevice, ctx->surface, &capabilities);
749
750 VkExtent2D extent = { (uint32_t)w, (uint32_t)h };
751 if (capabilities.currentExtent.width != 0xFFFFFFFF) {
752 extent = capabilities.currentExtent;
753 }
754
755 if (extent.width == 0 || extent.height == 0) {
756 return false;
757 }
758
759 uint32_t formatCount;
760 vkGetPhysicalDeviceSurfaceFormatsKHR(ctx->physicalDevice, ctx->surface, &formatCount, NULL);
761 if (formatCount == 0) {
762 fprintf(stderr, "[Vulkan] No supported surface formats found\n");
763 return false;
764 }
765 VkSurfaceFormatKHR formats[64];
766 if (formatCount > 64)
767 formatCount = 64;
768 vkGetPhysicalDeviceSurfaceFormatsKHR(ctx->physicalDevice, ctx->surface, &formatCount, formats);
769
770 VkSurfaceFormatKHR selectedFormat = formats[0];
771 for (uint32_t i = 0; i < formatCount; i++) {
772 if ((formats[i].format == VK_FORMAT_B8G8R8A8_SRGB ||
773 formats[i].format == VK_FORMAT_R8G8B8A8_SRGB) &&
774 formats[i].colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR) {
775 selectedFormat = formats[i];
776 break;
777 }
778 }
779
780 uint32_t imageCount = capabilities.minImageCount + 1;
781 if (capabilities.maxImageCount > 0 && imageCount > capabilities.maxImageCount) {
782 imageCount = capabilities.maxImageCount;
783 }
784
785 VkPresentModeKHR presentMode = VK_PRESENT_MODE_FIFO_KHR;
786 uint32_t presentModeCount;
787 vkGetPhysicalDeviceSurfacePresentModesKHR(
788 ctx->physicalDevice,
789 ctx->surface,
790 &presentModeCount,
791 NULL
792 );
793 if (presentModeCount > 0) {
794 VkPresentModeKHR presentModes[16];
795 if (presentModeCount > 16)
796 presentModeCount = 16;
797 vkGetPhysicalDeviceSurfacePresentModesKHR(
798 ctx->physicalDevice,
799 ctx->surface,
800 &presentModeCount,
801 presentModes
802 );
803
804 /* The system prioritizes present modes that minimize latency and maximize throughput.
805 IMMEDIATE is preferred for raw benchmarks, while MAILBOX provides high-performance
806 triple-buffering without tearing. */
807 bool mailboxSupported = false;
808 bool immediateSupported = false;
809 for (uint32_t i = 0; i < presentModeCount; i++) {
810 if (presentModes[i] == VK_PRESENT_MODE_IMMEDIATE_KHR)
811 immediateSupported = true;
812 if (presentModes[i] == VK_PRESENT_MODE_MAILBOX_KHR)
813 mailboxSupported = true;
814 }
815
816 if (immediateSupported) {
817 presentMode = VK_PRESENT_MODE_IMMEDIATE_KHR;
818 } else if (mailboxSupported) {
819 presentMode = VK_PRESENT_MODE_MAILBOX_KHR;
820 }
821 }
822
823 VkSwapchainCreateInfoKHR createInfo = {
824 .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
825 .surface = ctx->surface,
826 .minImageCount = imageCount,
827 .imageFormat = selectedFormat.format,
828 .imageColorSpace = selectedFormat.colorSpace,
829 .imageExtent = extent,
830 .imageArrayLayers = 1,
831 .imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
832 .imageSharingMode = VK_SHARING_MODE_EXCLUSIVE,
833 .preTransform = capabilities.currentTransform,
834 .compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
835 .presentMode = presentMode,
836 .clipped = VK_TRUE,
837 };
838
839 if (ctx->vk.vkCreateSwapchainKHR(ctx->device, &createInfo, NULL, &ctx->swapchain) !=
840 VK_SUCCESS) {
841 fprintf(stderr, "[Vulkan] Failed to create swapchain\n");
842 return false;
843 }
844
845 ctx->swapchainExtent = createInfo.imageExtent;
846 ctx->swapchainFormat = createInfo.imageFormat;
847
848 ctx->vk.vkGetSwapchainImagesKHR(ctx->device, ctx->swapchain, &ctx->imageCount, NULL);
849
851 ctx->images = SBL_ARENA_PUSH_ARRAY(ctx->arena, VkImage, ctx->imageCount);
852 if (!ctx->images)
853 return false;
854 ctx->vk.vkGetSwapchainImagesKHR(ctx->device, ctx->swapchain, &ctx->imageCount, ctx->images);
855
856 ctx->imageViews = SBL_ARENA_PUSH_ARRAY(ctx->arena, VkImageView, ctx->imageCount);
857 if (!ctx->imageViews)
858 return false;
859 for (uint32_t i = 0; i < ctx->imageCount; i++) {
860 VkImageViewCreateInfo viewInfo = {
861 .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
862 .image = ctx->images[i],
863 .viewType = VK_IMAGE_VIEW_TYPE_2D,
864 .format = ctx->swapchainFormat,
865 .subresourceRange = { .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
866 .levelCount = 1,
867 .layerCount = 1 },
868 };
869 ctx->vk.vkCreateImageView(ctx->device, &viewInfo, NULL, &ctx->imageViews[i]);
870 }
871
872 printf(
873 "[Vulkan] Swapchain created (%dx%d, %u images, format: %d)\n",
874 ctx->swapchainExtent.width,
875 ctx->swapchainExtent.height,
876 ctx->imageCount,
877 ctx->swapchainFormat
878 );
879
880 if (!create_depth_resources(ctx))
881 return false;
882
883 return true;
884}
static bool create_depth_resources(sbgl_GfxContext *ctx)
void sbgl_os_GetWindowSize(sbgl_Window *window, int *w, int *h)
Retrieves the current client area size.

◆ create_sync_and_command()

static bool create_sync_and_command ( sbgl_GfxContext * ctx)
static

Definition at line 886 of file sbgl_backend_vulkan.c.

886 {
887 VkCommandPoolCreateInfo poolInfo = {
888 .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
889 .flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
890 .queueFamilyIndex = ctx->graphicsQueueFamily,
891 };
892 if (ctx->vk.vkCreateCommandPool(ctx->device, &poolInfo, NULL, &ctx->commandPool) != VK_SUCCESS)
893 return false;
894
895 VkCommandBufferAllocateInfo allocInfo = {
896 .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
897 .commandPool = ctx->commandPool,
898 .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
899 .commandBufferCount = SBGL_MAX_FRAMES_IN_FLIGHT,
900 };
901 if (ctx->vk.vkAllocateCommandBuffers(ctx->device, &allocInfo, ctx->commandBuffers) !=
902 VK_SUCCESS)
903 return false;
904
905 VkSemaphoreCreateInfo semInfo = { .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO };
906 VkFenceCreateInfo fenceInfo = { .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
907 .flags = VK_FENCE_CREATE_SIGNALED_BIT };
908
909 for (uint32_t i = 0; i < SBGL_MAX_FRAMES_IN_FLIGHT; i++) {
910 if (ctx->vk.vkCreateFence(ctx->device, &fenceInfo, NULL, &ctx->inFlightFences[i]) !=
911 VK_SUCCESS)
912 return false;
913 }
914
915 for (uint32_t i = 0; i < SBGL_MAX_SWAPCHAIN_IMAGES; i++) {
 916 if (ctx->vk.vkCreateSemaphore(
 917 ctx->device,
 918 &semInfo,
 919 NULL,
 920 &ctx->imageAvailableSemaphores[i]
 921 ) != VK_SUCCESS ||
 922 ctx->vk.vkCreateSemaphore(
 923 ctx->device,
 924 &semInfo,
 925 NULL,
 926 &ctx->renderFinishedSemaphores[i]
 927 ) != VK_SUCCESS)
928 return false;
929 }
930
931 return true;
932}
#define SBGL_MAX_FRAMES_IN_FLIGHT
#define SBGL_MAX_SWAPCHAIN_IMAGES
VkCommandBuffer commandBuffers[SBGL_MAX_FRAMES_IN_FLIGHT]
VkSemaphore imageAvailableSemaphores[SBGL_MAX_SWAPCHAIN_IMAGES]
VkSemaphore renderFinishedSemaphores[SBGL_MAX_SWAPCHAIN_IMAGES]
VkFence inFlightFences[SBGL_MAX_FRAMES_IN_FLIGHT]
VkCommandPool commandPool

◆ create_telemetry_resources()

static bool create_telemetry_resources ( sbgl_GfxContext * ctx)
static

Definition at line 934 of file sbgl_backend_vulkan.c.

934 {
935 /* The system initializes a query pool with two timestamp slots for each frame in flight
936 to track GPU execution time, enabling performance telemetry across the full pipeline. */
937 VkQueryPoolCreateInfo queryPoolInfo = {
938 .sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
939 .queryType = VK_QUERY_TYPE_TIMESTAMP,
940 .queryCount = SBGL_MAX_FRAMES_IN_FLIGHT * 2,
941 };
942
943 if (ctx->vk.vkCreateQueryPool(ctx->device, &queryPoolInfo, NULL, &ctx->queryPool) !=
944 VK_SUCCESS) {
945 return false;
946 }
947
948 /* The timestamp period is retrieved from physical device properties to convert
949 GPU clock cycles into nanoseconds. */
950 VkPhysicalDeviceProperties props;
951 vkGetPhysicalDeviceProperties(ctx->physicalDevice, &props);
952 ctx->timestampPeriod = props.limits.timestampPeriod;
953
954 return true;
955}

◆ create_transient_resources()

static bool create_transient_resources ( sbgl_GfxContext * ctx)
static

Definition at line 957 of file sbgl_backend_vulkan.c.

957 {
958 /* The system allocates persistent, large-scale buffers for each frame in flight,
959 enabling high-frequency data updates without the overhead of per-frame allocations. */
 960 for (uint32_t i = 0; i < SBGL_MAX_FRAMES_IN_FLIGHT; i++) {
 961 ctx->transientBuffers[i] = sbgl_gfx_CreateBuffer(
 962 ctx,
 963 SBGL_BUFFER_USAGE_STORAGE | SBGL_BUFFER_USAGE_INDIRECT,
 964 SBGL_TRANSIENT_BUFFER_SIZE,
 965 NULL
 966 );
967 if (ctx->transientBuffers[i] == SBGL_INVALID_HANDLE) {
968 return false;
969 }
970
971 uint32_t idx = (uint32_t)ctx->transientBuffers[i] - 1;
972 SBGL_VulkanBuffer* buffer = &ctx->buffers[idx];
973
974 /* The system utilizes the persistent mapping established during buffer creation
975 to provide zero-copy access to the transient memory pools. */
976 ctx->transientMapped[i] = buffer->mapped;
977 if (!ctx->transientMapped[i]) {
978 return false;
979 }
980 ctx->transientOffsets[i] = 0;
981 }
982 return true;
983}
sbgl_Buffer sbgl_gfx_CreateBuffer(sbgl_GfxContext *ctx, sbgl_BufferUsage usage, size_t size, const void *data)
#define SBGL_TRANSIENT_BUFFER_SIZE
@ SBGL_BUFFER_USAGE_INDIRECT
Definition sbgl_types.h:126
@ SBGL_BUFFER_USAGE_STORAGE
Definition sbgl_types.h:125
#define SBGL_INVALID_HANDLE
Definition sbgl_types.h:8
void * transientMapped[SBGL_MAX_FRAMES_IN_FLIGHT]
uint32_t transientOffsets[SBGL_MAX_FRAMES_IN_FLIGHT]
sbgl_Buffer transientBuffers[SBGL_MAX_FRAMES_IN_FLIGHT]
SBGL_VulkanBuffer * buffers

◆ dynamic_heap_alloc()

static uint32_t dynamic_heap_alloc ( sbgl_GfxContext * ctx,
size_t size )
static

Definition at line 298 of file sbgl_backend_vulkan.c.

298 {
299 /* Dynamic allocations are performed within the heap slice corresponding to the
300 currently active frame, utilizing a 256-byte alignment for hardware compatibility. */
301 uint32_t alignedSize = (uint32_t)((size + 255) & ~255);
302 uint32_t frame = ctx->currentFrame;
303 uint32_t halfSize = ctx->dynamicHeap.size / 2;
304
305 if (ctx->dynamicHeap.offset[frame] + alignedSize > halfSize) {
306 /* If the dynamic allocation exceeds the current frame's capacity, the context
307 is marked with an out-of-memory error to prevent invalid GPU access. */
309 return SBGL_INVALID_OFFSET;
310 }
311
312 uint32_t offset = ctx->dynamicHeap.offset[frame];
313 ctx->dynamicHeap.offset[frame] += alignedSize;
314 return offset;
315}
@ SBGL_ERROR_OUT_OF_MEMORY
Definition sbgl_types.h:221

◆ find_depth_format()

static bool find_depth_format ( sbgl_GfxContext * ctx)
static

Definition at line 671 of file sbgl_backend_vulkan.c.

671 {
672 VkFormat candidates[] = { VK_FORMAT_D32_SFLOAT,
673 VK_FORMAT_D32_SFLOAT_S8_UINT,
674 VK_FORMAT_D24_UNORM_S8_UINT };
675 for (uint32_t i = 0; i < 3; i++) {
676 VkFormatProperties props;
677 vkGetPhysicalDeviceFormatProperties(ctx->physicalDevice, candidates[i], &props);
678 if (props.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) {
679 ctx->depthFormat = candidates[i];
680 return true;
681 }
682 }
683 return false;
684}

◆ find_memory_type()

static uint32_t find_memory_type ( sbgl_GfxContext * ctx,
uint32_t typeFilter,
VkMemoryPropertyFlags properties )
static

Definition at line 268 of file sbgl_backend_vulkan.c.

268 {
269 VkPhysicalDeviceMemoryProperties memProperties;
270 vkGetPhysicalDeviceMemoryProperties(ctx->physicalDevice, &memProperties);
271
272 for (uint32_t i = 0; i < memProperties.memoryTypeCount; i++) {
273 if ((typeFilter & (1 << i)) &&
274 (memProperties.memoryTypes[i].propertyFlags & properties) == properties) {
275 return i;
276 }
277 }
278 return SBGL_INVALID_OFFSET;
279}

◆ managed_heap_alloc()

static uint32_t managed_heap_alloc ( sbgl_GfxContext * ctx,
size_t size )
static

Definition at line 317 of file sbgl_backend_vulkan.c.

317 {
318 /* The sub-allocator utilizes a first-fit strategy across an array of memory
319 ranges, ensuring each allocation is aligned to a 256-byte boundary. */
320 uint32_t alignedSize = (uint32_t)((size + 255) & ~255);
321
322 for (uint32_t i = 0; i < ctx->managedHeap.rangeCount; i++) {
323 sbgl_GfxMemoryRange* range = &ctx->managedHeap.ranges[i];
324
325 /* A range is considered a candidate if it is currently unassigned (handle_index 0)
326 and possesses sufficient capacity to accommodate the requested aligned size. */
327 if (range->handle_index == 0 && range->size >= alignedSize) {
328 uint32_t offset = range->offset;
329
330 if (range->size > alignedSize) {
331 /* If the selected range is larger than the requested size, it is split into
332 two segments: the allocated block and a new unassigned trailing range. */
333 if (ctx->managedHeap.rangeCount >= 1024) {
334 /* The allocation fails if the maximum number of range descriptors is exceeded. */
336 return SBGL_INVALID_OFFSET;
337 }
338
339 /* Shift subsequent ranges to maintain the sequential order of the range array. */
340 for (uint32_t j = ctx->managedHeap.rangeCount; j > i + 1; j--) {
341 ctx->managedHeap.ranges[j] = ctx->managedHeap.ranges[j - 1];
342 }
343
344 /* Initialize the new free range representing the remaining space in the block. */
345 ctx->managedHeap.ranges[i + 1] = (sbgl_GfxMemoryRange){
346 .offset = offset + alignedSize,
347 .size = range->size - alignedSize,
348 .handle_index = 0
349 };
350
351 ctx->managedHeap.rangeCount++;
352 range->size = alignedSize;
353 }
354
355 /* The range is marked as active by assigning a non-zero handle index (1 is used
356 as a generic active marker in this implementation stage). */
357 range->handle_index = 1;
358 return offset;
359 }
360 }
361
362 /* If no suitable range is identified, the context state is updated to reflect
363 the allocation failure. */
365 return SBGL_INVALID_OFFSET;
366}

◆ managed_heap_free()

static void managed_heap_free ( sbgl_GfxContext * ctx,
uint32_t offset )
static

Definition at line 368 of file sbgl_backend_vulkan.c.

368 {
369 /* The freeing process identifies the range associated with the given offset
370 and attempts to merge it with adjacent free blocks to mitigate fragmentation. */
371 for (uint32_t i = 0; i < ctx->managedHeap.rangeCount; i++) {
372 if (ctx->managedHeap.ranges[i].offset == offset) {
373 /* The block is transitioned back to an unassigned state. */
374 ctx->managedHeap.ranges[i].handle_index = 0;
375
376 /* Coalescing with the subsequent block occurs if it is also unassigned. */
377 if (i + 1 < ctx->managedHeap.rangeCount && ctx->managedHeap.ranges[i + 1].handle_index == 0) {
378 ctx->managedHeap.ranges[i].size += ctx->managedHeap.ranges[i + 1].size;
379 for (uint32_t j = i + 1; j < ctx->managedHeap.rangeCount - 1; j++) {
380 ctx->managedHeap.ranges[j] = ctx->managedHeap.ranges[j + 1];
381 }
382 ctx->managedHeap.rangeCount--;
383 }
384
385 /* Coalescing with the preceding block occurs if it is also unassigned. */
386 if (i > 0 && ctx->managedHeap.ranges[i - 1].handle_index == 0) {
387 ctx->managedHeap.ranges[i - 1].size += ctx->managedHeap.ranges[i].size;
388 for (uint32_t j = i; j < ctx->managedHeap.rangeCount - 1; j++) {
389 ctx->managedHeap.ranges[j] = ctx->managedHeap.ranges[j + 1];
390 }
391 ctx->managedHeap.rangeCount--;
392 }
393
394 return;
395 }
396 }
397
398 fprintf(stderr, "[Vulkan] managed_heap_free: offset %u not found in tracked ranges (possible double-free or corruption)\n", offset);
399}

◆ recreate_swapchain()

static void recreate_swapchain ( sbgl_GfxContext * ctx)
static

Definition at line 183 of file sbgl_backend_vulkan.c.

static void recreate_swapchain(sbgl_GfxContext* ctx)
{
    /* Rebuilds the swapchain after a resize or out-of-date condition. Blocks
       while the window is minimized (0x0 extent), pumping OS events so the
       size can actually change. */
    int w = 0, h = 0;
    sbgl_os_GetWindowSize(ctx->window, &w, &h);
    while (w == 0 || h == 0) {
        sbgl_os_GetWindowSize(ctx->window, &w, &h);
        /* Reconstructed (dropped listing line 188): dispatch OS events so a
           minimized window can be restored. */
        sbgl_os_PollEvents(ctx->window);
    }

    /* All in-flight GPU work must be complete before destroying swapchain
       resources. */
    ctx->vk.vkDeviceWaitIdle(ctx->device);

    /* Reconstructed (dropped listing line 193): tear down old swapchain
       objects before creating the new ones. */
    cleanup_swapchain(ctx);
    create_swapchain(ctx, ctx->window);
}

◆ sbgl_gfx_AllocateTransient()

sbgl_GfxTransientAllocation sbgl_gfx_AllocateTransient ( sbgl_GfxContext * ctx,
size_t size,
uint32_t alignment )

Allocates a slice of GPU-visible memory for transient per-frame data.

This memory is managed by the backend's internal per-frame ring buffers and does not require manual destruction.

Parameters
ctx — The graphics context.
size — The number of bytes to allocate.
alignment — The required byte alignment for the allocation.
Returns
A structure containing the allocation metadata and mapped pointer.

Definition at line 2039 of file sbgl_backend_vulkan.c.

2039 {
2040 /* The system sub-allocates from the current frame's persistent buffer, respecting
2041 the requested alignment to ensure compatibility with Vulkan requirements. */
2042 uint32_t frame = ctx->currentFrame;
2043 uint32_t offset = ctx->transientOffsets[frame];
2044
2045 if (alignment > 0) {
2046 offset = (offset + alignment - 1) & ~(alignment - 1);
2047 }
2048
2049 if (offset + size > SBGL_TRANSIENT_BUFFER_SIZE) {
2050 fprintf(stderr, "[Vulkan] Transient buffer overflow for frame %u!\n", frame);
2051 return (sbgl_GfxTransientAllocation){ 0 };
2052 }
2053
2055 .buffer = ctx->transientBuffers[frame],
2056 .offset = offset,
2057 .size = (uint32_t)size,
2058 .mapped = (char*)ctx->transientMapped[frame] + offset,
2059 .deviceAddress = sbgl_gfx_GetBufferDeviceAddress(ctx, ctx->transientBuffers[frame]) + offset
2060 };
2061
2062 ctx->transientOffsets[frame] = offset + (uint32_t)size;
2063 return alloc;
2064}
uint64_t sbgl_gfx_GetBufferDeviceAddress(sbgl_GfxContext *ctx, sbgl_Buffer handle)
Retrieves the 64-bit GPU virtual address for a buffer.
Represents a slice of a persistent GPU buffer used for transient data.

◆ sbgl_gfx_BeginFrame()

bool sbgl_gfx_BeginFrame ( sbgl_GfxContext * ctx)

Starts a new frame, acquiring an image and starting the command buffer.

This must be called before any GPU commands (Compute or Graphics) are recorded.

Definition at line 1105 of file sbgl_backend_vulkan.c.

bool sbgl_gfx_BeginFrame(sbgl_GfxContext* ctx)
{
    /* Starts a new frame: waits for this frame slot's previous submission,
       flushes deferred destructions, recycles per-frame allocators, acquires
       a swapchain image, and begins command recording.
       Returns false when the frame must be skipped (swapchain out of date or
       acquisition failure). */
    ctx->vk.vkWaitForFences(
        ctx->device,
        1,
        &ctx->inFlightFences[ctx->currentFrame],
        VK_TRUE,
        UINT64_MAX
    );

    /* The fence wait above guarantees resources queued for this slot are no
       longer in flight, so they can be destroyed now. */
    for (uint32_t i = 0; i < ctx->deferredCount[ctx->currentFrame]; i++) {
        /* Reconstructed (dropped listing line 1117): destroy each queued buffer. */
        sbgl_gfx_DestroyBuffer(ctx, ctx->deferredBuffers[ctx->currentFrame][i]);
    }
    ctx->deferredCount[ctx->currentFrame] = 0;

    /* Recycle the per-frame transient and dynamic-heap memory; other frames
       in flight own their own slices, so no overlap occurs. */
    ctx->transientOffsets[ctx->currentFrame] = 0;
    ctx->dynamicHeap.offset[ctx->currentFrame] = 0;

    if (sbgl_os_WasWindowResized(ctx->window)) {
        recreate_swapchain(ctx);
    }

    VkResult result = ctx->vk.vkAcquireNextImageKHR(
        ctx->device,
        ctx->swapchain,
        UINT64_MAX,
        /* NOTE(review): this argument was dropped by doc extraction (original
           line 1135); the per-frame image-acquisition semaphore member name
           is reconstructed — confirm against the context struct. */
        ctx->imageAvailableSemaphores[ctx->currentFrame],
        VK_NULL_HANDLE,
        &ctx->currentImageIndex
    );

    ctx->backendResult = result;

    if (result == VK_ERROR_OUT_OF_DATE_KHR) {
        recreate_swapchain(ctx);
        return false;
    } else if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR) {
        return false;
    }

    /* Only reset the fence once we are committed to submitting this frame;
       resetting earlier could deadlock if acquisition failed. */
    ctx->vk.vkResetFences(ctx->device, 1, &ctx->inFlightFences[ctx->currentFrame]);
    ctx->vk.vkResetCommandBuffer(ctx->commandBuffers[ctx->currentFrame], 0);
    VkCommandBufferBeginInfo beginInfo = { .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO };
    ctx->vk.vkBeginCommandBuffer(ctx->commandBuffers[ctx->currentFrame], &beginInfo);

    /* Reset this frame's pair of timestamp queries before new recordings. */
    ctx->vk.vkCmdResetQueryPool(
        ctx->commandBuffers[ctx->currentFrame],
        ctx->queryPool,
        ctx->currentFrame * 2,
        2
    );

    return true;
}

◆ sbgl_gfx_BeginRenderPass()

void sbgl_gfx_BeginRenderPass ( sbgl_GfxContext * ctx,
float r,
float g,
float b,
float a )

Starts a graphics rendering pass.

This must be called before any draw commands are recorded. It handles clearing the attachments if requested.

Definition at line 1166 of file sbgl_backend_vulkan.c.

1166 {
1167 /* The system records the starting timestamp at the beginning of the graphics pass. */
1168 ctx->vk.vkCmdWriteTimestamp(
1169 ctx->commandBuffers[ctx->currentFrame],
1170 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
1171 ctx->queryPool,
1172 ctx->currentFrame * 2
1173 );
1174
1175 VkImageMemoryBarrier barriers[2] = { 0 };
1176 barriers[0].sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
1177 barriers[0].oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
1178 barriers[0].newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
1179 barriers[0].image = ctx->images[ctx->currentImageIndex];
1180 barriers[0].subresourceRange =
1181 (VkImageSubresourceRange){ .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
1182 .levelCount = 1,
1183 .layerCount = 1 };
1184 barriers[0].dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
1185
1186 barriers[1].sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
1187 barriers[1].oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
1188 barriers[1].newLayout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL;
1189 barriers[1].image = ctx->depthImage;
1190 barriers[1].subresourceRange =
1191 (VkImageSubresourceRange){ .aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT,
1192 .levelCount = 1,
1193 .layerCount = 1 };
1194 barriers[1].dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
1195
1196 ctx->vk.vkCmdPipelineBarrier(
1197 ctx->commandBuffers[ctx->currentFrame],
1198 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
1199 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
1200 0,
1201 0,
1202 NULL,
1203 0,
1204 NULL,
1205 2,
1206 barriers
1207 );
1208
1209 VkRenderingAttachmentInfo colorAttachment = {
1210 .sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
1211 .imageView = ctx->imageViews[ctx->currentImageIndex],
1212 .imageLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
1213 .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
1214 .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
1215 .clearValue = { { { r, g, b, a } } },
1216 };
1217
1218 VkRenderingAttachmentInfo depthAttachment = {
1219 .sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
1220 .imageView = ctx->depthImageView,
1221 .imageLayout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL,
1222 .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
1223 .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
1224 .clearValue = { .depthStencil = { 1.0f, 0 } },
1225 };
1226
1227 VkRenderingInfo renderingInfo = {
1228 .sType = VK_STRUCTURE_TYPE_RENDERING_INFO,
1229 .renderArea = { .extent = ctx->swapchainExtent },
1230 .layerCount = 1,
1231 .colorAttachmentCount = 1,
1232 .pColorAttachments = &colorAttachment,
1233 .pDepthAttachment = &depthAttachment,
1234 };
1235
1236 ctx->vk.vkCmdBeginRendering(ctx->commandBuffers[ctx->currentFrame], &renderingInfo);
1237}

◆ sbgl_gfx_BindBuffer()

void sbgl_gfx_BindBuffer ( sbgl_GfxContext * ctx,
sbgl_Buffer handle,
sbgl_BufferUsage usage )

Definition at line 1970 of file sbgl_backend_vulkan.c.

1970 {
1971 if (handle == SBGL_INVALID_HANDLE)
1972 return;
1973 uint32_t index = (uint32_t)handle - 1;
1974 if (index >= ctx->limits.maxBuffers || !ctx->bufferActive[index])
1975 return;
1976
1977 if (usage == SBGL_BUFFER_USAGE_VERTEX) {
1978 VkDeviceSize offsets[] = { 0 };
1979 ctx->vk.vkCmdBindVertexBuffers(
1980 ctx->commandBuffers[ctx->currentFrame],
1981 0,
1982 1,
1983 &ctx->buffers[index].handle,
1984 offsets
1985 );
1986 } else if (usage == SBGL_BUFFER_USAGE_INDEX) {
1987 ctx->vk.vkCmdBindIndexBuffer(
1988 ctx->commandBuffers[ctx->currentFrame],
1989 ctx->buffers[index].handle,
1990 0,
1991 VK_INDEX_TYPE_UINT32
1992 );
1993 }
1994}
@ SBGL_BUFFER_USAGE_INDEX
Definition sbgl_types.h:124
@ SBGL_BUFFER_USAGE_VERTEX
Definition sbgl_types.h:123
sbgl_ResourceLimits limits

◆ sbgl_gfx_BindComputePipeline()

void sbgl_gfx_BindComputePipeline ( sbgl_GfxContext * ctx,
sbgl_ComputePipeline handle )

Definition at line 1850 of file sbgl_backend_vulkan.c.

1850 {
1851 /* The currently active command buffer is updated to utilize the specified compute
1852 pipeline for all subsequent dispatch operations. */
1853 if (handle == SBGL_INVALID_HANDLE) {
1855 return;
1856 }
1857 uint32_t index = (uint32_t)handle - 1;
1858 if (index >= ctx->limits.maxPipelines || !ctx->computePipelines[index].active)
1859 return;
1860
1861 ctx->vk.vkCmdBindPipeline(
1862 ctx->commandBuffers[ctx->currentFrame],
1863 VK_PIPELINE_BIND_POINT_COMPUTE,
1864 ctx->computePipelines[index].handle
1865 );
1866 ctx->boundComputePipeline = handle;
1867}
SBGL_VulkanComputePipeline * computePipelines
sbgl_ComputePipeline boundComputePipeline

◆ sbgl_gfx_BindPipeline()

void sbgl_gfx_BindPipeline ( sbgl_GfxContext * ctx,
sbgl_Pipeline handle )

Definition at line 1942 of file sbgl_backend_vulkan.c.

1942 {
1943 if (handle == SBGL_INVALID_HANDLE)
1944 return;
1945 uint32_t index = (uint32_t)handle - 1;
1946 if (index >= ctx->limits.maxPipelines || !ctx->pipelines[index].active)
1947 return;
1948
1949 ctx->vk.vkCmdBindPipeline(
1950 ctx->commandBuffers[ctx->currentFrame],
1951 VK_PIPELINE_BIND_POINT_GRAPHICS,
1952 ctx->pipelines[index].handle
1953 );
1954 ctx->boundPipeline = handle;
1955
1956 VkViewport viewport = {
1957 .x = 0.0f,
1958 .y = (float)ctx->swapchainExtent.height,
1959 .width = (float)ctx->swapchainExtent.width,
1960 .height = -(float)ctx->swapchainExtent.height,
1961 .minDepth = 0.0f,
1962 .maxDepth = 1.0f,
1963 };
1964 ctx->vk.vkCmdSetViewport(ctx->commandBuffers[ctx->currentFrame], 0, 1, &viewport);
1965
1966 VkRect2D scissor = { .offset = { 0, 0 }, .extent = ctx->swapchainExtent };
1967 ctx->vk.vkCmdSetScissor(ctx->commandBuffers[ctx->currentFrame], 0, 1, &scissor);
1968}
SBGL_VulkanPipeline * pipelines
sbgl_Pipeline boundPipeline

◆ sbgl_gfx_CreateBuffer()

sbgl_Buffer sbgl_gfx_CreateBuffer ( sbgl_GfxContext * ctx,
sbgl_BufferUsage usage,
size_t size,
const void * data )

Definition at line 1323 of file sbgl_backend_vulkan.c.

1323 {
1324 /* Search for an available buffer slot in the internal tracking arrays. */
1325 uint32_t index = 0;
1326 for (; index < ctx->limits.maxBuffers; index++) {
1327 if (!ctx->bufferActive[index])
1328 break;
1329 }
1330 if (index == ctx->limits.maxBuffers)
1331 return SBGL_INVALID_HANDLE;
1332
1333 /* Identify the target memory heap based on the buffer's intended usage.
1334 Vertex and index buffers are assigned to the static heap, while storage
1335 buffers utilize the managed heap for persistence. */
1338 heapType = SBGL_HEAP_TYPE_STATIC;
1339 } else if (usage & SBGL_BUFFER_USAGE_STORAGE) {
1340 heapType = SBGL_HEAP_TYPE_MANAGED;
1341 }
1342
1343 VkBufferCreateInfo bufferInfo = {
1344 .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
1345 .size = size,
1346 .usage = (usage & SBGL_BUFFER_USAGE_VERTEX ? VK_BUFFER_USAGE_VERTEX_BUFFER_BIT : 0) |
1347 (usage & SBGL_BUFFER_USAGE_INDEX ? VK_BUFFER_USAGE_INDEX_BUFFER_BIT : 0) |
1348 (usage & SBGL_BUFFER_USAGE_STORAGE ? VK_BUFFER_USAGE_STORAGE_BUFFER_BIT : 0) |
1349 (usage & SBGL_BUFFER_USAGE_INDIRECT ? VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT : 0) |
1350 (usage & SBGL_BUFFER_USAGE_TRANSFER_DST ? VK_BUFFER_USAGE_TRANSFER_DST_BIT : 0) |
1351 VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT,
1352 .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
1353 };
1354
1355 SBGL_VulkanBuffer* buffer = &ctx->buffers[index];
1356 if (ctx->vk.vkCreateBuffer(ctx->device, &bufferInfo, NULL, &buffer->handle) != VK_SUCCESS) {
1357 return SBGL_INVALID_HANDLE;
1358 }
1359
1360 VkMemoryRequirements memRequirements;
1361 ctx->vk.vkGetBufferMemoryRequirements(ctx->device, buffer->handle, &memRequirements);
1362
1363 /* Sub-allocate the required memory range from the selected hybrid heap. */
1364 uint32_t offset = SBGL_INVALID_OFFSET;
1365 VkDeviceMemory heapMemory = VK_NULL_HANDLE;
1366 void* heapMappedBase = NULL;
1367
1368 switch (heapType) {
1370 offset = static_heap_alloc(ctx, memRequirements.size);
1371 heapMemory = ctx->staticHeap.memory;
1372 heapMappedBase = ctx->staticHeap.mapped;
1373 break;
1375 offset = dynamic_heap_alloc(ctx, memRequirements.size);
1376 heapMemory = ctx->dynamicHeap.memory;
1377 heapMappedBase = ctx->dynamicHeap.mapped[ctx->currentFrame];
1378 break;
1380 offset = managed_heap_alloc(ctx, memRequirements.size);
1381 heapMemory = ctx->managedHeap.memory;
1382 heapMappedBase = ctx->managedHeap.mapped;
1383 break;
1384 }
1385
1386 if (offset == SBGL_INVALID_OFFSET) {
1387 ctx->vk.vkDestroyBuffer(ctx->device, buffer->handle, NULL);
1388 return SBGL_INVALID_HANDLE;
1389 }
1390
1391 /* Bind the buffer handle to the sub-allocated memory region within the heap. */
1392 ctx->vk.vkBindBufferMemory(ctx->device, buffer->handle, heapMemory, offset);
1393
1394 buffer->size = size;
1395 buffer->offset = offset;
1396 buffer->heapType = heapType;
1397 buffer->mapped = (char*)heapMappedBase + offset;
1398 ctx->bufferActive[index] = true;
1399
1400 /* If initial data is provided, perform an immediate memory copy to the
1401 persistently mapped buffer address. */
1402 if (data && buffer->mapped) {
1403 memcpy(buffer->mapped, data, size);
1404 }
1405
1406 return (sbgl_Buffer)(index + 1);
1407}
static uint32_t static_heap_alloc(sbgl_GfxContext *ctx, size_t size)
static uint32_t dynamic_heap_alloc(sbgl_GfxContext *ctx, size_t size)
static uint32_t managed_heap_alloc(sbgl_GfxContext *ctx, size_t size)
@ SBGL_BUFFER_USAGE_TRANSFER_DST
Definition sbgl_types.h:127
uint32_t sbgl_Buffer
Handle for a GPU-side buffer.
Definition sbgl_types.h:37

◆ sbgl_gfx_CreateComputePipeline()

sbgl_ComputePipeline sbgl_gfx_CreateComputePipeline ( sbgl_GfxContext * ctx,
sbgl_Shader handle )

Definition at line 1763 of file sbgl_backend_vulkan.c.

1763 {
1764 /* The system scans the internal pipeline storage for an available slot to allocate
1765 the new compute pipeline state. */
1766 uint32_t index = 0;
1767 for (; index < ctx->limits.maxPipelines; index++) {
1768 if (!ctx->computePipelines[index].active)
1769 break;
1770 }
1771 if (index == ctx->limits.maxPipelines)
1772 return SBGL_INVALID_HANDLE;
1773
1774 if (handle == SBGL_INVALID_HANDLE || handle > ctx->limits.maxShaders) {
1775 fprintf(stderr, "[Vulkan] Invalid compute shader handle\n");
1776 return SBGL_INVALID_HANDLE;
1777 }
1778 uint32_t shaderIndex = handle - 1;
1779 if (!ctx->shaders[shaderIndex].active || ctx->shaders[shaderIndex].stage != SBGL_SHADER_STAGE_COMPUTE) {
1780 fprintf(stderr, "[Vulkan] Invalid compute shader stage or inactive shader\n");
1781 return SBGL_INVALID_HANDLE;
1782 }
1783
1784 VkPipelineShaderStageCreateInfo stageInfo = {
1785 .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
1786 .stage = VK_SHADER_STAGE_COMPUTE_BIT,
1787 .module = ctx->shaders[shaderIndex].module,
1788 .pName = "main",
1789 };
1790
1791 VkPipelineLayoutCreateInfo layoutInfo = {
1792 .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
1793 };
1794
1795 /* The system utilizes a standardized push constant block across both graphics
1796 and compute pipelines to maintain architectural consistency. */
1797 VkPushConstantRange pushConstantRange = {
1798 .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
1799 .offset = 0,
1801 };
1802 layoutInfo.pushConstantRangeCount = 1;
1803 layoutInfo.pPushConstantRanges = &pushConstantRange;
1804
1805 if (ctx->vk.vkCreatePipelineLayout(
1806 ctx->device,
1807 &layoutInfo,
1808 NULL,
1809 &ctx->computePipelines[index].layout
1810 ) != VK_SUCCESS) {
1811 return SBGL_INVALID_HANDLE;
1812 }
1813
1814 VkComputePipelineCreateInfo pipelineInfo = {
1815 .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
1816 .stage = stageInfo,
1817 .layout = ctx->computePipelines[index].layout,
1818 };
1819
1820 if (ctx->vk.vkCreateComputePipelines(
1821 ctx->device,
1822 VK_NULL_HANDLE,
1823 1,
1824 &pipelineInfo,
1825 NULL,
1826 &ctx->computePipelines[index].handle
1827 ) != VK_SUCCESS) {
1828 ctx->vk.vkDestroyPipelineLayout(ctx->device, ctx->computePipelines[index].layout, NULL);
1829 return SBGL_INVALID_HANDLE;
1830 }
1831
1832 ctx->computePipelines[index].active = true;
1833 return (sbgl_ComputePipeline)(index + 1);
1834}
#define SBGL_VK_PUSH_CONSTANT_SIZE
@ SBGL_SHADER_STAGE_COMPUTE
Definition sbgl_types.h:136
uint32_t sbgl_ComputePipeline
Handle for a compute pipeline.
Definition sbgl_types.h:52
sbgl_ShaderStage stage
SBGL_VulkanShader * shaders

◆ sbgl_gfx_CreatePipeline()

sbgl_Pipeline sbgl_gfx_CreatePipeline ( sbgl_GfxContext * ctx,
const sbgl_PipelineConfig * config )

Definition at line 1550 of file sbgl_backend_vulkan.c.

1550 {
1551 uint32_t index = 0;
1552 for (; index < ctx->limits.maxPipelines; index++) {
1553 if (!ctx->pipelines[index].active)
1554 break;
1555 }
1556 if (index == ctx->limits.maxPipelines)
1557 return SBGL_INVALID_HANDLE;
1558
1559 VkPipelineShaderStageCreateInfo shaderStages[2] = { 0 };
1560
1561 // Vertex Shader
1562 if (config->vertexShader == SBGL_INVALID_HANDLE || config->vertexShader > ctx->limits.maxShaders) {
1563 fprintf(stderr, "[Vulkan] Invalid vertex shader handle\n");
1564 return SBGL_INVALID_HANDLE;
1565 }
1566 uint32_t vsIndex = config->vertexShader - 1;
1567 if (!ctx->shaders[vsIndex].active || ctx->shaders[vsIndex].stage != SBGL_SHADER_STAGE_VERTEX) {
1568 fprintf(stderr, "[Vulkan] Invalid vertex shader stage or inactive shader\n");
1569 return SBGL_INVALID_HANDLE;
1570 }
1571 shaderStages[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
1572 shaderStages[0].stage = VK_SHADER_STAGE_VERTEX_BIT;
1573 shaderStages[0].module = ctx->shaders[vsIndex].module;
1574 shaderStages[0].pName = "main";
1575
1576 // Fragment Shader
1577 if (config->fragmentShader == SBGL_INVALID_HANDLE || config->fragmentShader > ctx->limits.maxShaders) {
1578 fprintf(stderr, "[Vulkan] Invalid fragment shader handle\n");
1579 return SBGL_INVALID_HANDLE;
1580 }
1581 uint32_t fsIndex = config->fragmentShader - 1;
1582 if (!ctx->shaders[fsIndex].active || ctx->shaders[fsIndex].stage != SBGL_SHADER_STAGE_FRAGMENT) {
1583 fprintf(stderr, "[Vulkan] Invalid fragment shader stage or inactive shader\n");
1584 return SBGL_INVALID_HANDLE;
1585 }
1586 shaderStages[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
1587 shaderStages[1].stage = VK_SHADER_STAGE_FRAGMENT_BIT;
1588 shaderStages[1].module = ctx->shaders[fsIndex].module;
1589 shaderStages[1].pName = "main";
1590
1591 VkVertexInputBindingDescription bindingDescription = {
1592 .binding = 0,
1593 .stride = config->vertexLayout.stride,
1594 .inputRate = VK_VERTEX_INPUT_RATE_VERTEX,
1595 };
1596
1597 SblArenaMark mark = sbl_arena_mark(ctx->arena);
1598 VkVertexInputAttributeDescription* attributeDescriptions = SBL_ARENA_PUSH_ARRAY(
1599 ctx->arena,
1600 VkVertexInputAttributeDescription,
1602 );
1603 if (!attributeDescriptions && config->vertexLayout.attributeCount > 0) {
1604 return SBGL_INVALID_HANDLE;
1605 }
1606 for (uint32_t i = 0; i < config->vertexLayout.attributeCount; i++) {
1607 attributeDescriptions[i].binding = 0;
1608 attributeDescriptions[i].location = config->vertexLayout.attributes[i].location;
1609 attributeDescriptions[i].format =
1611 attributeDescriptions[i].offset = config->vertexLayout.attributes[i].offset;
1612 }
1613
1614 VkPipelineVertexInputStateCreateInfo vertexInputInfo = {
1615 .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
1616 .vertexBindingDescriptionCount = 1,
1617 .pVertexBindingDescriptions = &bindingDescription,
1618 .vertexAttributeDescriptionCount = config->vertexLayout.attributeCount,
1619 .pVertexAttributeDescriptions = attributeDescriptions,
1620 };
1621
1622 VkPipelineInputAssemblyStateCreateInfo inputAssembly = {
1623 .sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
1624 .topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
1625 .primitiveRestartEnable = VK_FALSE,
1626 };
1627
1628 VkPipelineViewportStateCreateInfo viewportState = {
1629 .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
1630 .viewportCount = 1,
1631 .scissorCount = 1,
1632 };
1633
1634 VkPipelineRasterizationStateCreateInfo rasterizer = {
1635 .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
1636 .depthClampEnable = VK_FALSE,
1637 .rasterizerDiscardEnable = VK_FALSE,
1638 .polygonMode = VK_POLYGON_MODE_FILL,
1639 .lineWidth = 1.0f,
1640 .cullMode = VK_CULL_MODE_BACK_BIT,
1641 .frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE,
1642 .depthBiasEnable = VK_FALSE,
1643 };
1644
1645 VkPipelineMultisampleStateCreateInfo multisampling = {
1646 .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
1647 .sampleShadingEnable = VK_FALSE,
1648 .rasterizationSamples = VK_SAMPLE_COUNT_1_BIT,
1649 };
1650
1651 VkPipelineDepthStencilStateCreateInfo depthStencil = {
1652 .sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
1653 .depthTestEnable = VK_TRUE,
1654 .depthWriteEnable = VK_TRUE,
1655 .depthCompareOp = VK_COMPARE_OP_LESS,
1656 .depthBoundsTestEnable = VK_FALSE,
1657 .stencilTestEnable = VK_FALSE,
1658 };
1659
1660 VkPipelineColorBlendAttachmentState colorBlendAttachment = {
1661 .colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
1662 VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT,
1663 .blendEnable = (config->blendMode != SBGL_BLEND_MODE_NONE) ? VK_TRUE : VK_FALSE,
1664 .srcColorBlendFactor = (config->blendMode == SBGL_BLEND_MODE_ADDITIVE) ? VK_BLEND_FACTOR_ONE : VK_BLEND_FACTOR_SRC_ALPHA,
1665 .dstColorBlendFactor = (config->blendMode == SBGL_BLEND_MODE_ADDITIVE) ? VK_BLEND_FACTOR_ONE : VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA,
1666 .colorBlendOp = VK_BLEND_OP_ADD,
1667 .srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE,
1668 .dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA,
1669 .alphaBlendOp = VK_BLEND_OP_ADD,
1670 };
1671
1672 VkPipelineColorBlendStateCreateInfo colorBlending = {
1673 .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
1674 .logicOpEnable = VK_FALSE,
1675 .attachmentCount = 1,
1676 .pAttachments = &colorBlendAttachment,
1677 };
1678
1679 VkDynamicState dynamicStates[] = { VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR };
1680 VkPipelineDynamicStateCreateInfo dynamicState = {
1681 .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
1682 .dynamicStateCount = 2,
1683 .pDynamicStates = dynamicStates,
1684 };
1685
1686 VkPipelineLayoutCreateInfo pipelineLayoutInfo = {
1687 .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
1688 };
1689
1690 VkPushConstantRange pushConstantRange = {
1691 .stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT,
1692 .offset = 0,
1694 };
1695 pipelineLayoutInfo.pushConstantRangeCount = 1;
1696 pipelineLayoutInfo.pPushConstantRanges = &pushConstantRange;
1697
1698 if (ctx->vk.vkCreatePipelineLayout(
1699 ctx->device,
1700 &pipelineLayoutInfo,
1701 NULL,
1702 &ctx->pipelines[index].layout
1703 ) != VK_SUCCESS) {
1704 sbl_arena_rewind(ctx->arena, mark);
1705 return SBGL_INVALID_HANDLE;
1706 }
1707
1708 VkPipelineRenderingCreateInfo renderingCreateInfo = {
1709 .sType = VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO,
1710 .colorAttachmentCount = 1,
1711 .pColorAttachmentFormats = &ctx->swapchainFormat,
1712 .depthAttachmentFormat = ctx->depthFormat,
1713 };
1714
1715 VkGraphicsPipelineCreateInfo pipelineInfo = {
1716 .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
1717 .pNext = &renderingCreateInfo,
1718 .stageCount = 2,
1719 .pStages = shaderStages,
1720 .pVertexInputState = &vertexInputInfo,
1721 .pInputAssemblyState = &inputAssembly,
1722 .pViewportState = &viewportState,
1723 .pRasterizationState = &rasterizer,
1724 .pMultisampleState = &multisampling,
1725 .pDepthStencilState = &depthStencil,
1726 .pColorBlendState = &colorBlending,
1727 .pDynamicState = &dynamicState,
1728 .layout = ctx->pipelines[index].layout,
1729 .renderPass = VK_NULL_HANDLE,
1730 .subpass = 0,
1731 };
1732
1733 if (ctx->vk.vkCreateGraphicsPipelines(
1734 ctx->device,
1735 VK_NULL_HANDLE,
1736 1,
1737 &pipelineInfo,
1738 NULL,
1739 &ctx->pipelines[index].handle
1740 ) != VK_SUCCESS) {
1741 ctx->vk.vkDestroyPipelineLayout(ctx->device, ctx->pipelines[index].layout, NULL);
1742 sbl_arena_rewind(ctx->arena, mark);
1743 return SBGL_INVALID_HANDLE;
1744 }
1745
1746 sbl_arena_rewind(ctx->arena, mark);
1747 ctx->pipelines[index].active = true;
1748 return (sbgl_Pipeline)(index + 1);
1749}
static VkFormat sbgl_to_vk_format(sbgl_Format format)
@ SBGL_BLEND_MODE_ADDITIVE
Definition sbgl_types.h:198
@ SBGL_BLEND_MODE_NONE
Definition sbgl_types.h:196
@ SBGL_SHADER_STAGE_FRAGMENT
Definition sbgl_types.h:135
@ SBGL_SHADER_STAGE_VERTEX
Definition sbgl_types.h:134
uint32_t sbgl_Pipeline
Handle for a graphics pipeline.
Definition sbgl_types.h:47
VkPipelineLayout layout
sbgl_Shader fragmentShader
Definition sbgl_types.h:206
sbgl_Shader vertexShader
Definition sbgl_types.h:205
sbgl_BlendMode blendMode
Definition sbgl_types.h:208
sbgl_VertexLayout vertexLayout
Definition sbgl_types.h:207
const sbgl_VertexAttribute * attributes
Definition sbgl_types.h:189
uint32_t attributeCount
Definition sbgl_types.h:188

◆ sbgl_gfx_DestroyBuffer()

void sbgl_gfx_DestroyBuffer ( sbgl_GfxContext * ctx,
sbgl_Buffer handle )

Definition at line 1409 of file sbgl_backend_vulkan.c.

1409 {
1410 /* The system releases the GPU-side buffer handle and, if the memory was
1411 allocated from the managed heap, returns the range to the sub-allocator
1412 to mitigate memory fragmentation. Static and Dynamic allocations are
1413 reclaimed automatically or persist until shutdown. */
1414 if (handle == SBGL_INVALID_HANDLE)
1415 return;
1416 uint32_t index = (uint32_t)handle - 1;
1417 if (index >= ctx->limits.maxBuffers || !ctx->bufferActive[index])
1418 return;
1419
1420 SBGL_VulkanBuffer* buffer = &ctx->buffers[index];
1421 ctx->vk.vkDestroyBuffer(ctx->device, buffer->handle, NULL);
1422
1423 if (buffer->heapType == SBGL_HEAP_TYPE_MANAGED) {
1424 managed_heap_free(ctx, buffer->offset);
1425 }
1426
1427 ctx->bufferActive[index] = false;
1428}
static void managed_heap_free(sbgl_GfxContext *ctx, uint32_t offset)

◆ sbgl_gfx_DestroyBufferDeferred()

void sbgl_gfx_DestroyBufferDeferred ( sbgl_GfxContext * ctx,
sbgl_Buffer buffer )

Marks a buffer for destruction after current frames complete.

This function should be used for temporary buffers that are submitted for GPU execution in the current frame and must not be destroyed until the GPU has finished using them.

Parameters
ctx — The graphics context.
buffer — Handle to the buffer to destroy.

Definition at line 1479 of file sbgl_backend_vulkan.c.

1479 {
1480 /* The system queues the buffer for destruction after the current frame's GPU work
1481 is guaranteed to be complete, preventing premature release of in-flight resources. */
1482 if (ctx->deferredCount[ctx->currentFrame] < 64) {
1483 ctx->deferredBuffers[ctx->currentFrame][ctx->deferredCount[ctx->currentFrame]++] = handle;
1484 } else {
1485 /* If the deferred queue is full, the system falls back to immediate destruction
1486 after a device idle wait to maintain safety at the cost of performance. */
1488 sbgl_gfx_DestroyBuffer(ctx, handle);
1489 }
1490}
void sbgl_gfx_DeviceWaitIdle(sbgl_GfxContext *ctx)

◆ sbgl_gfx_DestroyComputePipeline()

void sbgl_gfx_DestroyComputePipeline ( sbgl_GfxContext * ctx,
sbgl_ComputePipeline handle )

Definition at line 1836 of file sbgl_backend_vulkan.c.

1836 {
1837 /* The system releases the GPU-side pipeline and layout resources and marks
1838 the internal slot as inactive for future reuse. */
1839 if (handle == SBGL_INVALID_HANDLE)
1840 return;
1841 uint32_t index = (uint32_t)handle - 1;
1842 if (index >= ctx->limits.maxPipelines || !ctx->computePipelines[index].active)
1843 return;
1844
1845 ctx->vk.vkDestroyPipeline(ctx->device, ctx->computePipelines[index].handle, NULL);
1846 ctx->vk.vkDestroyPipelineLayout(ctx->device, ctx->computePipelines[index].layout, NULL);
1847 ctx->computePipelines[index].active = false;
1848}

◆ sbgl_gfx_DestroyPipeline()

void sbgl_gfx_DestroyPipeline ( sbgl_GfxContext * ctx,
sbgl_Pipeline handle )

Definition at line 1751 of file sbgl_backend_vulkan.c.

1751 {
1752 if (handle == SBGL_INVALID_HANDLE)
1753 return;
1754 uint32_t index = (uint32_t)handle - 1;
1755 if (index >= ctx->limits.maxPipelines || !ctx->pipelines[index].active)
1756 return;
1757
1758 ctx->vk.vkDestroyPipeline(ctx->device, ctx->pipelines[index].handle, NULL);
1759 ctx->vk.vkDestroyPipelineLayout(ctx->device, ctx->pipelines[index].layout, NULL);
1760 ctx->pipelines[index].active = false;
1761}

◆ sbgl_gfx_DestroyShader()

void sbgl_gfx_DestroyShader ( sbgl_GfxContext * ctx,
sbgl_Shader handle )

Definition at line 1539 of file sbgl_backend_vulkan.c.

1539 {
1540 if (handle == SBGL_INVALID_HANDLE)
1541 return;
1542 uint32_t index = (uint32_t)handle - 1;
1543 if (index >= ctx->limits.maxShaders || !ctx->shaders[index].active)
1544 return;
1545
1546 ctx->vk.vkDestroyShaderModule(ctx->device, ctx->shaders[index].module, NULL);
1547 ctx->shaders[index].active = false;
1548}

◆ sbgl_gfx_DeviceWaitIdle()

void sbgl_gfx_DeviceWaitIdle ( sbgl_GfxContext * ctx)

Definition at line 1316 of file sbgl_backend_vulkan.c.

1316 {
1317 if (ctx && ctx->device) {
1318 ctx->vk.vkDeviceWaitIdle(ctx->device);
1319 }
1320}

◆ sbgl_gfx_DispatchCompute()

void sbgl_gfx_DispatchCompute ( sbgl_GfxContext * ctx,
uint32_t x,
uint32_t y,
uint32_t z )

Definition at line 1869 of file sbgl_backend_vulkan.c.

1869 {
1870 /* A compute dispatch command is recorded into the current frame's command buffer,
1871 triggering parallel execution across the specified workgroup dimensions. */
1872 ctx->vk.vkCmdDispatch(ctx->commandBuffers[ctx->currentFrame], x, y, z);
1873}

◆ sbgl_gfx_Draw()

void sbgl_gfx_Draw ( sbgl_GfxContext * ctx,
uint32_t vertexCount,
uint32_t firstVertex,
uint32_t instanceCount )

Definition at line 1996 of file sbgl_backend_vulkan.c.

1996 {
1997 ctx->vk.vkCmdDraw(ctx->commandBuffers[ctx->currentFrame], vertexCount, instanceCount, firstVertex, 0);
1998}

◆ sbgl_gfx_DrawIndexed()

void sbgl_gfx_DrawIndexed ( sbgl_GfxContext * ctx,
uint32_t indexCount,
uint32_t firstIndex,
int32_t vertexOffset,
uint32_t instanceCount )

Definition at line 2000 of file sbgl_backend_vulkan.c.

2006 {
2007 ctx->vk.vkCmdDrawIndexed(
2008 ctx->commandBuffers[ctx->currentFrame],
2009 indexCount,
2010 instanceCount,
2011 firstIndex,
2012 vertexOffset,
2013 0
2014 );
2015}

◆ sbgl_gfx_DrawIndirect()

void sbgl_gfx_DrawIndirect ( sbgl_GfxContext * ctx,
sbgl_Buffer buffer,
size_t offset,
uint32_t drawCount )

Submits a batch of draw calls stored in a GPU buffer.

Parameters
ctx — The graphics context.
buffer — Handle to the buffer containing an array of sbgl_IndirectCommand.
offset — The byte offset into the buffer where the commands begin.
drawCount — The number of commands to execute from the buffer.

Definition at line 2017 of file sbgl_backend_vulkan.c.

2022 {
2023 if (handle == SBGL_INVALID_HANDLE)
2024 return;
2025 uint32_t index = (uint32_t)handle - 1;
2026 if (index >= ctx->limits.maxBuffers || !ctx->bufferActive[index])
2027 return;
2028
2029 ctx->vk.vkCmdDrawIndexedIndirect(
2030 ctx->commandBuffers[ctx->currentFrame],
2031 ctx->buffers[index].handle,
2032 (VkDeviceSize)offset,
2033 drawCount,
2034 sizeof(sbgl_IndirectCommand)
2035 );
2036}
Standard Vulkan Indirect Draw command layout.
Definition sbgl_types.h:111

◆ sbgl_gfx_EndFrame()

void sbgl_gfx_EndFrame ( sbgl_GfxContext * ctx)

Submits the current frame's commands and presents the image.

Definition at line 1276 of file sbgl_backend_vulkan.c.

1276 {
1277 ctx->vk.vkEndCommandBuffer(ctx->commandBuffers[ctx->currentFrame]);
1278
1279 VkPipelineStageFlags waitStages[] = { VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT };
1280
1281 /* The system utilizes a semaphore indexed by the current image to signal completion
1282 to the presentation engine, preventing reuse conflicts during high-frequency updates. */
1283 VkSemaphore signalSemaphore = ctx->renderFinishedSemaphores[ctx->currentImageIndex];
1284
1285 VkSubmitInfo submitInfo = {
1286 .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
1287 .waitSemaphoreCount = 1,
1288 .pWaitSemaphores = &ctx->imageAvailableSemaphores[ctx->semaphoreIndex],
1289 .pWaitDstStageMask = waitStages,
1290 .commandBufferCount = 1,
1291 .pCommandBuffers = &ctx->commandBuffers[ctx->currentFrame],
1292 .signalSemaphoreCount = 1,
1293 .pSignalSemaphores = &signalSemaphore,
1294 };
1295 ctx->vk
1296 .vkQueueSubmit(ctx->graphicsQueue, 1, &submitInfo, ctx->inFlightFences[ctx->currentFrame]);
1297
1298 VkPresentInfoKHR presentInfo = {
1299 .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
1300 .waitSemaphoreCount = 1,
1301 .pWaitSemaphores = &signalSemaphore,
1302 .swapchainCount = 1,
1303 .pSwapchains = &ctx->swapchain,
1304 .pImageIndices = &ctx->currentImageIndex,
1305 };
1306 VkResult result = ctx->vk.vkQueuePresentKHR(ctx->graphicsQueue, &presentInfo);
1307
1308 if (result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_SUBOPTIMAL_KHR) {
1309 recreate_swapchain(ctx);
1310 }
1311
1314}

◆ sbgl_gfx_EndRenderPass()

void sbgl_gfx_EndRenderPass ( sbgl_GfxContext * ctx)

Ends the current graphics rendering pass.

Definition at line 1239 of file sbgl_backend_vulkan.c.

1239 {
1240 /* The system records the ending timestamp at the conclusion of the frame's rendering commands.
1241 */
1242 ctx->vk.vkCmdWriteTimestamp(
1243 ctx->commandBuffers[ctx->currentFrame],
1244 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
1245 ctx->queryPool,
1246 ctx->currentFrame * 2 + 1
1247 );
1248
1249 ctx->vk.vkCmdEndRendering(ctx->commandBuffers[ctx->currentFrame]);
1250
1251 VkImageMemoryBarrier barrier = {
1252 .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
1253 .oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
1254 .newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
1255 .image = ctx->images[ctx->currentImageIndex],
1256 .subresourceRange = { .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
1257 .levelCount = 1,
1258 .layerCount = 1 },
1259 .srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
1260 .dstAccessMask = 0,
1261 };
1262 ctx->vk.vkCmdPipelineBarrier(
1263 ctx->commandBuffers[ctx->currentFrame],
1264 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
1265 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
1266 0,
1267 0,
1268 NULL,
1269 0,
1270 NULL,
1271 1,
1272 &barrier
1273 );
1274}

◆ sbgl_gfx_FillBuffer()

void sbgl_gfx_FillBuffer ( sbgl_GfxContext * ctx,
sbgl_Buffer handle,
size_t offset,
size_t size,
uint32_t value )

Performs a hardware-accelerated buffer fill.

Definition at line 1430 of file sbgl_backend_vulkan.c.

1436 {
1437 /* A hardware-accelerated fill operation is recorded into the current frame's
1438 command buffer, utilizing the GPU's DMA engine for maximum performance. */
1439 if (handle == SBGL_INVALID_HANDLE)
1440 return;
1441 uint32_t index = (uint32_t)handle - 1;
1442 if (index >= ctx->limits.maxBuffers || !ctx->bufferActive[index])
1443 return;
1444
1445 ctx->vk.vkCmdFillBuffer(
1446 ctx->commandBuffers[ctx->currentFrame],
1447 ctx->buffers[index].handle,
1448 (VkDeviceSize)offset,
1449 (VkDeviceSize)size,
1450 value
1451 );
1452}

◆ sbgl_gfx_GetBufferDeviceAddress()

uint64_t sbgl_gfx_GetBufferDeviceAddress ( sbgl_GfxContext * ctx,
sbgl_Buffer buffer )

Retrieves the 64-bit GPU virtual address for a buffer.

Used primarily for passing buffer pointers to shaders via push constants or storage buffers when using VK_KHR_buffer_device_address.

Parameters
ctx — The graphics context.
buffer — The buffer to query.
Returns
The 64-bit device address, or 0 if retrieval failed.

Definition at line 1492 of file sbgl_backend_vulkan.c.

1492 {
1493 /* The system retrieves the 64-bit GPU virtual address for the specified buffer,
1494 enabling direct memory access within shaders via Buffer Device Address. */
1495 if (handle == SBGL_INVALID_HANDLE)
1496 return 0;
1497 uint32_t index = (uint32_t)handle - 1;
1498 if (index >= ctx->limits.maxBuffers || !ctx->bufferActive[index])
1499 return 0;
1500
1501 VkBufferDeviceAddressInfo info = {
1502 .sType = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO,
1503 .buffer = ctx->buffers[index].handle,
1504 };
1505
1506 return ctx->vk.vkGetBufferDeviceAddress(ctx->device, &info);
1507}

◆ sbgl_gfx_GetFrameIndex()

uint32_t sbgl_gfx_GetFrameIndex ( sbgl_GfxContext * ctx)

Retrieves the current backend frame index.

Definition at line 1454 of file sbgl_backend_vulkan.c.

1454 {
1455 /* Returns the current frame index, which is used by the core engine to
1456 manage multi-buffered resources. */
1457 return ctx->currentFrame;
1458}

◆ sbgl_gfx_GetGpuTime()

float sbgl_gfx_GetGpuTime ( sbgl_GfxContext * ctx)

Retrieves the elapsed GPU time for the previous frame in milliseconds.

Parameters
ctx — The graphics context.
Returns
The duration in milliseconds.

Definition at line 2099 of file sbgl_backend_vulkan.c.

2099 {
2100 /* The system retrieves the recorded timestamps from the GPU and calculates the elapsed time
2101 in milliseconds, providing a non-blocking performance measurement. */
2102 uint64_t results[2] = { 0 };
2103 VkResult res = ctx->vk.vkGetQueryPoolResults(
2104 ctx->device,
2105 ctx->queryPool,
2106 ctx->currentFrame * 2,
2107 2,
2108 sizeof(results),
2109 results,
2110 sizeof(uint64_t),
2111 VK_QUERY_RESULT_64_BIT
2112 );
2113
2114 if (res == VK_SUCCESS) {
2115 uint64_t start = results[0];
2116 uint64_t end = results[1];
2117 return (float)(end - start) * ctx->timestampPeriod / 1e6f;
2118 }
2119
2120 return 0.0f;
2121}

◆ sbgl_gfx_GetLastVkResult()

int32_t sbgl_gfx_GetLastVkResult ( sbgl_GfxContext * ctx)

Retrieves the last VkResult from the backend for error inspection.

Parameters
ctx — The graphics context.
Returns
The last VkResult code, or 0 if no error occurred.

Definition at line 2123 of file sbgl_backend_vulkan.c.

2123 {
2124 if (!ctx) return 0;
2125 return ctx->backendResult;
2126}

◆ sbgl_gfx_Init()

sbgl_GfxContext * sbgl_gfx_Init ( sbgl_Window * window,
struct SblArena * arena,
const sbgl_ResourceLimits * limits,
bool enableValidation )

Initializes the graphics backend with configurable resource limits.

Parameters
window — The platform window handle.
arena — The arena for persistent allocations.
limits — Pointer to resource limits (must not be NULL).
enableValidation — Whether to enable Vulkan validation layers.
Returns
A pointer to the graphics context, or NULL on failure.

Definition at line 985 of file sbgl_backend_vulkan.c.

985 {
986 if (volkInitialize() != VK_SUCCESS) {
987 fprintf(stderr, "[Vulkan] Failed to initialize volk\n");
988 return NULL;
989 }
990
992 if (!ctx)
993 return NULL;
994
995 ctx->window = window;
996 ctx->arena = arena;
997
998 // Apply resource limits (use defaults if not provided)
999 if (limits) {
1000 ctx->limits = *limits;
1001 // Enforce minimums to prevent crashes
1002 if (ctx->limits.maxBuffers < 64) ctx->limits.maxBuffers = 64;
1003 if (ctx->limits.maxShaders < 16) ctx->limits.maxShaders = 16;
1004 if (ctx->limits.maxPipelines < 16) ctx->limits.maxPipelines = 16;
1005 } else {
1007 }
1008
1009 // Dynamically allocate resource arrays from the arena
1010 // Use raw byte allocation since SBGL_Vulkan* types are defined later in this file
1011 ctx->bufferActive = (bool*)sbl_arena_alloc_zero(arena, sizeof(bool) * ctx->limits.maxBuffers);
1016
1017 if (!ctx->bufferActive || !ctx->buffers || !ctx->shaders || !ctx->pipelines || !ctx->computePipelines) {
1018 fprintf(stderr, "[Vulkan] Failed to allocate resource arrays\n");
1019 sbgl_gfx_Shutdown(ctx);
1020 return NULL;
1021 }
1022
1023 if (!create_instance(ctx, enableValidation) || !create_surface(ctx, window) || !select_physical_device(ctx) ||
1024 !create_logical_device(ctx) || !create_heaps(ctx) || !create_swapchain(ctx, window) ||
1027 sbgl_gfx_Shutdown(ctx);
1028 return NULL;
1029 }
1030
1031 /* The query pool is reset on the host immediately after creation to ensure that all
1032 queries are in a valid state before the first attempt to retrieve results. */
1033 ctx->vk.vkResetQueryPool(ctx->device, ctx->queryPool, 0, SBGL_MAX_FRAMES_IN_FLIGHT * 2);
1034
1035 return ctx;
1036}
static bool create_sync_and_command(sbgl_GfxContext *ctx)
void sbgl_gfx_Shutdown(sbgl_GfxContext *ctx)
static bool create_instance(sbgl_GfxContext *ctx, bool enableValidation)
static bool create_heaps(sbgl_GfxContext *ctx)
static bool select_physical_device(sbgl_GfxContext *ctx)
static const sbgl_ResourceLimits sbgl_DefaultResourceLimits
static bool create_logical_device(sbgl_GfxContext *ctx)
static bool create_telemetry_resources(sbgl_GfxContext *ctx)
static bool create_transient_resources(sbgl_GfxContext *ctx)
static bool create_surface(sbgl_GfxContext *ctx, sbgl_Window *window)
SBL_ARENA_DEF void * sbl_arena_alloc_zero(SblArena *arena, uint64_t size)
#define SBL_ARENA_PUSH_STRUCT_ZERO(arena, type)
Definition sbl_arena.h:20

◆ sbgl_gfx_LoadShader()

sbgl_Shader sbgl_gfx_LoadShader ( sbgl_GfxContext * ctx,
sbgl_ShaderStage stage,
const uint32_t * bytecode,
size_t size )

Definition at line 1509 of file sbgl_backend_vulkan.c.

1514 {
1515 uint32_t index = 0;
1516 for (; index < ctx->limits.maxShaders; index++) {
1517 if (!ctx->shaders[index].active)
1518 break;
1519 }
1520 if (index == ctx->limits.maxShaders)
1521 return SBGL_INVALID_HANDLE;
1522
1523 VkShaderModuleCreateInfo createInfo = {
1524 .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
1525 .codeSize = size,
1526 .pCode = bytecode,
1527 };
1528
1529 if (ctx->vk.vkCreateShaderModule(ctx->device, &createInfo, NULL, &ctx->shaders[index].module) !=
1530 VK_SUCCESS) {
1531 return SBGL_INVALID_HANDLE;
1532 }
1533
1534 ctx->shaders[index].stage = stage;
1535 ctx->shaders[index].active = true;
1536 return (sbgl_Shader)(index + 1);
1537}
uint32_t sbgl_Shader
Handle for a shader module.
Definition sbgl_types.h:42

◆ sbgl_gfx_MapBuffer()

void * sbgl_gfx_MapBuffer ( sbgl_GfxContext * ctx,
sbgl_Buffer handle )

Definition at line 1460 of file sbgl_backend_vulkan.c.

1460 {
1461 /* The system returns the persistently mapped pointer for the specified buffer,
1462 enabling high-performance data updates without the overhead of repeated mapping. */
1463 if (handle == SBGL_INVALID_HANDLE)
1464 return NULL;
1465 uint32_t index = (uint32_t)handle - 1;
1466 if (index >= ctx->limits.maxBuffers || !ctx->bufferActive[index])
1467 return NULL;
1468
1469 return ctx->buffers[index].mapped;
1470}

◆ sbgl_gfx_MemoryBarrier()

void sbgl_gfx_MemoryBarrier ( sbgl_GfxContext * ctx,
sbgl_BarrierType type )

Definition at line 1875 of file sbgl_backend_vulkan.c.

1875 {
1876 /* The system injects a pipeline barrier into the command stream to synchronize
1877 memory access between different execution stages, preventing race conditions. */
1878 VkMemoryBarrier barrier = { .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER };
1879 VkPipelineStageFlags srcStage = 0;
1880 VkPipelineStageFlags dstStage = 0;
1881
1882 switch (type) {
1884 /* Synchronizes compute and transfer (fill) writes to be visible to
1885 subsequent compute operations. */
1886 barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT;
1887 barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
1888 srcStage = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT;
1889 dstStage = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
1890 break;
1892 /* Synchronizes compute writes to SSBOs for use in indirect draw command buffers. */
1893 barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
1894 barrier.dstAccessMask = VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
1895 srcStage = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
1896 dstStage = VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
1897 break;
1899 /* Synchronizes compute writes to be visible to vertex input and shader stages. */
1900 barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
1901 barrier.dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_SHADER_READ_BIT;
1902 srcStage = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
1903 dstStage = VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
1904 break;
1906 /* Synchronizes graphics writes to be visible to subsequent compute operations. */
1907 barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
1908 barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
1909 srcStage = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
1910 dstStage = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
1911 break;
1913 /* Synchronizes host writes to be visible to subsequent compute operations. */
1914 barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
1915 barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
1916 srcStage = VK_PIPELINE_STAGE_HOST_BIT;
1917 dstStage = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
1918 break;
1920 /* Synchronizes host writes to be visible to subsequent graphics (vertex) operations. */
1921 barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
1922 barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
1923 srcStage = VK_PIPELINE_STAGE_HOST_BIT;
1924 dstStage = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
1925 break;
1926 }
1927
1928 ctx->vk.vkCmdPipelineBarrier(
1929 ctx->commandBuffers[ctx->currentFrame],
1930 srcStage,
1931 dstStage,
1932 0,
1933 1,
1934 &barrier,
1935 0,
1936 NULL,
1937 0,
1938 NULL
1939 );
1940}
@ SBGL_BARRIER_COMPUTE_TO_INDIRECT
Definition sbgl_types.h:144
@ SBGL_BARRIER_GRAPHICS_TO_COMPUTE
Definition sbgl_types.h:146
@ SBGL_BARRIER_COMPUTE_TO_COMPUTE
Definition sbgl_types.h:143
@ SBGL_BARRIER_HOST_TO_GRAPHICS
Definition sbgl_types.h:148
@ SBGL_BARRIER_HOST_TO_COMPUTE
Definition sbgl_types.h:147
@ SBGL_BARRIER_COMPUTE_TO_GRAPHICS
Definition sbgl_types.h:145

◆ sbgl_gfx_PushConstants()

void sbgl_gfx_PushConstants ( sbgl_GfxContext * ctx,
size_t size,
const void * data )

Definition at line 2066 of file sbgl_backend_vulkan.c.

2066 {
2067 /* Push constants are submitted to both the currently bound graphics and compute
2068 pipelines to ensure that metadata is available across all execution stages. */
2069 if (size > SBGL_VK_PUSH_CONSTANT_SIZE) {
2070 fprintf(stderr, "[Vulkan] Push constant size (%zu) exceeds maximum (%d)\n", size, SBGL_VK_PUSH_CONSTANT_SIZE);
2071 return;
2072 }
2073
2074 if (ctx->boundPipeline != SBGL_INVALID_HANDLE) {
2075 uint32_t index = (uint32_t)ctx->boundPipeline - 1;
2076 ctx->vk.vkCmdPushConstants(
2077 ctx->commandBuffers[ctx->currentFrame],
2078 ctx->pipelines[index].layout,
2079 VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT,
2080 0,
2081 (uint32_t)size,
2082 data
2083 );
2084 }
2085
2087 uint32_t index = (uint32_t)ctx->boundComputePipeline - 1;
2088 ctx->vk.vkCmdPushConstants(
2089 ctx->commandBuffers[ctx->currentFrame],
2090 ctx->computePipelines[index].layout,
2091 VK_SHADER_STAGE_COMPUTE_BIT,
2092 0,
2093 (uint32_t)size,
2094 data
2095 );
2096 }
2097}

◆ sbgl_gfx_Shutdown()

void sbgl_gfx_Shutdown ( sbgl_GfxContext * ctx)

Definition at line 1038 of file sbgl_backend_vulkan.c.

1038 {
1039 if (!ctx)
1040 return;
1041
1042 if (ctx->device) {
1043 ctx->vk.vkDeviceWaitIdle(ctx->device);
1044
1045 // Clean up all active buffers
1046 for (uint32_t i = 0; i < ctx->limits.maxBuffers; i++) {
1047 if (ctx->bufferActive[i]) {
1048 sbgl_gfx_DestroyBuffer(ctx, (sbgl_Buffer)(i + 1));
1049 }
1050 }
1051
1052 // Clean up all active shaders
1053 for (uint32_t i = 0; i < ctx->limits.maxShaders; i++) {
1054 if (ctx->shaders[i].active) {
1055 sbgl_gfx_DestroyShader(ctx, (sbgl_Shader)(i + 1));
1056 }
1057 }
1058
1059 // Clean up all active pipelines
1060 for (uint32_t i = 0; i < ctx->limits.maxPipelines; i++) {
1061 if (ctx->pipelines[i].active) {
1063 }
1064 if (ctx->computePipelines[i].active) {
1066 }
1067 }
1068
1069 // Process any remaining deferred buffers
1070 for (uint32_t f = 0; f < SBGL_MAX_FRAMES_IN_FLIGHT; f++) {
1071 for (uint32_t i = 0; i < ctx->deferredCount[f]; i++) {
1072 sbgl_gfx_DestroyBuffer(ctx, ctx->deferredBuffers[f][i]);
1073 }
1074 ctx->deferredCount[f] = 0;
1075 }
1076
1077 for (uint32_t i = 0; i < SBGL_MAX_FRAMES_IN_FLIGHT; i++) {
1078 ctx->vk.vkDestroyFence(ctx->device, ctx->inFlightFences[i], NULL);
1079 }
1080
1081 for (uint32_t i = 0; i < SBGL_MAX_SWAPCHAIN_IMAGES; i++) {
1082 if (ctx->imageAvailableSemaphores[i] != VK_NULL_HANDLE) {
1083 ctx->vk.vkDestroySemaphore(ctx->device, ctx->imageAvailableSemaphores[i], NULL);
1084 }
1085 if (ctx->renderFinishedSemaphores[i] != VK_NULL_HANDLE) {
1086 ctx->vk.vkDestroySemaphore(ctx->device, ctx->renderFinishedSemaphores[i], NULL);
1087 }
1088 }
1089 ctx->vk.vkDestroyQueryPool(ctx->device, ctx->queryPool, NULL);
1090 ctx->vk.vkDestroyCommandPool(ctx->device, ctx->commandPool, NULL);
1091
1092 ctx->vk.vkFreeMemory(ctx->device, ctx->staticHeap.memory, NULL);
1093 ctx->vk.vkFreeMemory(ctx->device, ctx->dynamicHeap.memory, NULL);
1094 ctx->vk.vkFreeMemory(ctx->device, ctx->managedHeap.memory, NULL);
1095
1096 cleanup_swapchain(ctx);
1097 ctx->vk.vkDestroyDevice(ctx->device, NULL);
1098 }
1099 if (ctx->instance) {
1100 vkDestroySurfaceKHR(ctx->instance, ctx->surface, NULL);
1101 vkDestroyInstance(ctx->instance, NULL);
1102 }
1103}
void sbgl_gfx_DestroyShader(sbgl_GfxContext *ctx, sbgl_Shader handle)
void sbgl_gfx_DestroyPipeline(sbgl_GfxContext *ctx, sbgl_Pipeline handle)
void sbgl_gfx_DestroyComputePipeline(sbgl_GfxContext *ctx, sbgl_ComputePipeline handle)

◆ sbgl_gfx_UnmapBuffer()

void sbgl_gfx_UnmapBuffer ( sbgl_GfxContext * ctx,
sbgl_Buffer handle )

Definition at line 1472 of file sbgl_backend_vulkan.c.

1472 {
1473 /* Persistent mapping remains active for the buffer's lifecycle, so unmapping
1474 is a no-op to maintain API compatibility while maximizing performance. */
1475 (void)ctx;
1476 (void)handle;
1477}

◆ sbgl_setup_debug_utils()

static void sbgl_setup_debug_utils ( sbgl_GfxContext * ctx)
static

Definition at line 221 of file sbgl_backend_vulkan.c.

221 {
222 VkDebugUtilsMessengerCreateInfoEXT createInfo = {
223 .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT,
224 .messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT |
225 VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT,
226 .messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
227 VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT,
228 .pfnUserCallback = sbgl_vk_debug_callback,
229 .pUserData = NULL,
230 };
231
232 PFN_vkCreateDebugUtilsMessengerEXT func =
233 (PFN_vkCreateDebugUtilsMessengerEXT)vkGetInstanceProcAddr(
234 ctx->instance, "vkCreateDebugUtilsMessengerEXT");
235
236 if (!func) {
237 return;
238 }
239
240 VkDebugUtilsMessengerEXT messenger;
241 if (func(ctx->instance, &createInfo, NULL, &messenger) != VK_SUCCESS) {
242 return;
243 }
244
245 (void)messenger;
246}
static VKAPI_ATTR VkBool32 VKAPI_CALL sbgl_vk_debug_callback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VkDebugUtilsMessageTypeFlagsEXT messageType, const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData, void *pUserData)

◆ sbgl_to_vk_format()

static VkFormat sbgl_to_vk_format ( sbgl_Format format)
static

Definition at line 248 of file sbgl_backend_vulkan.c.

248 {
249 switch (format) {
251 return VK_FORMAT_R32_SFLOAT;
253 return VK_FORMAT_R32G32_SFLOAT;
255 return VK_FORMAT_R32G32B32_SFLOAT;
257 return VK_FORMAT_R32G32B32A32_SFLOAT;
259 return VK_FORMAT_R16G16B16A16_SNORM;
261 return VK_FORMAT_R8G8B8A8_UNORM;
262 default:
263 return VK_FORMAT_UNDEFINED;
264 }
265}
@ SBGL_FORMAT_R8G8B8A8_UNORM
Definition sbgl_types.h:160
@ SBGL_FORMAT_R32G32_SFLOAT
Definition sbgl_types.h:156
@ SBGL_FORMAT_R16G16B16A16_SNORM
Definition sbgl_types.h:159
@ SBGL_FORMAT_R32G32B32_SFLOAT
Definition sbgl_types.h:157
@ SBGL_FORMAT_R32G32B32A32_SFLOAT
Definition sbgl_types.h:158
@ SBGL_FORMAT_R32_SFLOAT
Definition sbgl_types.h:155

◆ sbgl_vk_debug_callback()

static VKAPI_ATTR VkBool32 VKAPI_CALL sbgl_vk_debug_callback ( VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
VkDebugUtilsMessageTypeFlagsEXT messageType,
const VkDebugUtilsMessengerCallbackDataEXT * pCallbackData,
void * pUserData )
static

Definition at line 200 of file sbgl_backend_vulkan.c.

205 {
206 (void)pUserData;
207 (void)messageType;
208
209 sbgl_LogLevel level =
210 (messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT)
213
215 __FILE__, __LINE__, __func__,
216 pCallbackData->pMessage);
217
218 return VK_FALSE;
219}
@ SBGL_LOG_CAT_GFX
sbgl_LogLevel
Logging severity levels.
@ SBGL_LOG_WARN
@ SBGL_LOG_ERROR
void sbgl_internal_log_impl(sbgl_LogLevel level, sbgl_LogCategory category, const char *file, int line, const char *function, const char *message)
Definition sbgl_log.c:116

◆ select_physical_device()

static bool select_physical_device ( sbgl_GfxContext * ctx)
static

Definition at line 550 of file sbgl_backend_vulkan.c.

550 {
551 uint32_t deviceCount = 0;
552 vkEnumeratePhysicalDevices(ctx->instance, &deviceCount, NULL);
553 if (deviceCount == 0) {
554 fprintf(stderr, "[Vulkan] No physical devices found\n");
555 return false;
556 }
557
558 SblArenaMark mark = sbl_arena_mark(ctx->arena);
559 VkPhysicalDevice* devices = SBL_ARENA_PUSH_ARRAY(ctx->arena, VkPhysicalDevice, deviceCount);
560 if (!devices)
561 return false;
562 vkEnumeratePhysicalDevices(ctx->instance, &deviceCount, devices);
563
564 for (uint32_t i = 0; i < deviceCount; i++) {
565 VkPhysicalDeviceProperties props;
566 vkGetPhysicalDeviceProperties(devices[i], &props);
567 if (props.deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU) {
568 ctx->physicalDevice = devices[i];
569 printf("[Vulkan] Selected Discrete GPU: %s\n", props.deviceName);
570 break;
571 }
572 }
573
574 if (!ctx->physicalDevice) {
575 ctx->physicalDevice = devices[0];
576 VkPhysicalDeviceProperties props;
577 vkGetPhysicalDeviceProperties(ctx->physicalDevice, &props);
578 printf("[Vulkan] Selected GPU: %s\n", props.deviceName);
579 }
580
581 sbl_arena_rewind(ctx->arena, mark);
582 return true;
583}

◆ static_heap_alloc()

static uint32_t static_heap_alloc ( sbgl_GfxContext * ctx,
size_t size )
static

Definition at line 281 of file sbgl_backend_vulkan.c.

281 {
282 /* The size is rounded up to a 256-byte boundary to satisfy the most stringent
283 Vulkan hardware alignment requirements (minUniformBufferOffsetAlignment). */
284 uint32_t alignedSize = (uint32_t)((size + 255) & ~255);
285
286 if (ctx->staticHeap.offset + alignedSize > ctx->staticHeap.size) {
287 /* If the requested allocation exceeds the remaining capacity of the static heap,
288 the context's result state is updated to reflect an out-of-memory error. */
290 return SBGL_INVALID_OFFSET;
291 }
292
293 uint32_t offset = ctx->staticHeap.offset;
294 ctx->staticHeap.offset += alignedSize;
295 return offset;
296}

Variable Documentation

◆ sbgl_DefaultResourceLimits

const sbgl_ResourceLimits sbgl_DefaultResourceLimits
static
Initial value:
= {
.maxBuffers = 1024,
.maxShaders = 256,
.maxPipelines = 256
}

Definition at line 26 of file sbgl_backend_vulkan.c.

26 {
27 .maxBuffers = 1024,
28 .maxShaders = 256,
29 .maxPipelines = 256
30};