5 #include <vulkan/vulkan.h>
11 #include <vulkan/vulkan_win32.h>
13 /*#include <vulkan/vulkan_xlib.h>*/
14 #include <X11/Xlib-xcb.h>
15 #include <vulkan/vulkan_xcb.h>
35 /* if rpasses[rpidx].vkobj != vkrpass, the framebuf is invalid */
39 VkImageView imgv[MAX_FB_IMGV];
47 static struct rpass *rpasses;
48 static struct framebuf *framebufs;
51 static int create_instance(void);
52 static int create_surface(void);
53 static int choose_phys_dev(void);
54 static int create_device(void);
55 static int create_swapchain(void);
57 static int choose_pixfmt(void);
58 static int eval_pdev_score(VkPhysicalDevice dev);
59 static int have_inst_layer(const char *name);
60 static int have_ext(VkExtensionProperties *ext, int next, const char *name);
65 #define MAX_INIT_QUEUE 32
70 VkCommandPool cmdpool;
71 } initq[MAX_INIT_QUEUE];
75 static VkPhysicalDevice vkpdev;
76 static VkQueueFamilyProperties *qfam;
77 static uint32_t num_qfam;
78 static VkDevice vkdev;
79 static VkSurfaceKHR vksurf;
80 static VkSurfaceCapabilitiesKHR vksurf_caps;
81 static int vksurf_numfmt, vksurf_selfmt;
82 static VkSurfaceFormatKHR *vksurf_fmt;
83 static VkSwapchainKHR vksc;
84 static int vksc_numimg;
85 static VkImage *vksc_img;
86 static VkExtent2D vksc_extent;
87 static VkImageView *vksc_view;
89 static VkLayerProperties *inst_layers;
90 static VkExtensionProperties *inst_ext, *dev_ext;
91 static uint32_t inst_ext_count, dev_ext_count, inst_layers_count;
93 static VkPhysicalDevice *pdev_list;
94 static uint32_t num_pdev;
96 static int have_raytrace, have_debug_report;
98 void vk_init_xwin(Display *d, Window w)
104 void vk_init_queue(unsigned int qflags, int count)
/* Request `count` queues with capability flags `qflags` (VKQ_* bits), to be
 * created later during vk_init/create_device. A request with the same flags
 * as an earlier one is merged by summing the counts. At most MAX_INIT_QUEUE
 * distinct flag sets are accepted.
 * NOTE(review): fragmentary view -- interior lines are elided.
 */
108 for(i=0; i<num_initq; i++) {
109 if(initq[i].flags == qflags) {
110 initq[i].count += count;
115 if(num_initq >= MAX_INIT_QUEUE) {
116 fprintf(stderr, "vk_init_queue: too many queues\n");
119 initq[num_initq].flags = qflags;
120 initq[num_initq].count = count;
124 int vk_init(unsigned int flags, unsigned int *usedflags)
/* Bring up the whole Vulkan stack: instance, window surface, physical device
 * selection, and logical device. Always requests at least one queue capable
 * of graphics + present. Returns -1 on any failure. If not all requested
 * init flags could be honored, the actually-used set is reported through
 * *usedflags (presumably only when non-null -- the check is in elided lines;
 * TODO confirm).
 */
127 vk_init_queue(VKQ_GFX | VKQ_PRESENT, 1);
131 if(create_instance() == -1) return -1;
132 if(create_surface() == -1) return -1;
133 if(choose_phys_dev() == -1) return -1;
134 if(create_device() == -1) return -1;
136 if(initflags != flags) {
138 *usedflags = initflags;
147 void vk_cleanup(void)
/* Tear down in reverse creation order: swapchain image views, swapchain,
 * logical device, surface, instance. (Guards/frees of the arrays are in
 * elided lines.) */
156 for(i=0; i<vksc_numimg; i++) {
157 vkDestroyImageView(vkdev, vksc_view[i], 0);
162 vkDestroySwapchainKHR(vkdev, vksc, 0);
166 vkDestroyDevice(vkdev, 0);
170 vkDestroySurfaceKHR(vk, vksurf, 0);
174 vkDestroyInstance(vk, 0);
187 int vk_reshape(int xsz, int ysz)
/* (Re)create the swapchain for a new window size. No-op when a swapchain
 * already exists with the same extent. Destroys the old image views and
 * swapchain before recreating. Returns -1 on failure. */
191 if(vksc && vksc_extent.width == xsz && vksc_extent.height == ysz) {
196 for(i=0; i<vksc_numimg; i++) {
197 vkDestroyImageView(vkdev, vksc_view[i], 0);
200 if(vksc) vkDestroySwapchainKHR(vkdev, vksc, 0);
202 vksc_extent.width = xsz;
203 vksc_extent.height = ysz;
205 if(create_swapchain() == -1) return -1;
207 /* TODO create depth/stencil buffers as needed (initflags) */
211 int vk_find_qfamily(unsigned int flags)
/* Return the index of the first queue family satisfying ALL requested VKQ_*
 * capability bits (present support is queried per-family against the window
 * surface), or -1 if none matches or the device is not initialized yet. */
216 if(!qfam) return -1; /* not initialized I guess... */
218 for(i=0; i<num_qfam; i++) {
219 vkGetPhysicalDeviceSurfaceSupportKHR(vkpdev, i, vksurf, &can_pres);
221 if((flags & VKQ_PRESENT) && !can_pres) {
224 if((flags & VKQ_GFX) && !(qfam[i].queueFlags & VK_QUEUE_GRAPHICS_BIT)) {
227 if((flags & VKQ_COMPUTE) && !(qfam[i].queueFlags & VK_QUEUE_COMPUTE_BIT)) {
231 return i; /* found a suitabe queue family */
237 VkQueue vk_getq_fam(int fam, int n)
/* Fetch queue `n` of queue family `fam` from the logical device.
 * Returns 0 (VK_NULL_HANDLE) for a negative family or out-of-range index.
 * NOTE(review): `n >= qfam[fam].queueCount` compares int against uint32_t;
 * safe here because n < 0 is rejected first. */
241 if(fam < 0) return 0;
242 if(n < 0 || n >= qfam[fam].queueCount) {
243 fprintf(stderr, "vk_getq_fam: invalid index %d, family %d has %d queues\n",
244 n, fam, qfam[fam].queueCount);
248 vkGetDeviceQueue(vkdev, fam, n, &q);
252 VkQueue vk_getq(unsigned int flags, int n)
/* Convenience wrapper: look up the queue family matching `flags`, then
 * return its n-th queue (0 on failure, via vk_getq_fam's checks). */
254 return vk_getq_fam(vk_find_qfamily(flags), n);
257 static VkCommandPool find_cmdpool(int qfam)
/* Return the command pool associated with queue family `qfam`, creating it
 * lazily on first use (TRANSIENT | RESET_COMMAND_BUFFER). Pools live in the
 * initq[] table, so only families registered through vk_init_queue have one.
 * Returns 0 if the family was never requested or pool creation fails.
 * FIX(review): the creation-failure message named a non-existent function
 * ("ck_create_cmdbuf") and called the object a "command buffer pool";
 * corrected to identify this function and a command pool. */
260 VkCommandPoolCreateInfo pinf;
262 for(i=0; i<num_initq; i++) {
263 if(initq[i].qfam == qfam) {
264 if(!initq[i].cmdpool) {
265 /* allocate command pool for this queue family */
266 memset(&pinf, 0, sizeof pinf);
267 pinf.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
268 pinf.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT | VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
269 pinf.queueFamilyIndex = qfam;
271 if(vkCreateCommandPool(vkdev, &pinf, 0, &initq[i].cmdpool) != 0) {
272 fprintf(stderr, "find_cmdpool: failed to create command pool\n");
276 return initq[i].cmdpool;
280 fprintf(stderr, "vk_create_cmdbuf: failed to find command pool for queue family: %d\n", qfam);
284 VkCommandBuffer vk_create_cmdbuf_fam(int qfam, int level)
/* Allocate a single command buffer of the given level from queue family
 * `qfam`'s lazily-created pool. Returns 0 if no pool exists for the family
 * or allocation fails. (The level assignment is in an elided line.) */
286 VkCommandBufferAllocateInfo inf = {0};
287 VkCommandBuffer cmdbuf;
288 VkCommandPool cmdpool;
290 if(!(cmdpool = find_cmdpool(qfam))) {
294 inf.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
295 inf.commandPool = cmdpool;
297 inf.commandBufferCount = 1;
299 if(vkAllocateCommandBuffers(vkdev, &inf, &cmdbuf) != 0) {
300 fprintf(stderr, "vk_create_cmdbuf: failed to allocate command buffer\n");
306 VkCommandBuffer vk_create_cmdbuf(unsigned int qflags, int level)
/* Allocate a command buffer from whichever queue family matches `qflags`.
 * Returns 0 when no matching family exists. */
310 if((qfam = vk_find_qfamily(qflags)) == -1) {
311 fprintf(stderr, "vk_create_cmdbuf: failed to find matching queue family\n");
314 return vk_create_cmdbuf_fam(qfam, level);
317 int vk_create_rpass(void)
/* Allocate a renderpass slot and return its index (>= 1). Slot 0 is a dummy
 * pushed on first call so that index 0 can mean "no renderpass". Free slots
 * (used == 0) are reused before growing the darr. Defaults: surface color
 * format, D24S8 depth. */
320 struct rpass rpass = {0}, *rp = &rpass;
323 rpasses = darr_alloc(0, sizeof *rpasses);
324 darr_push(rpasses, &rpass); /* add dummy rpass */
327 for(i=1; i<darr_size(rpasses); i++) {
328 if(!rpasses[i].used) {
333 /* init renderpass defaults */
335 rp->fmt = vksurf_fmt[vksurf_selfmt].format;
336 rp->zfmt = VK_FORMAT_D24_UNORM_S8_UINT;
344 darr_push(rpasses, rp);
345 return darr_size(rpasses) - 1;
350 void vk_free_rpass(int rp)
/* Release renderpass slot `rp`: destroy the VkRenderPass if one was built,
 * and mark the slot reusable. Index 0 (dummy) and out-of-range are ignored. */
352 if(!rpasses || rp < 1 || rp >= darr_size(rpasses)) {
356 if(rpasses[rp].used && rpasses[rp].vkobj) {
357 vkDestroyRenderPass(vkdev, rpasses[rp].vkobj, 0);
359 rpasses[rp].used = 0;
362 void vk_rpass_colorbuf(int rp, int fmt, int n)
/* Set color attachment format and count; invalidates the cached VkRenderPass
 * so vk_rpass() rebuilds it lazily. */
364 rpasses[rp].fmt = fmt;
365 rpasses[rp].num_colbuf = n;
366 rpasses[rp].vkobj_valid = 0;
369 void vk_rpass_msaa(int rp, int nsamp)
/* Set the MSAA sample count; invalidates the cached VkRenderPass. */
371 rpasses[rp].num_samples = nsamp;
372 rpasses[rp].vkobj_valid = 0;
375 void vk_rpass_clear(int rp, int clear)
/* Toggle load-op clear for the attachments; invalidates the cached pass. */
377 rpasses[rp].clear = clear;
378 rpasses[rp].vkobj_valid = 0;
381 VkRenderPass vk_rpass(int rp)
/* Return the VkRenderPass for slot `rp`, lazily (re)building it when the
 * cached object is invalid. Layout: attachments 0..num_colbuf-1 are color
 * (final layout PRESENT_SRC), attachment `zidx` == num_colbuf is the
 * depth/stencil. att[] holds up to 16 color + 1 depth (hence 17). */
385 VkAttachmentDescription att[17];
386 VkAttachmentReference catref[16], zatref;
387 VkSubpassDescription subpass;
388 VkRenderPassCreateInfo pinf;
392 if(!r->vkobj_valid) {
394 vkDestroyRenderPass(vkdev, r->vkobj, 0);
398 zidx = r->num_colbuf;
399 memset(att, 0, sizeof att);
400 for(i=0; i<r->num_colbuf; i++) {
401 att[i].format = r->fmt;
/* NOTE(review): raw int sample count assigned to VkSampleCountFlagBits;
 * works because VK_SAMPLE_COUNT_n_BIT values equal n for powers of two --
 * confirm num_samples is always a power of two. */
402 att[i].samples = r->num_samples;
403 att[i].loadOp = r->clear ? VK_ATTACHMENT_LOAD_OP_CLEAR : VK_ATTACHMENT_LOAD_OP_DONT_CARE;
404 att[i].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
405 att[i].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
406 att[i].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
407 att[i].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
408 att[i].finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
/* depth/stencil attachment: always single-sampled */
410 att[zidx].format = r->zfmt;
411 att[zidx].samples = 1;
412 att[zidx].loadOp = r->clear ? VK_ATTACHMENT_LOAD_OP_CLEAR : VK_ATTACHMENT_LOAD_OP_DONT_CARE;
413 att[zidx].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
414 att[zidx].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
415 att[zidx].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
416 att[zidx].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
417 att[zidx].finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
419 for(i=0; i<r->num_colbuf; i++) {
420 catref[i].attachment = i;
421 catref[i].layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
423 zatref.attachment = zidx;
424 zatref.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
426 memset(&subpass, 0, sizeof subpass);
427 subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
428 subpass.colorAttachmentCount = r->num_colbuf;
429 subpass.pColorAttachments = catref;
430 subpass.pDepthStencilAttachment = &zatref;
432 memset(&pinf, 0, sizeof pinf);
433 pinf.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
434 pinf.attachmentCount = r->num_colbuf + 1;
435 pinf.pAttachments = att;
436 pinf.subpassCount = 1;
437 pinf.pSubpasses = &subpass;
439 if(vkCreateRenderPass(vkdev, &pinf, 0, &r->vkobj) != 0) {
440 fprintf(stderr, "failed to create render pass!\n");
450 int vk_create_fb(void)
/* Allocate a framebuffer slot and return its index (>= 1). Slot 0 is a
 * dummy pushed on first call so that index 0 can mean "no framebuffer".
 * Free slots (used == 0) are reused before growing the darr.
 * BUG FIX: the reused-slot reset did `memset(fb, 0, sizeof &fb)` -- the size
 * of a pointer-to-pointer, not of the struct -- leaving most of the recycled
 * framebuf's fields (imgv[], vkobj, dimensions, ...) stale. Now clears the
 * whole struct with `sizeof *fb`. */
453 struct framebuf framebuf = {0}, *fb = &framebuf;
456 framebufs = darr_alloc(0, sizeof *framebufs);
457 darr_push(framebufs, &framebuf); /* add dummy framebuf */
460 for(i=1; i<darr_size(framebufs); i++) {
461 if(!framebufs[i].used) {
466 /* init framebuffer defaults */
467 memset(fb, 0, sizeof *fb);
470 if(fb == &framebuf) {
471 darr_push(framebufs, fb);
472 return darr_size(framebufs) - 1;
474 return fb - framebufs;
477 void vk_free_fb(int fb)
/* Release framebuffer slot `fb`: destroy the VkFramebuffer if built and mark
 * the slot reusable. Index 0 (dummy) and out-of-range are ignored. */
479 if(!framebufs || fb < 1 || fb >= darr_size(framebufs)) {
483 if(framebufs[fb].used && framebufs[fb].vkobj) {
484 vkDestroyFramebuffer(vkdev, framebufs[fb].vkobj, 0);
486 framebufs[fb].used = 0;
489 void vk_fb_size(int fb, int x, int y)
/* Set framebuffer dimensions; invalidates the cached VkFramebuffer. */
491 framebufs[fb].width = x;
492 framebufs[fb].height = y;
493 framebufs[fb].vkobj_valid = 0;
496 void vk_fb_rpass(int fb, int rpass)
/* Associate framebuffer `fb` with renderpass slot `rpass`. Caches the
 * current VkRenderPass handle (or 0 if not yet built) so vk_fb() can detect
 * a rebuilt pass and recreate the framebuffer. */
498 if(rpass < 0 || rpass >= darr_size(rpasses) || !rpasses[rpass].used) {
499 fprintf(stderr, "vk_fb_rpass: %d is not a valid renderpass\n", rpass);
503 framebufs[fb].rpidx = rpass;
504 if(rpasses[rpass].vkobj_valid) {
505 framebufs[fb].vkrpass = rpasses[rpass].vkobj;
507 framebufs[fb].vkrpass = 0;
509 framebufs[fb].vkobj_valid = 0;
512 void vk_fb_images(int fb, int n, ...)
/* Attach `n` VkImageView handles (variadic) to framebuffer `fb`, capped at
 * MAX_FB_IMGV; invalidates the cached VkFramebuffer. */
517 if(n > MAX_FB_IMGV) {
518 fprintf(stderr, "vk_fb_images: %d is too many images\n", n);
524 framebufs[fb].imgv[i] = va_arg(ap, VkImageView);
527 framebufs[fb].num_imgv = n;
528 framebufs[fb].vkobj_valid = 0;
531 VkFramebuffer vk_fb(int fb)
/* Return the VkFramebuffer for slot `fb`, lazily (re)building it when the
 * cached object is invalid or when its renderpass has been rebuilt (the
 * vkrpass handle comparison -- see the struct comment). Returns 0 when the
 * associated renderpass can't be built. */
533 VkFramebufferCreateInfo fbinf;
539 if(!(rpass = vk_rpass(f->rpidx))) {
543 if(rpass != f->vkrpass || !f->vkobj_valid) {
546 vkDestroyFramebuffer(vkdev, f->vkobj, 0);
550 memset(&fbinf, 0, sizeof fbinf);
551 fbinf.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
552 fbinf.renderPass = rpass;
553 fbinf.attachmentCount = f->num_imgv;
554 fbinf.pAttachments = f->imgv;
555 fbinf.width = f->width;
556 fbinf.height = f->height;
/* NOTE(review): fbinf.layers is not set in the visible lines; Vulkan
 * requires layers >= 1 -- confirm it is assigned in an elided line (557). */
558 if(vkCreateFramebuffer(vkdev, &fbinf, 0, &f->vkobj) != 0) {
559 fprintf(stderr, "vk_fb: failed to create framebuffer\n");
568 #define ARRSZ(arr) (sizeof arr / sizeof *arr)
/* Validation layers we enable when the loader reports them. */
569 static const char *known_layer_list[] = {
570 "VK_LAYER_GOOGLE_threading",
571 "VK_LAYER_LUNARG_parameter_validation",
572 "VK_LAYER_LUNARG_object_tracker",
573 "VK_LAYER_LUNARG_image",
574 "VK_LAYER_LUNARG_core_validation",
575 "VK_LAYER_LUNARG_swapchain",
576 "VK_LAYER_GOOGLE_unique_objects"
/* Instance extensions: name + whether init fails without it. The platform
 * surface extension entries are #ifdef-selected (elided lines). */
582 } known_instext_list[] = {
583 {"VK_KHR_surface", 1},
585 {"VK_KHR_win32_surface", 1},
587 /*{"VK_KHR_xlib_surface", 1},*/
588 {"VK_KHR_xcb_surface", 1},
590 {"VK_KHR_debug_report", 0}
/* Device extensions: swapchain is mandatory, ray tracing optional. */
596 } known_devext_list[] = {
597 {"VK_KHR_swapchain", 1},
598 {"VK_KHR_acceleration_structure", 0},
599 {"VK_KHR_ray_tracing_pipeline", 0}
602 static int create_instance(void)
/* Create the VkInstance: enumerate layers/extensions, enable every known
 * validation layer and instance extension that is actually available, and
 * fail (-1) if a required extension is missing. Also records whether
 * VK_KHR_debug_report is present. */
604 int i, nlayers = 0, next = 0;
605 VkInstanceCreateInfo instinf;
606 VkApplicationInfo appinf;
607 const char *layers[ARRSZ(known_layer_list)];
608 const char *ext[ARRSZ(known_instext_list)];
611 vkEnumerateInstanceVersion(&apiver);
/* variant:3 | major:7 | minor:10 | patch:12 bit packing */
612 printf("Vulkan API version: %d.%d.%d\n", (apiver >> 22) & 0x7f,
613 (apiver >> 12) & 0x3ff, apiver & 0xfff);
615 memset(&appinf, 0, sizeof appinf);
616 appinf.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
617 appinf.pApplicationName = "vkray";
618 appinf.pEngineName = "vkray";
619 appinf.apiVersion = apiver;
621 vkEnumerateInstanceLayerProperties(&inst_layers_count, 0);
622 inst_layers = malloc_nf(inst_layers_count * sizeof *inst_layers);
623 vkEnumerateInstanceLayerProperties(&inst_layers_count, inst_layers);
625 vkEnumerateInstanceExtensionProperties(0, &inst_ext_count, 0);
626 inst_ext = malloc_nf(inst_ext_count * sizeof *inst_ext);
627 vkEnumerateInstanceExtensionProperties(0, &inst_ext_count, inst_ext);
630 for(i=0; i<inst_layers_count; i++) {
631 printf(" - %s: %s\n", inst_layers[i].layerName, inst_layers[i].description);
633 printf("Instance extensions:\n");
634 for(i=0; i<inst_ext_count; i++) {
635 printf(" - %s\n", inst_ext[i].extensionName);
638 have_debug_report = have_ext(inst_ext, inst_ext_count, "VK_KHR_debug_report");
/* enable whichever known layers/extensions the implementation has */
640 for(i=0; i<ARRSZ(known_layer_list); i++) {
641 if(have_inst_layer(known_layer_list[i])) {
642 layers[nlayers++] = known_layer_list[i];
645 for(i=0; i<ARRSZ(known_instext_list); i++) {
646 if(have_ext(inst_ext, inst_ext_count, known_instext_list[i].name)) {
647 ext[next++] = known_instext_list[i].name;
648 } else if(known_instext_list[i].required) {
649 fprintf(stderr, "Vulkan implementation lacks required instance extension: %s\n",
650 known_instext_list[i].name);
655 memset(&instinf, 0, sizeof instinf);
656 instinf.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
657 instinf.pApplicationInfo = &appinf;
658 instinf.enabledLayerCount = nlayers;
659 instinf.ppEnabledLayerNames = layers;
660 instinf.enabledExtensionCount = next;
661 instinf.ppEnabledExtensionNames = ext;
662 if(vkCreateInstance(&instinf, 0, &vk) != 0) {
663 fprintf(stderr, "failed to create vulkan instance\n");
670 static int create_surface(void)
/* Create the presentation surface for the current platform (the #ifdefs are
 * in elided lines). The Xlib path is present but the Xlib surface extension
 * is commented out above; the active X11 path goes through XCB via
 * XGetXCBConnection on the Display stored by vk_init_xwin. */
673 VkXlibSurfaceCreateInfoKHR xinf = {0};
674 xinf.sType = VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR;
678 if(vkCreateXlibSurfaceKHR(vk, &xinf, 0, &vksurf) != 0) {
679 fprintf(stderr, "failed to create Xlib window surface\n");
683 VkXcbSurfaceCreateInfoKHR xinf = {0};
684 xinf.sType = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR;
685 xinf.connection = XGetXCBConnection(dpy);
686 xinf.window = (xcb_window_t)win;
688 if(vkCreateXcbSurfaceKHR(vk, &xinf, 0, &vksurf) != 0) {
689 fprintf(stderr, "failed to create XCB window surface\n");
695 static int choose_phys_dev(void)
/* Enumerate physical devices, score each via eval_pdev_score, and select the
 * best-scoring one into vkpdev; also caches its queue family properties.
 * Returns -1 when no device exists or none is suitable.
 * FIX(review): the forward declaration (line 53) is `static`, but the
 * definition omitted it; added `static` so declaration and definition agree.
 * NOTE(review): the local `num_pdev` shadows the file-scope `num_pdev`
 * (-Wshadow); left as-is because elided lines may reference either.
 * NOTE(review): `best_score` is compared before any visible assignment --
 * confirm it is initialized in an elided line (711-713). */
697 uint32_t i, num_pdev, score, best_score, best_dev;
698 VkPhysicalDevice *pdev;
699 VkPhysicalDeviceProperties pdevprop;
702 vkEnumeratePhysicalDevices(vk, &num_pdev, 0);
704 fprintf(stderr, "no vulkan devices found\n");
707 pdev = malloc_nf(num_pdev * sizeof *pdev);
708 vkEnumeratePhysicalDevices(vk, &num_pdev, pdev);
710 printf("Found %d physical devices\n", num_pdev);
714 for(i=0; i<num_pdev; i++) {
715 if((score = eval_pdev_score(pdev[i])) && score > best_score) {
720 vkGetPhysicalDeviceProperties(pdev[i], &pdevprop);
721 printf(" %d: %s (score: %d)\n", i, pdevprop.deviceName, score);
724 fprintf(stderr, "no suitable vulkan device found\n");
728 vkpdev = pdev[best_dev];
732 vkGetPhysicalDeviceQueueFamilyProperties(vkpdev, &num_qfam, 0);
733 qfam = malloc_nf(num_qfam * sizeof *qfam);
734 vkGetPhysicalDeviceQueueFamilyProperties(vkpdev, &num_qfam, qfam);
741 static int create_device(void)
/* Create the logical device: enable available/required device extensions
 * (dropping VKINIT_RAY from initflags when ray-tracing extensions are
 * missing), then build one VkDeviceQueueCreateInfo per registered initq
 * entry with all priorities set to 1.0. Returns -1 on failure.
 * BUG FIX: the priority-fill loop incremented `i` instead of `j`
 * (`for(j=0; j<initq[i].count; i++)`), corrupting the outer loop index and
 * advancing `prio` the wrong number of times; now increments `j`. */
744 VkDeviceQueueCreateInfo qinf[MAX_INIT_QUEUE] = {0};
745 VkPhysicalDeviceFeatures feat = {0};
746 VkDeviceCreateInfo devinf = {0};
747 const char *ext[ARRSZ(known_devext_list) + 16];
748 int i, j, num_ext, qfam, totalq;
750 vkEnumerateDeviceExtensionProperties(vkpdev, 0, &dev_ext_count, 0);
751 dev_ext = malloc_nf(dev_ext_count * sizeof *dev_ext);
752 vkEnumerateDeviceExtensionProperties(vkpdev, 0, &dev_ext_count, dev_ext);
755 for(i=0; i<ARRSZ(known_devext_list); i++) {
756 if(have_ext(dev_ext, dev_ext_count, known_devext_list[i].name)) {
757 ext[num_ext++] = known_devext_list[i].name;
758 } else if(known_devext_list[i].required) {
759 fprintf(stderr, "Vulkan device lacks required extension: %s\n",
760 known_devext_list[i].name);
765 if(initflags & VKINIT_RAY) {
766 if(have_ext(dev_ext, dev_ext_count, "VK_KHR_acceleration_structure") &&
767 have_ext(dev_ext, dev_ext_count, "VK_KHR_ray_tracing_pipeline")) {
768 ext[num_ext++] = "VK_KHR_acceleration_structure";
769 ext[num_ext++] = "VK_KHR_ray_tracing_pipeline";
771 initflags &= ~VKINIT_RAY;
776 for(i=0; i<num_initq; i++) {
777 totalq += initq[i].count;
780 fprintf(stderr, "create_device: arbitrary limit of total queues exceeded (%d)\n", totalq);
783 prio = alloca(totalq * sizeof *prio);
785 for(i=0; i<num_initq; i++) {
786 if((qfam = vk_find_qfamily(initq[i].flags)) == -1) {
787 fprintf(stderr, "create_device: failed to find queue family (flags: 0x%2x)\n",
791 initq[i].qfam = qfam;
792 initq[i].cmdpool = 0;
794 qinf[i].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
795 qinf[i].queueFamilyIndex = qfam;
796 qinf[i].queueCount = initq[i].count;
797 qinf[i].pQueuePriorities = prio;
798 for(j=0; j<initq[i].count; j++) {
799 *prio++ = 1.0f; /* all queue priorities 1 */
803 devinf.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
804 devinf.pQueueCreateInfos = qinf;
805 devinf.queueCreateInfoCount = num_initq;
806 devinf.pEnabledFeatures = &feat;
807 devinf.enabledExtensionCount = num_ext;
808 devinf.ppEnabledExtensionNames = ext;
810 if(vkCreateDevice(vkpdev, &devinf, 0, &vkdev) != 0) {
811 fprintf(stderr, "failed to create vulkan device\n");
817 static int create_swapchain(void)
/* (Re)create the swapchain at vksc_extent with the selected surface format
 * (FIFO present, double-buffered minimum), fetch its images, and create one
 * 2D color image view per image. Returns -1 on failure, 0 on success.
 * NOTE(review): `num` is tested at line 847 before the visible query that
 * fills it -- confirm an elided line (848) initializes/frees as intended. */
821 VkSwapchainCreateInfoKHR scinf = {0};
822 VkImageViewCreateInfo ivinf;
824 if(vksc_extent.width <= 0 || vksc_extent.height <= 0) {
828 scinf.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
829 scinf.surface = vksurf;
830 scinf.minImageCount = 2;
831 scinf.imageFormat = vksurf_fmt[vksurf_selfmt].format;
832 scinf.imageColorSpace = vksurf_fmt[vksurf_selfmt].colorSpace;
833 scinf.imageExtent = vksc_extent;
834 scinf.imageArrayLayers = 1;
835 scinf.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
836 scinf.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
837 scinf.preTransform = vksurf_caps.currentTransform;
838 scinf.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
839 scinf.presentMode = VK_PRESENT_MODE_FIFO_KHR;
840 scinf.clipped = VK_TRUE;
842 if(vkCreateSwapchainKHR(vkdev, &scinf, 0, &vksc) != 0) {
843 fprintf(stderr, "failed to create swapchain\n");
/* re-fetch image handles / views only when the count changed */
847 if(!vksc_img || vksc_numimg != num) {
849 vkGetSwapchainImagesKHR(vkdev, vksc, &num, 0);
850 vksc_img = malloc_nf(num * sizeof *vksc_img);
851 vkGetSwapchainImagesKHR(vkdev, vksc, &num, vksc_img);
853 if(!vksc_view || vksc_numimg != num) {
855 vksc_view = malloc_nf(num * sizeof *vksc_view);
859 for(i=0; i<num; i++) {
860 memset(&ivinf, 0, sizeof ivinf);
861 ivinf.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
862 ivinf.image = vksc_img[i];
863 ivinf.format = vksurf_fmt[vksurf_selfmt].format;
864 ivinf.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
865 ivinf.components.g = VK_COMPONENT_SWIZZLE_IDENTITY;
866 ivinf.components.b = VK_COMPONENT_SWIZZLE_IDENTITY;
867 ivinf.components.a = VK_COMPONENT_SWIZZLE_IDENTITY;
868 ivinf.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
869 ivinf.subresourceRange.levelCount = 1;
870 ivinf.subresourceRange.layerCount = 1;
871 ivinf.viewType = VK_IMAGE_VIEW_TYPE_2D;
873 if(vkCreateImageView(vkdev, &ivinf, 0, vksc_view + i) != 0) {
874 fprintf(stderr, "failed to create image view (%d)\n", i);
882 static int eval_pdev_score(VkPhysicalDevice dev)
/* Score a physical device for selection: 0 (unsuitable) unless it has the
 * swapchain extension, at least one surface format, and a queue family that
 * does graphics + present. Discrete > integrated > virtual GPU (the score
 * assignments are in elided lines); ray-tracing extensions add to the score
 * when VKINIT_RAY was requested. */
885 uint32_t i, num_fmt, num_qfam, num_ext;
886 VkQueueFamilyProperties *qfam;
887 VkExtensionProperties *ext;
888 VkPhysicalDeviceProperties prop;
889 VkPhysicalDeviceFeatures feat;
890 VkSurfaceFormatKHR *sfmt;
893 vkGetPhysicalDeviceProperties(dev, &prop);
894 vkGetPhysicalDeviceFeatures(dev, &feat);
896 /* check if we have the swapchain extension */
897 vkEnumerateDeviceExtensionProperties(dev, 0, &num_ext, 0);
898 ext = malloc_nf(num_ext * sizeof *ext);
899 vkEnumerateDeviceExtensionProperties(dev, 0, &num_ext, ext);
901 if(!have_ext(ext, num_ext, "VK_KHR_swapchain")) {
906 /* populate format and present modes arrays, and make sure we have some of each */
907 vkGetPhysicalDeviceSurfaceFormatsKHR(dev, vksurf, &num_fmt, 0);
912 sfmt = malloc_nf(num_fmt * sizeof *sfmt);
913 vkGetPhysicalDeviceSurfaceFormatsKHR(dev, vksurf, &num_fmt, sfmt);
/* NOTE(review): vksurf_caps (file-scope) is filled for every candidate
 * device, not just the chosen one -- the last-evaluated device wins. */
915 vkGetPhysicalDeviceSurfaceCapabilitiesKHR(dev, vksurf, &vksurf_caps);
917 /* find a queue family which can do graphics and can present */
918 vkGetPhysicalDeviceQueueFamilyProperties(dev, &num_qfam, 0);
919 qfam = malloc_nf(num_qfam * sizeof *qfam);
920 vkGetPhysicalDeviceQueueFamilyProperties(dev, &num_qfam, qfam);
922 for(i=0; i<num_qfam; i++) {
923 vkGetPhysicalDeviceSurfaceSupportKHR(dev, i, vksurf, &can_pres);
924 if(qfam[i].queueCount && (qfam[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) && can_pres) {
929 switch(prop.deviceType) {
930 case VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU:
933 case VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU:
936 case VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU:
943 if(initflags & VKINIT_RAY) {
944 if(have_ext(ext, num_ext, "VK_KHR_acceleration_structure") &&
945 have_ext(ext, num_ext, "VK_KHR_ray_tracing_pipeline")) {
956 static int choose_pixfmt(void)
/* Pick the swapchain surface format: among the device's sRGB-nonlinear
 * formats, select the first matching the preference list below; caches the
 * full format array in vksurf_fmt/vksurf_numfmt and the chosen index in
 * vksurf_selfmt (selection assignment is in an elided line). Returns -1 if
 * the surface reports no formats. */
958 static const VkFormat pref[] = {
959 VK_FORMAT_B8G8R8_UNORM,
960 VK_FORMAT_R8G8B8_UNORM,
961 VK_FORMAT_B8G8R8A8_UNORM,
962 VK_FORMAT_R8G8B8A8_UNORM
967 vkGetPhysicalDeviceSurfaceFormatsKHR(vkpdev, vksurf, &num_fmt, 0);
968 if(!num_fmt) return -1;
969 vksurf_fmt = malloc_nf(num_fmt * sizeof *vksurf_fmt);
970 vkGetPhysicalDeviceSurfaceFormatsKHR(vkpdev, vksurf, &num_fmt, vksurf_fmt);
973 for(i=0; i<num_fmt; i++) {
974 if(vksurf_fmt[i].colorSpace != VK_COLOR_SPACE_SRGB_NONLINEAR_KHR) {
977 for(j=0; j<sizeof pref / sizeof *pref; j++) {
978 if(vksurf_fmt[i].format == pref[j]) {
980 vksurf_numfmt = num_fmt;
991 static int have_inst_layer(const char *name)
/* Return nonzero if the enumerated instance layers contain `name`. */
994 for(i=0; i<inst_layers_count; i++) {
995 if(strcmp(inst_layers[i].layerName, name) == 0) {
1002 static int have_ext(VkExtensionProperties *ext, int next, const char *name)
/* Return nonzero if `name` appears among the `next` extension properties. */
1005 for(i=0; i<next; i++) {
1006 if(strcmp(ext[i].extensionName, name) == 0) {