4 * this is public domain without any warranties of any kind
7 /* XXX: KEEP AN EYE ON ABBREVIATIONS, ALWAYS */
9 * XXX vk abstraction is much more complex than real hardware, aka the cost of
10 * most software generalisation/abstraction (and some hardware has lost its
11 * way...). Better know some real hardware programming (for instance AMD
12 * open gpu) and keep that in mind while dealing with vk.
13 * since it's complex, have a "safe mode", a kind of minimal use of vk. doing
14 * fancy stuff above this "safe mode" must be validated by the hardware vendors
15 * and then by the user... or you are literally walking on eggshells.
17 * The vulkan API is, here, simplified and tailored for the app using the C
18 * preprocessor. In other words, when there is no ambiguity in the context of
19 * this code, vulkan API function parameters get simplified out in C
20 * preprocessor macros.
22 * XXX: this is a "One Compilation Unit" source code with preprocessor
23 * namespace support. this allows the project to grow very large while keeping
24 * the global identifier space in check (= tradeoff). each source file, *.h *.c,
25 * should compile without errors.
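 * a minimal sketch of what such preprocessor namespacing can look like; the
 * real namespace/ headers and *.c files are not shown here, so the mappings
 * below are only an illustration, not the actual ones:
 *	#define surf_g		app_surf
 *	#define instance_create	app_instance_create
 *	... the module code uses the short names ...
 *	#undef instance_create
 *	#undef surf_g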
27 * XXX: you may have to track the dependencies of vk objs in order to be
28 * able to deal with any of their brutal state changes:
29 * - a dev can be lost (e.g. power management evt or bad hardware)
30 * - a surf can be lost (e.g. power management evt or something went
32 * - a swpchn can become out-of-date. for instance the win system did resz
33 * the parent surf of the swpchn... if you allowed that to happen
35 * here we choose, for simplicity, to treat any of those evts as fatal. for
36 * instance, if you chose to support the swpchn out-of-date state and your
37 * rendering state was heavily "pre-configured", you would have to
38 * "re-pre-configure" everything... or you would drop "pre-configuring" and
39 * program everything *again* for each swpchn img you draw.
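 * for contrast, a minimal sketch of the non-fatal path around img acquisition,
 * i.e. re-create the swpchn and everything derived from it, then retry on the
 * next frame (vk_out_of_date and the swpchn_rebuild() helper are assumptions
 * for illustration, they are not defined in this file):
 *	vk_acquire_next_img(&info, &i);
 *	if (r == vk_out_of_date) {
 *		swpchn_rebuild();
 *		return;
 *	}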
41 * display programming is demonstrated "officially" in khronos vk cube.c and
42 * there is a tutorial slide deck "1-Vulkan-Tutorial.pdf" (just abstract away
43 * the c++ cr*p) which is "the programming manual" on top of the specs. this code is
45 * - only 1 "main" synchronous loop
46 * - only the xcb wsi. xcb is a client library on top of the x11 protocol.
47 * we know wayland ("x12") should be added.
48 * - dynamic loading of xcb (see the sketch right below).
49 * - no need for the vk headers (the ABI is used directly via custom headers).
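 * a minimal sketch of the xcb dynamic loading mentioned above, assuming the
 * usual posix dlfcn route (the actual loader lives in files not shown here;
 * dl_xcb_wait_for_event/dl_xcb_poll_for_event are pointers it must resolve):
 *	#include <dlfcn.h>
 *	void *h = dlopen("libxcb.so.1", RTLD_NOW);
 *	if (h == 0)
 *		FATAL("0:MAIN:FATAL:unable to load libxcb.so.1\n")
 *	dl_xcb_wait_for_event = dlsym(h, "xcb_wait_for_event");
 *	dl_xcb_poll_for_event = dlsym(h, "xcb_poll_for_event");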
51 * WARNING: vk core q fam props discovery is not used for the discovery of
52 * q fams able to support disp, aka wsi. this is the case because disp
53 * handling (wsi) is done as an ext and is not core (vk can be used without
56 * a phydev must have a q with gfx and compute. additionally, any q with gfx or
57 * compute does implicitly support transfer. basically, it is possible to have
58 * qs with only transfer support, and we are guaranteed to have a q with gfx
59 * and compute and transfer support. keep in mind that many vk resources must
60 * pay the cost of transferring from 1 q fam to another q fam: so think twice
61 * about how you want to spread your workload over the q fams.
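 * a minimal sketch of the capability rule above, flags being the core flags of
 * a q fam props struct: a q fam advertising gfx or compute may be used for
 * transfer even when vk_q_transfer_bit is not set.
 *	static bool q_fam_can_transfer(u32 flags)
 *	{
 *		return (flags & (vk_q_transfer_bit | vk_q_gfx_bit
 *					| vk_q_compute_bit)) != 0;
 *	}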
63 * for proper keyboard support, the joypad way and/or the text input way, read
64 * the included KEYBOARD file. here, since we use basic layout-independent
65 * standard keys, the x11 core keyboard protocol is quite sufficient.
67 * TODO: use as few device memory objects as possible, namely try to allocate
68 * one big chunk and manage the alignment constraints ourselves. the vk api
69 * does provide a way to query for the memory alignment constraints.
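 * a minimal sketch of such sub-allocation, assuming rqmts was filled by
 * vk_get_img_mem_rqmts() and next_free is a hypothetical cursor into the one
 * big chunk (vk alignments are powers of two, hence the mask trick):
 *	u64 offset = (next_free + rqmts->core.alignment - 1)
 *				& ~(rqmts->core.alignment - 1);
 *	next_free = offset + rqmts->core.sz;
 *	and then bind the img at this offset inside the single device memory obj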
74 #include "app_core_types.h"
75 #include "nyanvk/consts.h"
76 #include "nyanvk/types.h"
78 #include "app_state_types.h"
81 #include "app_state.c"
83 /*---------------------------------------------------------------------------*/
84 #include "namespace/app.c"
85 #include "namespace/vk_syms.c"
86 #include "namespace/app_state_types.h"
87 #include "namespace/app_state.c"
88 /*---------------------------------------------------------------------------*/
89 #define VK_FATAL(fmt, ...) \
91 LOG(fmt, ##__VA_ARGS__);\
95 #define FATAL(fmt, ...) \
97 LOG(fmt, ##__VA_ARGS__);\
100 /* the phydev q fam selected */
101 static void dev_create(void)
103 	struct vk_dev_create_info_t info;
104 	struct vk_dev_q_create_info_t q_info;
106 	static u8 *exts[] = {
108 		"VK_KHR_bind_memory2",
110 		"VK_KHR_get_memory_requirements2",
115 	memset(&info, 0, sizeof(info));
116 	memset(&q_info, 0, sizeof(q_info));
117 	/*--------------------------------------------------------------------*/
118 	q_info.type = vk_struct_type_dev_q_create_info;
119 	q_info.q_fam = surf_g.dev.phydev.q_fam;
121 	q_info.q_prios = &q_prio;
123 	/*--------------------------------------------------------------------*/
124 	info.type = vk_struct_type_dev_create_info;
125 	info.q_create_infos_n = 1;
126 	info.q_create_infos = &q_info;
127 	info.enabled_exts_n = ARRAY_N(exts);
128 	info.enabled_ext_names = exts;
129 	vk_create_dev(&info);
130 	VK_FATAL("0:MAIN:FATAL:%d:physical device:%p:unable to create a vulkan device\n", r, surf_g.dev.phydev.vk)
131 	LOG("0:MAIN:physical device:%p:vulkan device created with one proper queue:%p\n", surf_g.dev.phydev.vk, surf_g.dev.vk);
134 static void instance_create(void)
137 	struct vk_instance_create_info_t info;
138 	static u8 *exts[] = {
140 		 * XXX: not promoted in 1.1, so we should not use it, but it
141 		 * fixes some inconsistencies from 1.0
143 		"VK_KHR_get_surface_capabilities2",
145 		"VK_KHR_get_physical_device_properties2",
146 		"VK_KHR_xcb_surface",
152 		if (i == ARRAY_N(exts))
154 		LOG("0:MAIN:will use vulkan instance_g extension %s\n", exts[i]);
157 	memset(&info, 0, sizeof(info));
158 	info.type = vk_struct_type_instance_create_info;
159 	info.enabled_exts_n = ARRAY_N(exts);
160 	info.enabled_ext_names = exts;
161 	vk_create_instance(&info);
162 	VK_FATAL("0:MAIN:FATAL:%d:unable to create a vulkan instance_g\n", r)
163 	LOG("0:MAIN:vulkan instance_g handle %p\n", instance_g);
166 /* in theory, this could change on the fly */
167 static void instance_exts_dump(void)
169 #define EXTS_N_MAX 512
170 	struct vk_ext_props_t exts[EXTS_N_MAX];
174 	memset(exts, 0, sizeof(exts));
176 	vk_enumerate_instance_ext_props(&n, exts);
177 	if (r != vk_success && r != vk_incomplete) {
178 		LOG("0:MAIN:ERROR:%d:unable to enumerate instance_g extension(s)\n", r);
181 	if (r == vk_incomplete) {
182 		LOG("0:MAIN:ERROR:too many extensions (%u/%u), dumping disabled", n, EXTS_N_MAX);
186 	LOG("0:MAIN:have %u instance_g extension(s)\n", n);
190 		LOG("0:MAIN:instance_g extension:name=%s:specification version=%u\n", exts[n - 1].name, exts[n - 1].spec_version);
196 /* in theory, this could change on the fly */
197 static void instance_layers_dump(void)
199 #define LAYERS_N_MAX 32
200 	struct vk_layer_props_t layers[LAYERS_N_MAX];
204 	memset(layers, 0, sizeof(layers));
206 	vk_enumerate_instance_layer_props(&n, layers);
207 	if (r != vk_success && r != vk_incomplete) {
208 		LOG("0:MAIN:ERROR:%d:unable to enumerate instance_g layer(s)\n", r);
211 	if (r == vk_incomplete) {
212 		LOG("0:MAIN:ERROR:too many layers (%u/%u), dumping disabled", n, LAYERS_N_MAX);
216 	LOG("0:MAIN:have %u instance_g layer(s)\n", n);
220 		LOG("0:MAIN:instance_g layer:%u:name=%s:specification version=%u:implementation version=%u:description=%s\n", n, layers[n].name, layers[n].spec_version, layers[n].implementation_version, layers[n].desc);
226 static void tmp_phydevs_get(void)
228 	void *phydevs[tmp_phydevs_n_max];
232 	memset(phydevs, 0, sizeof(phydevs));
233 	n = tmp_phydevs_n_max;
234 	vk_enumerate_phydevs(&n, phydevs);
235 	if (r != vk_success && r != vk_incomplete)
236 		FATAL("0:MAIN:FATAL:%d:unable to enumerate physical devices\n", r)
237 	if (r == vk_incomplete)
238 		FATAL("0:MAIN:FATAL:too many vulkan physical devices %u/%u for our temporary storage\n", n, tmp_phydevs_n_max)
240 	LOG("0:MAIN:detected %u physical devices\n", n);
242 		FATAL("0:MAIN:no vulkan physical devices, exiting\n")
244 	memset(tmp_phydevs_g, 0, sizeof(tmp_phydevs_g));
247 		if (n == tmp_phydevs_n_g)
249 		tmp_phydevs_g[n].vk = phydevs[n];
254 static void phydev_exts_dump(void *phydev)
256 #define EXTS_N_MAX 512
257 	struct vk_ext_props_t exts[EXTS_N_MAX];
261 	memset(exts, 0, sizeof(exts));
263 	vk_enumerate_dev_ext_props(phydev, &n, exts);
264 	if (r != vk_success && r != vk_incomplete) {
265 		LOG("0:MAIN:ERROR:physical device:%p:%d:unable to enumerate device extension(s)\n", phydev, r);
268 	if (r == vk_incomplete) {
269 		LOG("0:MAIN:ERROR:physical device:%p:too many extensions (%u/%u), dumping disabled", phydev, n, EXTS_N_MAX);
273 	LOG("0:MAIN:physical device:%p:have %u device extension(s)\n", phydev, n);
277 		LOG("0:MAIN:physical device:%p:device extension:name=%s:specification version=%u\n", phydev, exts[n - 1].name, exts[n - 1].spec_version);
283 static void tmp_phydevs_exts_dump(void)
289 		if (i == tmp_phydevs_n_g)
291 		phydev_exts_dump(tmp_phydevs_g[i].vk);
296 static u8 *dev_type_str(u32 type)
299 	case vk_phydev_type_other:
301 	case vk_phydev_type_integrated_gpu:
302 		return "integrated gpu";
303 	case vk_phydev_type_discrete_gpu:
304 		return "discrete gpu";
305 	case vk_phydev_type_virtual_gpu:
306 		return "virtual gpu";
307 	case vk_phydev_type_cpu:
314 static u8 *uuid_str(u8 *uuid)
316 	static u8 uuid_str[VK_UUID_SZ * 2 + 1];
319 	memset(uuid_str, 0, sizeof(uuid_str));
324 		/* XXX: always write a terminating 0, truncated or not */
325 		snprintf(uuid_str + i * 2, 3, "%02x", uuid[i]);
331 static void tmp_phydevs_props_dump(void)
337 	struct vk_phydev_props_t props;
338 	struct tmp_phydev_t *p;
340 		if (i == tmp_phydevs_n_g)
342 		p = &tmp_phydevs_g[i];
343 		memset(&props, 0, sizeof(props));
344 		props.type = vk_struct_type_phydev_props;
345 		vk_get_phydev_props(p->vk, &props);
346 		LOG("0:MAIN:physical device:%p:properties:api version=%#x=%u.%u.%u\n", p->vk, props.core.api_version, VK_VERSION_MAJOR(props.core.api_version), VK_VERSION_MINOR(props.core.api_version), VK_VERSION_PATCH(props.core.api_version));
347 		LOG("0:MAIN:physical device:%p:properties:driver version=%#x=%u.%u.%u\n", p->vk, props.core.driver_version, VK_VERSION_MAJOR(props.core.driver_version), VK_VERSION_MINOR(props.core.driver_version), VK_VERSION_PATCH(props.core.driver_version));
348 		LOG("0:MAIN:physical device:%p:properties:vendor id=%#x\n", p->vk, props.core.vendor_id);
349 		LOG("0:MAIN:physical device:%p:properties:device id=%#x\n", p->vk, props.core.dev_id);
350 		LOG("0:MAIN:physical device:%p:properties:type=%s\n", p->vk, dev_type_str(props.core.dev_type));
351 		if (props.core.dev_type == vk_phydev_type_discrete_gpu)
352 			p->is_discret_gpu = true;
354 			p->is_discret_gpu = false;
355 		LOG("0:MAIN:physical device:%p:properties:name=%s\n", p->vk, props.core.name);
356 		LOG("0:MAIN:physical device:%p:properties:pipeline cache uuid=%s\n", p->vk, uuid_str(props.core.pl_cache_uuid));
357 		/* display the limits and sparse props at log level 1, if needed */
362 static void tmp_phydev_q_fams_get(struct tmp_phydev_t *p)
368 	vk_get_phydev_q_fam_props(p->vk, &n, 0);
369 	if (n > tmp_phydev_q_fams_n_max)
370 		FATAL("0:MAIN:FATAL:physical device:%p:too many queue families %u/%u\n", p->vk, n, tmp_phydev_q_fams_n_max)
371 	memset(p->q_fams, 0, sizeof(p->q_fams));
374 		if (i == tmp_phydev_q_fams_n_max)
376 		p->q_fams[i].type = vk_struct_type_q_fam_props;
379 	vk_get_phydev_q_fam_props(p->vk, &n, p->q_fams);
381 	LOG("0:MAIN:physical device:%p:have %u queue families\n", p->vk, p->q_fams_n);
384 static void tmp_phydevs_q_fams_get(void)
390 		if (i == tmp_phydevs_n_g)
392 		tmp_phydev_q_fams_get(&tmp_phydevs_g[i]);
397 static void tmp_phydev_q_fams_dump(struct tmp_phydev_t *p)
403 		if (i == p->q_fams_n)
405 		if ((p->q_fams[i].core.flags & vk_q_gfx_bit) != 0)
406 			LOG("0:MAIN:physical device:%p:queue family:%u:flags:graphics\n", p->vk, i);
407 		if ((p->q_fams[i].core.flags & vk_q_compute_bit) != 0)
408 			LOG("0:MAIN:physical device:%p:queue family:%u:flags:compute\n", p->vk, i);
409 		if ((p->q_fams[i].core.flags & vk_q_transfer_bit) != 0)
410 			LOG("0:MAIN:physical device:%p:queue family:%u:flags:transfer\n", p->vk, i);
411 		if ((p->q_fams[i].core.flags & vk_q_sparse_binding_bit) != 0)
412 			LOG("0:MAIN:physical device:%p:queue family:%u:flags:sparse binding\n", p->vk, i);
413 		if ((p->q_fams[i].core.flags & vk_q_protected_bit) != 0)
414 			LOG("0:MAIN:physical device:%p:queue family:%u:flags:protected\n", p->vk, i);
415 		LOG("0:MAIN:physical device:%p:queue family:%u:%u queues\n", p->vk, i, p->q_fams[i].core.qs_n);
416 		LOG("0:MAIN:physical device:%p:queue family:%u:%u bits timestamps\n", p->vk, i, p->q_fams[i].core.timestamp_valid_bits);
417 		LOG("0:MAIN:physical device:%p:queue family:%u:(width=%u,height=%u,depth=%u) minimum image transfer granularity\n", p->vk, i, p->q_fams[i].core.min_img_transfer_granularity.width, p->q_fams[i].core.min_img_transfer_granularity.height, p->q_fams[i].core.min_img_transfer_granularity.depth);
422 static void cp_create(void)
425 	struct vk_cp_create_info_t info;
427 	memset(&info, 0, sizeof(info));
428 	info.type = vk_struct_type_cp_create_info;
429 	info.flags = vk_cp_create_reset_cb_bit;
430 	info.q_fam = surf_g.dev.phydev.q_fam;
432 	VK_FATAL("0:MAIN:FATAL:%d:unable to create the command pool\n", r)
433 	LOG("0:MAIN:device:%p:queue family:%u:created command pool %p\n", surf_g.dev.vk, surf_g.dev.phydev.q_fam, surf_g.dev.cp);
436 static void tmp_phydevs_q_fams_dump(void)
442 		if (i == tmp_phydevs_n_g)
444 		tmp_phydev_q_fams_dump(&tmp_phydevs_g[i]);
449 static void q_get(void)
451 	LOG("0:MAIN:device:%p:getting queue:family=%u queue=0\n", surf_g.dev.vk, surf_g.dev.phydev.q_fam);
453 	LOG("0:MAIN:device:%p:got queue:%p\n", surf_g.dev.vk, surf_g.dev.q);
456 static void check_vk_version(void)
461 	vk_enumerate_instance_version(&api_version);
463 		FATAL("0:MAIN:FATAL:%d:unable to enumerate instance_g version\n", r)
464 	LOG("0:MAIN:vulkan instance_g version %#x = %u.%u.%u\n", api_version, VK_VERSION_MAJOR(api_version), VK_VERSION_MINOR(api_version), VK_VERSION_PATCH(api_version));
465 	if (VK_VERSION_MAJOR(api_version) == 1
466 				&& VK_VERSION_MINOR(api_version) == 0)
467 		FATAL("0:MAIN:FATAL:vulkan instance_g version too old\n")
470 * the major obj in the vk abstraction of gfx hardware is the q. in this
471 * abstraction, many core objs like bufs/imgs are "owned" by a specific q fam,
472 * and transferring that ownership to another q fam can be expensive. we know
473 * this is not really the case on AMD hardware, but if the vk abstraction
474 * insists on it, it probably means it matters on hardware from other vendors.
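 * a minimal sketch of what paying such an ownership transfer looks like with
 * the vk_img_mem_barrier_t used later in this file (the fam indexes and cb
 * names are placeholders): the same barrier is recorded twice, once on a cb of
 * the releasing fam and once on a cb of the acquiring fam.
 *	b.src_q_fam = 0;
 *	b.dst_q_fam = 2;
 *	vk_cmd_pl_barrier(cb_of_src_fam, &b);
 *	vk_cmd_pl_barrier(cb_of_dst_fam, &b);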
476 static void tmp_phydevs_q_fams_surf_support_get(void)
482 	struct tmp_phydev_t *p;
485 		if (i == tmp_phydevs_n_g)
487 		p = &tmp_phydevs_g[i];
493 			if (j == p->q_fams_n)
495 			supported = vk_false;
496 			vk_get_phydev_surf_support(p->vk, j, &supported);
497 			VK_FATAL("0:MAIN:FATAL:%d:physical device:%p:queue family:%u:surface:%p:unable to query queue family wsi/(image presentation to our surface) support\n", r, p->vk, j, surf_g.vk)
498 			if (supported == vk_true) {
499 				LOG("0:MAIN:physical device:%p:queue family:%u:surface:%p:does support wsi/(image presentation to our surface)\n", p->vk, j, surf_g.vk);
500 				p->q_fams_surf_support[j] = true;
502 				LOG("0:MAIN:physical device:%p:queue family:%u:surface:%p:does not support wsi/(image presentation to our surface)\n", p->vk, j, surf_g.vk);
503 				p->q_fams_surf_support[j] = false;
511 static void tmp_selected_phydev_cherry_pick(u8 i)
513 	struct tmp_phydev_t *p;
515 	p = &tmp_phydevs_g[i];
516 	surf_g.dev.phydev.vk = p->vk;
517 	surf_g.dev.phydev.is_discret_gpu = p->is_discret_gpu;
518 	surf_g.dev.phydev.mem_types_n = p->mem_props.core.mem_types_n;
519 	memcpy(surf_g.dev.phydev.mem_types, p->mem_props.core.mem_types,
520 					sizeof(surf_g.dev.phydev.mem_types));
524 * we ask the qs of the phydevs which one is able to present imgs to the
525 * external pe surf_g. additionally we require this q to support gfx. we
526 * basically select the first q from the first phydev fitting what we are
529 static void tmp_phydev_and_q_fam_select(void)
536 	struct tmp_phydev_t *p;
538 		if (i == tmp_phydevs_n_g)
540 		p = &tmp_phydevs_g[i];
543 			if (j == p->q_fams_n)
546 			 * we are looking for a q fam with:
547 			 * - img presentation to our surf_g
549 			 * - transfer (implicit with gfx)
551 			if (p->q_fams_surf_support[j]
552 				&& (p->q_fams[j].core.flags & vk_q_gfx_bit)
554 				surf_g.dev.phydev.q_fam = j;
555 				tmp_selected_phydev_cherry_pick(i);
556 				LOG("0:MAIN:physical device %p selected for (wsi/image presentation to our surface %p) using its queue family %u\n", surf_g.dev.phydev.vk, surf_g.vk, surf_g.dev.phydev.q_fam);
566 * XXX: the surf_g is an obj at the instance_g lvl, NOT THE [PHYSICAL]
569 static void surf_create(void)
571 	struct vk_xcb_surf_create_info_t xcb_info;
574 	memset(&surf_g, 0, sizeof(surf_g));
575 	memset(&xcb_info, 0, sizeof(xcb_info));
576 	xcb_info.type = vk_struct_type_xcb_surf_create_info;
577 	xcb_info.c = app_xcb.c;
578 	xcb_info.win = app_xcb.win_id;
579 	vk_create_xcb_surf(&xcb_info);
580 	VK_FATAL("0:MAIN:FATAL:%d:xcb:%s:screen:%d:root window id:%#x:window id:%#x:unable to create a vulkan surface from this x11 window\n", r, app_xcb.disp_env, app_xcb.scr_idx, app_xcb.scr->root, app_xcb.win_id)
581 	LOG("0:MAIN:xcb:'%s':screen:%d:root window id:%#x:window id:%#x:created vk_surface=%p\n", app_xcb.disp_env, app_xcb.scr_idx, app_xcb.scr->root, app_xcb.win_id, surf_g.vk);
584 static void texel_mem_blk_confs_dump(u32 confs_n,
585 			struct vk_surf_texel_mem_blk_conf_t *confs)
593 		LOG("0:MAIN:physical device:%p:surface:%p:texel memory block configuration:format=%u color_space=%u\n", surf_g.dev.phydev.vk, surf_g.vk, confs[i].core.fmt, confs[i].core.color_space);
599 * we only know this phydev/q is "able to present imgs" to the external
600 * pe surf_g. here we choose the conf of the texel blk
602 #define CONFS_N_MAX 1024
603 static void texel_mem_blk_conf_select(void)
605 	struct vk_phydev_surf_info_t info;
606 	struct vk_surf_texel_mem_blk_conf_t confs[CONFS_N_MAX];
607 	struct vk_surf_texel_mem_blk_conf_core_t *cc;
612 	memset(&info, 0, sizeof(info));
613 	info.type = vk_struct_type_phydev_surf_info;
614 	info.surf = surf_g.vk;
615 	vk_get_phydev_surf_texel_mem_blk_confs(&info, &confs_n, 0);
616 	VK_FATAL("0:MAIN:FATAL:%d:physical device:%p:surface:%p:unable to get the count of valid surface texel memory block configurations\n", r, surf_g.dev.phydev.vk, surf_g.vk)
617 	if (confs_n > CONFS_N_MAX)
618 		FATAL("0:MAIN:FATAL:physical device:%p:surface:%p:too many surface texel memory block configurations %u/%u\n", surf_g.dev.phydev.vk, surf_g.vk, confs_n, CONFS_N_MAX)
620 	memset(confs, 0, sizeof(confs[0]) * confs_n);
625 		confs[i].type = vk_struct_type_surf_texel_mem_blk_conf;
628 	vk_get_phydev_surf_texel_mem_blk_confs(&info, &confs_n, confs);
629 	VK_FATAL("0:MAIN:FATAL:%d:physical device:%p:surface:%p:unable to get the valid surface texel memory block configurations\n", r, surf_g.dev.phydev.vk, surf_g.vk)
631 		FATAL("0:MAIN:FATAL:physical device:%p:surface:%p:no valid surface texel memory block configuration\n", surf_g.dev.phydev.vk, surf_g.vk)
632 	texel_mem_blk_confs_dump(confs_n, confs);
634 	cc = &surf_g.dev.phydev.selected_texel_mem_blk_conf_core;
635 	if ((confs_n == 1) && (confs[0].core.fmt
636 				== vk_texel_mem_blk_fmt_undefined)) {
637 		/* this means the dev lets us choose the fmt */
638 		cc->fmt = vk_texel_mem_blk_fmt_b8g8r8a8_srgb;
639 		LOG("0:MAIN:physical device:%p:surface:%p:using our surface texel memory block format %u\n", surf_g.dev.phydev.vk, surf_g.vk, cc->fmt);
640 		cc->color_space = vk_color_space_srgb_nonlinear;
641 		LOG("0:MAIN:physical device:%p:surface:%p:using preferred surface texel memory block color space %u\n", surf_g.dev.phydev.vk, surf_g.vk, cc->color_space);
643 		/* the first valid fmt is the preferred fmt */
644 		surf_g.dev.phydev.selected_texel_mem_blk_conf_core.fmt =
646 		LOG("0:MAIN:physical device:%p:surface:%p:using preferred surface texel memory block format %u\n", surf_g.dev.phydev.vk, surf_g.vk, surf_g.dev.phydev.selected_texel_mem_blk_conf_core.fmt);
647 		cc->color_space = confs[0].core.color_space;
648 		LOG("0:MAIN:physical device:%p:surface:%p:using preferred surface texel memory block color space %u\n", surf_g.dev.phydev.vk, surf_g.vk, cc->color_space);
652 static void tmp_phydev_mem_props_get(struct tmp_phydev_t *p)
654 	memset(&p->mem_props, 0, sizeof(p->mem_props));
655 	p->mem_props.type = vk_struct_type_phydev_mem_props;
656 	vk_get_phydev_mem_props(p->vk, &p->mem_props);
659 static void tmp_phydevs_mem_props_get(void)
665 		if (i == tmp_phydevs_n_g)
667 		tmp_phydev_mem_props_get(&tmp_phydevs_g[i]);
672 static void phydev_mem_heap_dump(void *phydev, u8 i,
673 					struct vk_mem_heap_t *heap)
675 	LOG("0:MAIN:physical device:%p:memory heap:%u:size:%u bytes\n", phydev, i, heap->sz);
676 	LOG("0:MAIN:physical device:%p:memory heap:%u:flags:%#08x\n", phydev, i, heap->flags);
677 	if ((heap->flags & vk_mem_heap_dev_local_bit) != 0)
678 		LOG("0:MAIN:physical device:%p:memory heap:%u:device local\n", phydev, i);
679 	if ((heap->flags & vk_mem_heap_multi_instance_bit) != 0)
680 		LOG("0:MAIN:physical device:%p:memory heap:%u:multi instance\n", phydev, i);
683 static void phydev_mem_type_dump(void *phydev, u8 i,
684 					struct vk_mem_type_t *type)
686 	LOG("0:MAIN:physical device:%p:memory type:%u:heap:%u\n", phydev, i, type->heap);
687 	LOG("0:MAIN:physical device:%p:memory type:%u:flags:%#08x\n", phydev, i, type->prop_flags);
688 	if ((type->prop_flags & vk_mem_prop_dev_local_bit) != 0)
689 		LOG("0:MAIN:physical device:%p:memory type:%u:device local\n", phydev, i);
690 	if ((type->prop_flags & vk_mem_prop_host_visible_bit) != 0)
691 		LOG("0:MAIN:physical device:%p:memory type:%u:host visible\n", phydev, i);
692 	if ((type->prop_flags & vk_mem_prop_host_cached_bit) != 0)
693 		LOG("0:MAIN:physical device:%p:memory type:%u:host cached\n", phydev, i);
696 static void tmp_phydev_mem_types_dump(struct tmp_phydev_t *p)
700 	LOG("0:MAIN:physical device:%p:%u memory types\n", p->vk, p->mem_props.core.mem_types_n);
703 		if (i == p->mem_props.core.mem_types_n)
705 		phydev_mem_type_dump(p->vk, i,
706 					&p->mem_props.core.mem_types[i]);
711 static void tmp_phydev_mem_heaps_dump(struct tmp_phydev_t *p)
715 	LOG("0:MAIN:physical device:%p:%u memory heaps\n", p->vk, p->mem_props.core.mem_heaps_n);
718 		if (i == p->mem_props.core.mem_heaps_n)
720 		phydev_mem_heap_dump(p->vk, i,
721 					&p->mem_props.core.mem_heaps[i]);
727 static void tmp_phydev_mem_props_dump(struct tmp_phydev_t *p)
729 	tmp_phydev_mem_types_dump(p);
730 	tmp_phydev_mem_heaps_dump(p);
733 static void tmp_phydevs_mem_props_dump(void)
739 		if (i == tmp_phydevs_n_g)
741 		tmp_phydev_mem_props_dump(&tmp_phydevs_g[i]);
746 static void tmp_surf_caps_get(void)
749 	struct vk_phydev_surf_info_t info;
751 	memset(&info, 0, sizeof(info));
752 	info.type = vk_struct_type_phydev_surf_info;
753 	info.surf = surf_g.vk;
754 	vk_get_phydev_surf_caps(&info);
755 	VK_FATAL("0:MAIN:FATAL:%d:physical device:%p:surface:%p:unable to get our surface capabilities in the context of the selected physical device\n", r, surf_g.dev.phydev.vk, surf_g.vk)
756 	/* we have room for a maximum of 3 images per swapchain */
757 	if (tmp_surf_caps_g.core.imgs_n_min > swpchn_imgs_n_max)
758 		FATAL("0:MAIN:FATAL:physical device:%p:surface:%p:we have room for %u images per swapchain, but this swapchain requires a minimum of %u images\n", surf_g.dev.phydev.vk, surf_g.vk, swpchn_imgs_n_max, tmp_surf_caps_g.core.imgs_n_min)
761 static void tmp_surf_caps_dump(void)
763 	LOG("0:MAIN:physical device:%p:surface:%p:imgs_n_min=%u\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_surf_caps_g.core.imgs_n_min);
764 	LOG("0:MAIN:physical device:%p:surface:%p:imgs_n_max=%u\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_surf_caps_g.core.imgs_n_max);
765 	LOG("0:MAIN:physical device:%p:surface:%p:current extent=(width=%u, height=%u)\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_surf_caps_g.core.current_extent.width, tmp_surf_caps_g.core.current_extent.height);
766 	LOG("0:MAIN:physical device:%p:surface:%p:minimal extent=(width=%u, height=%u)\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_surf_caps_g.core.img_extent_min.width, tmp_surf_caps_g.core.img_extent_min.height);
767 	LOG("0:MAIN:physical device:%p:surface:%p:maximal extent=(width=%u, height=%u)\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_surf_caps_g.core.img_extent_max.width, tmp_surf_caps_g.core.img_extent_max.height);
768 	LOG("0:MAIN:physical device:%p:surface:%p:img_array_layers_n_max=%u\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_surf_caps_g.core.img_array_layers_n_max);
769 	LOG("0:MAIN:physical device:%p:surface:%p:supported_transforms=%#08x\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_surf_caps_g.core.supported_transforms);
770 	LOG("0:MAIN:physical device:%p:surface:%p:current_transform=%#08x\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_surf_caps_g.core.current_transform);
771 	LOG("0:MAIN:physical device:%p:surface:%p:supported_composite_alpha=%#08x\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_surf_caps_g.core.supported_composite_alpha);
772 	LOG("0:MAIN:physical device:%p:surface:%p:supported_img_usage_flags=%#08x\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_surf_caps_g.core.supported_img_usage_flags);
775 static void swpchn_imgs_get(void)
779 	surf_g.dev.swpchn.imgs_n = swpchn_imgs_n_max;
780 	vk_get_swpchn_imgs();
781 	VK_FATAL("0:MAIN:FATAL:%d:device:%p:surface:%p:swapchain:%p:unable to get the swapchain images\n", r, surf_g.dev.vk, surf_g.vk, surf_g.dev.swpchn.vk)
782 	LOG("0:MAIN:device:%p:surface:%p:swapchain:%p:got %u swapchain images\n", surf_g.dev.vk, surf_g.vk, surf_g.dev.swpchn.vk, surf_g.dev.swpchn.imgs_n);
785 static void swpchn_init(void)
787 	struct vk_swpchn_create_info_t info;
791 	memset(&info, 0, sizeof(info));
792 	p = &surf_g.dev.phydev;
793 	info.type = vk_struct_type_swpchn_create_info;
794 	info.surf = surf_g.vk;
795 	info.imgs_n_min = tmp_surf_caps_g.core.imgs_n_min;
796 	info.img_texel_mem_blk_fmt = p->selected_texel_mem_blk_conf_core.fmt;
797 	info.img_color_space = p->selected_texel_mem_blk_conf_core.color_space;
798 	memcpy(&info.img_extent, &tmp_surf_caps_g.core.current_extent,
799 						sizeof(info.img_extent));
800 	info.img_layers_n = 1;
801 	info.img_usage = vk_img_usage_color_attachment_bit
802 					| vk_img_usage_transfer_dst_bit;
803 	info.img_sharing_mode = vk_sharing_mode_exclusive;
804 	info.pre_transform = vk_surf_transform_identity_bit;
805 	info.composite_alpha = vk_composite_alpha_opaque_bit;
806 	info.present_mode = vk_present_mode_fifo;
807 	info.clipped = vk_true;
808 	vk_create_swpchn(&info);
809 	VK_FATAL("0:MAIN:FATAL:%d:device:%p:surface:%p:unable to create the initial swapchain\n", r, surf_g.dev.vk, surf_g.vk)
810 	LOG("0:MAIN:device:%p:surface:%p:swapchain created %p\n", surf_g.dev.vk, surf_g.vk, surf_g.dev.swpchn.vk);
813 static void tmp_present_modes_get(void)
817 	tmp_present_modes_n_g = tmp_present_modes_n_max;
818 	vk_get_phydev_surf_present_modes();
819 	VK_FATAL("0:MAIN:FATAL:%d:physical device:%p:surface:%p:unable to get the physical device present modes for our surface\n", r, surf_g.dev.phydev.vk, surf_g.vk)
822 static u8 *present_mode_to_str(u32 mode)
825 	case vk_present_mode_immediate:
827 	case vk_present_mode_mailbox:
829 	case vk_present_mode_fifo:
831 	case vk_present_mode_fifo_relaxed:
832 		return "fifo relaxed";
838 static void tmp_present_modes_dump(void)
843 	LOG("0:MAIN:physical device:%p:surface:%p:%u present modes\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_present_modes_n_g);
845 		if (i == (u8)tmp_present_modes_n_g)
847 		LOG("0:MAIN:physical device:%p:surface:%p:present mode=%s\n", surf_g.dev.phydev.vk, surf_g.vk, present_mode_to_str(tmp_present_modes_g[i]));
852 static void cpu_img_create(u8 i)
854 	struct vk_img_create_info_t info;
857 	memset(&info, 0, sizeof(info));
858 	info.type = vk_struct_type_img_create_info;
859 	info.flags = vk_img_create_flag_2d_array_compatible_bit;
860 	info.img_type = vk_img_type_2d;
861 	info.texel_mem_blk_fmt = vk_texel_mem_blk_fmt_b8g8r8a8_unorm;
862 	info.extent.width = APP_CPU_IMG_WIDTH;
863 	info.extent.height = APP_CPU_IMG_HEIGHT;
864 	info.extent.depth = 1;
866 	info.samples_n = vk_samples_n_1_bit;
867 	info.array_layers_n = 1;
868 	info.img_tiling = vk_img_tiling_linear;
869 	info.usage = vk_img_usage_transfer_src_bit;
870 	info.initial_layout = vk_img_layout_undefined;
871 	vk_create_img(&info, &surf_g.dev.cpu_imgs[i].vk);
872 	VK_FATAL("0:MAIN:FATAL:%d:device:%p:unable to create swapchain cpu image %u\n", r, surf_g.dev.vk, i)
873 	LOG("0:MAIN:device:%p:swapchain cpu image %u created %p\n", surf_g.dev.vk, i, surf_g.dev.cpu_imgs[i].vk);
876 static void cpu_imgs_create(void)
882 		if (i == surf_g.dev.swpchn.imgs_n)
889 static void img_mem_barrier_run_once(u8 i, struct vk_img_mem_barrier_t *b)
892 	struct vk_cb_begin_info_t begin_info;
893 	struct vk_submit_info_t submit_info;
895 	memset(&begin_info, 0, sizeof(begin_info));
896 	begin_info.type = vk_struct_type_cb_begin_info;
897 	begin_info.flags = vk_cb_usage_one_time_submit_bit;
898 	vk_begin_cb(surf_g.dev.cbs[i], &begin_info);
899 	VK_FATAL("0:MAIN:FATAL:%d:command buffer:%p:unable to begin recording the initial layout transition command buffer\n", r, surf_g.dev.cbs[i])
900 	/*--------------------------------------------------------------------*/
901 	vk_cmd_pl_barrier(surf_g.dev.cbs[i], b);
902 	/*--------------------------------------------------------------------*/
903 	vk_end_cb(surf_g.dev.cbs[i]);
904 	VK_FATAL("0:MAIN:FATAL:%d:command buffer:%p:unable to end recording of the initial layout transition command buffer\n", r, surf_g.dev.cbs[i])
905 	/*--------------------------------------------------------------------*/
906 	memset(&submit_info, 0, sizeof(submit_info));
907 	submit_info.type = vk_struct_type_submit_info;
908 	submit_info.cbs_n = 1;
909 	submit_info.cbs = &surf_g.dev.cbs[i];
910 	vk_q_submit(&submit_info);
911 	VK_FATAL("0:MAIN:FATAL:%d:queue:%p:unable to submit the initial layout transition command buffer\n", r, surf_g.dev.q)
912 	/*--------------------------------------------------------------------*/
914 	VK_FATAL("0:MAIN:FATAL:%d:queue:%p:unable to wait for idle or completion of the initial layout transition command buffer\n", r, surf_g.dev.q)
915 	/*--------------------------------------------------------------------*/
917 	 * since the cb is tagged to run once, its state is now invalid and we
918 	 * need to reset it to the initial state
920 	vk_reset_cb(surf_g.dev.cbs[i]);
921 	VK_FATAL("0:MAIN:FATAL:%d:command buffer:%p:unable to reset the initial layout transition command buffer\n", r, surf_g.dev.cbs[i])
924 static void cpu_img_layout_to_general(u8 i)
926 	struct vk_img_mem_barrier_t b;
927 	struct vk_img_subrsrc_range_t *r;
929 	memset(&b, 0, sizeof(b));
930 	b.type = vk_struct_type_img_mem_barrier;
931 	b.old_layout = vk_img_layout_undefined;
932 	b.new_layout = vk_img_layout_general;
933 	b.src_q_fam = vk_q_fam_ignored;
934 	b.dst_q_fam = vk_q_fam_ignored;
935 	b.img = surf_g.dev.cpu_imgs[i].vk;
936 	r = &b.subrsrc_range;
937 	r->aspect = vk_img_aspect_color_bit;
939 	r->array_layers_n = 1;
940 	img_mem_barrier_run_once(i, &b);
941 	LOG("0:MAIN:cpu image:%p[%u]:transition to general layout successful\n", surf_g.dev.cpu_imgs[i].vk, i);
944 /* once in general layout, the dev sees the img */
945 static void cpu_imgs_layout_to_general(void)
951 		if (i == surf_g.dev.swpchn.imgs_n)
953 		cpu_img_layout_to_general(i);
958 static void tmp_cpu_img_mem_rqmts_get(u8 i)
960 	struct vk_img_mem_rqmts_info_t info;
961 	struct vk_mem_rqmts_t *rqmts;
964 	memset(&info, 0, sizeof(info));
965 	info.type = vk_struct_type_img_mem_rqmts_info;
966 	info.img = surf_g.dev.cpu_imgs[i].vk;
967 	rqmts = &tmp_mem_rqmts_g[i];
968 	memset(rqmts, 0, sizeof(*rqmts));
969 	rqmts->type = vk_struct_type_mem_rqmts;
970 	vk_get_img_mem_rqmts(&info, rqmts);
971 	VK_FATAL("0:MAIN:FATAL:%d:device:%p:unable to get memory requirements for cpu image %u\n", r, surf_g.dev.vk, i)
972 	LOG("0:MAIN:device:%p:cpu image %u core requirements are size=%lu bytes, alignment=%lu bytes, memory type=%#08x\n", surf_g.dev.vk, i, (long)rqmts->core.sz, (long)rqmts->core.alignment, rqmts->core.mem_type_bits);
975 static void tmp_cpu_imgs_mem_rqmts_get(void)
981 		if (i == surf_g.dev.swpchn.imgs_n)
983 		tmp_cpu_img_mem_rqmts_get(i);
988 #define WANTED_MEM_PROPS (vk_mem_prop_host_visible_bit \
989 | vk_mem_prop_host_cached_bit)
990 #define IS_DEV_LOCAL(x) (((x)->prop_flags & vk_mem_prop_dev_local_bit) != 0)
991 static bool match_mem_type(u8 mem_type_idx,
992 		struct vk_mem_rqmts_t *img_rqmts, bool ignore_gpu_is_discret)
994 	struct vk_mem_type_t *mem_type;
996 	/* first check this mem type is in our img rqmts */
997 	if (((1 << mem_type_idx) & img_rqmts->core.mem_type_bits) == 0)
999 	mem_type = &surf_g.dev.phydev.mem_types[mem_type_idx];
1000 	if (!ignore_gpu_is_discret)
1001 		if (surf_g.dev.phydev.is_discret_gpu && IS_DEV_LOCAL(mem_type))
1003 	if ((mem_type->prop_flags & WANTED_MEM_PROPS) == WANTED_MEM_PROPS)
1007 #undef WANTED_MEM_PROPS
1010 static bool try_alloc_cpu_img_dev_mem(u8 i,
1011 		struct vk_mem_rqmts_t *img_rqmts, u8 mem_type_idx)
1013 	struct vk_mem_alloc_info_t info;
1016 	memset(&info, 0, sizeof(info));
1017 	info.type = vk_struct_type_mem_alloc_info;
1018 	info.sz = img_rqmts->core.sz;
1019 	info.mem_type_idx = mem_type_idx;
1020 	vk_alloc_mem(&info, &surf_g.dev.cpu_imgs[i].dev_mem);
1022 		LOG("0:MAIN:WARNING:%d:device:%p:cpu image:%u:unable to allocate %lu bytes from physical dev %p memory type %u\n", r, surf_g.dev.vk, i, img_rqmts->core.sz, surf_g.dev.phydev.vk, mem_type_idx);
1025 	LOG("0:MAIN:device:%p:physical device:%p:cpu image:%u:%lu bytes allocated from memory type %u\n", surf_g.dev.vk, surf_g.dev.phydev.vk, i, img_rqmts->core.sz, mem_type_idx);
1030 * we are looking for host visible and host cached mem. on a discrete gpu we
1031 * would like non dev local mem, in order to avoid wasting video ram. if we
1032 * have a discrete gpu but could not find a mem type without dev local mem,
1033 * let's retry with only host visible and host cached mem.
1035 #define IGNORE_GPU_IS_DISCRET true
1036 static void cpu_img_dev_mem_alloc(u8 i)
1038 	struct vk_mem_rqmts_t *img_rqmts;
1041 	img_rqmts = &tmp_mem_rqmts_g[i];
1044 		if (mem_type == surf_g.dev.phydev.mem_types_n)
1046 		if (match_mem_type(mem_type, img_rqmts,
1047 						!IGNORE_GPU_IS_DISCRET)) {
1048 			if (try_alloc_cpu_img_dev_mem(i, img_rqmts,
1054 	if (!surf_g.dev.phydev.is_discret_gpu)
1055 		FATAL("0:MAIN:FATAL:physical device:%p:cpu image:%u:unable to find proper memory type or to allocate memory\n", surf_g.dev.phydev.vk, i)
1057 	 * lookup again, but relax the match based on discret gpu constraint for
1062 		if (mem_type == surf_g.dev.phydev.mem_types_n)
1064 		if (match_mem_type(mem_type, img_rqmts, IGNORE_GPU_IS_DISCRET)
1065 			&& try_alloc_cpu_img_dev_mem(i, img_rqmts, mem_type))
1069 	FATAL("0:MAIN:FATAL:physical device:%p:cpu image:%u:unable to find proper memory type or to allocate memory\n", surf_g.dev.phydev.vk, i)
1071 #undef IGNORE_GPU_IS_DISCRET
1073 static void cpu_imgs_dev_mem_alloc(void)
1079 		if (i == surf_g.dev.swpchn.imgs_n)
1081 		cpu_img_dev_mem_alloc(i);
1086 static void cpu_imgs_dev_mem_bind(void)
1088 	struct vk_bind_img_mem_info_t infos[swpchn_imgs_n_max];
1092 	memset(&infos, 0, sizeof(infos[0]) * surf_g.dev.swpchn.imgs_n);
1095 		if (i == surf_g.dev.swpchn.imgs_n)
1097 		infos[i].type = vk_struct_type_bind_img_mem_info;
1098 		infos[i].img = surf_g.dev.cpu_imgs[i].vk;
1099 		infos[i].mem = surf_g.dev.cpu_imgs[i].dev_mem;
1102 	vk_bind_img_mem(infos);
1103 	VK_FATAL("0:MAIN:FATAL:%d:device:%p:cpu images:unable to bind device memory to images\n", r, surf_g.dev.vk)
1104 	LOG("0:MAIN:device:%p:cpu images:bound device memory to images\n", surf_g.dev.vk);
1107 static void cpu_imgs_dev_mem_map(void)
1115 		if (i == surf_g.dev.swpchn.imgs_n)
1117 		vk_map_mem(surf_g.dev.cpu_imgs[i].dev_mem,
1118 				&surf_g.dev.cpu_imgs[i].data);
1119 		VK_FATAL("0:MAIN:FATAL:%d:device:%p:cpu image:%u:unable to map image memory\n", r, surf_g.dev.vk, i)
1120 		LOG("0:MAIN:device:%p:cpu image:%u:image memory mapped\n", surf_g.dev.vk, i);
1125 static void cpu_img_subrsrc_layout_get(u8 i)
1127 	struct vk_img_subrsrc_t s;
1129 	memset(&s, 0, sizeof(s));
1130 	/* 1 subrsrc = uniq color plane of mip lvl 0 and array 0 */
1131 	s.aspect = vk_img_aspect_color_bit;
1132 	vk_get_img_subrsrc_layout(surf_g.dev.cpu_imgs[i].vk, &s,
1133 				&surf_g.dev.cpu_imgs[i].layout);
1134 	LOG("0:MAIN:device:%p:cpu image:%u:layout:offset=%lu bytes size=%lu bytes row_pitch=%lu bytes array_pitch=%lu bytes depth_pitch=%lu bytes\n", surf_g.dev.vk, i, surf_g.dev.cpu_imgs[i].layout.offset, surf_g.dev.cpu_imgs[i].layout.sz, surf_g.dev.cpu_imgs[i].layout.row_pitch, surf_g.dev.cpu_imgs[i].layout.array_pitch, surf_g.dev.cpu_imgs[i].layout.depth_pitch);
1137 static void cpu_imgs_subrsrc_layout_get(void)
1143 		if (i == surf_g.dev.swpchn.imgs_n)
1145 		cpu_img_subrsrc_layout_get(i);
1150 static void sems_create(void)
1153 	struct vk_sem_create_info_t info;
1160 		memset(&info, 0, sizeof(info));
1161 		info.type = vk_struct_type_sem_create_info;
1162 		vk_create_sem(&info, &surf_g.dev.sems[sem]);
1163 		VK_FATAL("0:MAIN:FATAL:%d:device:%p:unable to create a semaphore %u for our swapchain\n", r, surf_g.dev.vk, sem)
1164 		LOG("0:MAIN:device:%p:semaphore %u for our swapchain created %p\n", surf_g.dev.vk, sem, surf_g.dev.sems[sem]);
1169 static void cbs_create(void)
1172 	struct vk_cb_alloc_info_t alloc_info;
1174 	memset(&alloc_info, 0, sizeof(alloc_info));
1175 	alloc_info.type = vk_struct_type_cb_alloc_info;
1176 	alloc_info.cp = surf_g.dev.cp;
1177 	alloc_info.lvl = vk_cb_lvl_primary;
1178 	alloc_info.cbs_n = surf_g.dev.swpchn.imgs_n;
1179 	vk_alloc_cbs(&alloc_info);
1180 	VK_FATAL("0:MAIN:FATAL:%d:device:%p:unable to allocate command buffers for our swapchain images from %p command pool\n", r, surf_g.dev.vk, surf_g.dev.cp)
1181 	LOG("0:MAIN:device:%p:allocated %u command buffers for our swapchain images from %p command pool\n", surf_g.dev.vk, surf_g.dev.swpchn.imgs_n, surf_g.dev.cp);
1184 static void cb_rec(u8 i)
1187 	struct vk_cb_begin_info_t begin_info;
1188 	struct vk_img_mem_barrier_t b;
1189 	struct vk_img_blit_t region;
1190 	/*--------------------------------------------------------------------*/
1191 	memset(&begin_info, 0, sizeof(begin_info));
1192 	begin_info.type = vk_struct_type_cb_begin_info;
1193 	vk_begin_cb(surf_g.dev.cbs[i], &begin_info);
1194 	VK_FATAL("0:MAIN:FATAL:%d:swapchain img:%u:command buffer:%p:unable to begin recording\n", r, i, surf_g.dev.cbs[i])
1195 	/*--------------------------------------------------------------------*/
1196 	/* acquired img (undefined layout) to presentation layout */
1197 	memset(&b, 0, sizeof(b));
1198 	b.type = vk_struct_type_img_mem_barrier;
1199 	b.old_layout = vk_img_layout_undefined;
1200 	b.new_layout = vk_img_layout_present;
1201 	b.src_q_fam = vk_q_fam_ignored;
1202 	b.dst_q_fam = vk_q_fam_ignored;
1203 	b.img = surf_g.dev.swpchn.imgs[i];
1204 	b.subrsrc_range.aspect = vk_img_aspect_color_bit;
1205 	b.subrsrc_range.lvls_n = 1;
1206 	b.subrsrc_range.array_layers_n = 1;
1207 	vk_cmd_pl_barrier(surf_g.dev.cbs[i], &b);
1208 	/*--------------------------------------------------------------------*/
1209 	/* blit from cpu img to pe img */
1210 	memset(&region, 0, sizeof(region));
1211 	region.src_subrsrc.aspect = vk_img_aspect_color_bit;
1212 	region.src_subrsrc.array_layers_n = 1;
1213 	region.src_offsets[1].x = APP_CPU_IMG_WIDTH;
1214 	region.src_offsets[1].y = APP_CPU_IMG_HEIGHT;
1215 	region.dst_subrsrc.aspect = vk_img_aspect_color_bit;
1216 	region.dst_subrsrc.array_layers_n = 1;
1217 	/* XXX: it is a scaling blit: you can use APP_WIN_WIDTH/APP_WIN_HEIGHT */
1218 	region.dst_offsets[1].x = APP_CPU_IMG_WIDTH;
1219 	region.dst_offsets[1].y = APP_CPU_IMG_HEIGHT;
1220 	vk_cmd_blit_img(surf_g.dev.cbs[i], surf_g.dev.cpu_imgs[i].vk,
1221 			surf_g.dev.swpchn.imgs[i], &region);
1222 	/*--------------------------------------------------------------------*/
1223 	vk_end_cb(surf_g.dev.cbs[i]);
1224 	VK_FATAL("0:MAIN:FATAL:%d:swapchain img:%u:command buffer:%p:unable to end recording\n", r, i, surf_g.dev.cbs[i])
1227 static void cbs_rec(void)
1233 		if (i == surf_g.dev.swpchn.imgs_n)
1240 static void phydev_init(void)
1243 	/*--------------------------------------------------------------------*/
1244 	tmp_phydevs_exts_dump();
1245 	tmp_phydevs_props_dump();
1246 	tmp_phydevs_mem_props_get();
1247 	tmp_phydevs_mem_props_dump();
1248 	/*--------------------------------------------------------------------*/
1249 	tmp_phydevs_q_fams_get();
1250 	tmp_phydevs_q_fams_dump();
1251 	tmp_phydevs_q_fams_surf_support_get();
1252 	/*--------------------------------------------------------------------*/
1253 	tmp_phydev_and_q_fam_select();
1254 	/*--------------------------------------------------------------------*/
1255 	texel_mem_blk_conf_select();
1256 	/*--------------------------------------------------------------------*/
1257 	tmp_surf_caps_get();
1258 	tmp_surf_caps_dump();
1259 	/*--------------------------------------------------------------------*/
1260 	tmp_present_modes_get();
1261 	tmp_present_modes_dump();
1264 static void dev_init(void)
1267 	/*--------------------------------------------------------------------*/
1274 static void surf_init(void)
1280 	/* our cpu imgs for swpchn imgs */
1284 	cpu_imgs_layout_to_general();
1285 	cpu_imgs_subrsrc_layout_get();
1286 	tmp_cpu_imgs_mem_rqmts_get();
1287 	cpu_imgs_dev_mem_alloc();
1288 	cpu_imgs_dev_mem_bind();
1289 	cpu_imgs_dev_mem_map();
1293 static void init_vk(void)
1297 	instance_static_syms();
1299 	instance_exts_dump();
1300 	instance_layers_dump();
1301 	/*--------------------------------------------------------------------*/
1304 	/*--------------------------------------------------------------------*/
1308 static void swpchn_acquire_next_img(u32 *i)
1310 	struct vk_acquire_next_img_info_t info;
1313 	memset(&info, 0, sizeof(info));
1314 	info.type = vk_struct_type_acquire_next_img_info;
1315 	info.swpchn = surf_g.dev.swpchn.vk;
1316 	info.timeout = u64_max; /* infinite */
1317 	info.devs = 0x00000001; /* no device group then 1 */
1318 	info.sem = surf_g.dev.sems[sem_acquire_img_done];
1319 	vk_acquire_next_img(&info, i);
1320 	VK_FATAL("0:MAIN:FATAL:%d:device:%p:unable to acquire next image from swapchain %p\n", r, surf_g.dev.vk, surf_g.dev.swpchn.vk)
1321 	LOG("0:MAIN:device:%p:swapchain:%p:acquired image %u\n", surf_g.dev.vk, surf_g.dev.swpchn.vk, *i);
1325 static void cpu_img_draw(u8 i)
1331 	texel = (u32*)surf_g.dev.cpu_imgs[i].data;
1334 		if (row == APP_CPU_IMG_HEIGHT)
1338 			struct vk_subrsrc_layout_t *l;
1339 			u64 o; /* _byte_ offset */
1340 			u64 o_w; /* _32 bits_ word offset */
1342 			if (col == APP_CPU_IMG_WIDTH)
1344 			l = &surf_g.dev.cpu_imgs[i].layout;
1345 			o = row * l->row_pitch + col * sizeof(*texel);
1347 			texel[o_w] = fill_texel_g;
1354 static void cpu_img_to_pe(u8 i)
1357 	struct vk_submit_info_t submit_info;
1358 	struct vk_present_info_t present_info;
1362 	memset(&submit_info, 0, sizeof(submit_info));
1363 	submit_info.type = vk_struct_type_submit_info;
1364 	submit_info.wait_sems_n = 1;
1365 	submit_info.wait_sems = &surf_g.dev.sems[sem_acquire_img_done];
1366 	wait_dst_stage = vk_pl_stage_bottom_of_pipe_bit;
1367 	submit_info.wait_dst_stages = &wait_dst_stage;
1368 	submit_info.cbs_n = 1;
1369 	submit_info.cbs = &surf_g.dev.cbs[i];
1370 	submit_info.signal_sems_n = 1;
1371 	submit_info.signal_sems = &surf_g.dev.sems[sem_blit_done];
1372 	LOG("MAIN:queue:%p\n", surf_g.dev.q);
1374 	vk_q_submit(&submit_info);
1375 	VK_FATAL("0:MAIN:FATAL:%d:queue:%p:unable to submit the image pre-recorded command buffer\n", r, surf_g.dev.q)
1376 	/*--------------------------------------------------------------------*/
1378 	memset(&present_info, 0, sizeof(present_info));
1379 	present_info.type = vk_struct_type_present_info;
1380 	present_info.wait_sems_n = 1;
1381 	present_info.wait_sems = &surf_g.dev.sems[sem_blit_done];
1382 	present_info.swpchns_n = 1;
1383 	present_info.swpchns = &surf_g.dev.swpchn.vk;
1384 	present_info.idxs = idxs;
1385 	present_info.results = 0;
1386 	vk_q_present(&present_info);
1387 	VK_FATAL("0:MAIN:FATAL:%d:queue:%p:unable to submit the image %u to the presentation engine\n", r, surf_g.dev.q, i)
1390 static void render(void)
1394 	swpchn_acquire_next_img(&i);
1395 	cpu_img_draw(i); /* cpu rendering */
1397 	do_render_g = false;
1398 	if (fill_texel_g == 0x0000ff00)
1399 		fill_texel_g = 0x00ff0000;
1401 		fill_texel_g = 0x0000ff00;
1404 static void run(void)
1406 	state_g = state_run;
1408 		xcb_generic_event_t *e;
1410 		do_render_g = false;
1411 		/* "evts which could lead to change what we display" */
1412 		e = dl_xcb_wait_for_event(app_xcb.c);
1413 		if (e == 0) { /* i/o err */
1414 			LOG("0:MAIN:xcb:'%s':connection:%p:event:input/output error | x11 server connection lost\n", app_xcb.disp_env, app_xcb.c);
1417 		loop { /* drain evts */
1418 			app_xcb_evt_handle(e);
1420 			if (state_g == state_quit)
1422 			e = dl_xcb_poll_for_event(app_xcb.c);
1426 		/* synchronous rendering */
1434 	LOG("0:starting app\n");
1437 	fill_texel_g = 0x0000ff00;
1439 	LOG("0:exiting app\n");
1444 /*---------------------------------------------------------------------------*/
1446 #include "namespace/app.c"
1447 #include "namespace/vk_syms.c"
1448 #include "namespace/app_state_types.h"
1449 #include "namespace/app_state.c"
1451 /*---------------------------------------------------------------------------*/