 * this is public domain without any warranties of any kind
/* XXX: KEEP AN EYE ON ABBREVIATIONS, ALWAYS */
 * XXX: vk abstraction is much more complex than real hardware, aka the cost of
 * most software generalisation/abstraction (and some hardware has lost its
 * way...). Better to know some real hardware programming (for instance AMD
 * open gpu) and keep that in mind while dealing with vk.
 * since it is complex, stick to a "safe mode", i.e. a minimal use of vk. doing
 * fancy stuff above this "safe mode" must be validated by hardware vendors and
 * then by the user... or you are literally walking on eggshells.
 * The vulkan API is, here, simplified and tailored for the app using the C
 * preprocessor. In other words, when there is no ambiguity in the context of
 * this code, vulkan API function parameters get simplified out in C
 * preprocessor macros.
 * XXX: this is a "One Compilation Unit" source code with preprocessor
 * namespace support. this allows the project to grow very large while keeping
 * the global identifier space in check (= tradeoff). Each source file, *.h
 * *.c, should compile without errors.
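 * as a rough illustration only (these two macros are hypothetical, see the
 * actual namespace files included below for what is really done), such a
 * namespace header boils down to:
 *	#define dev_create	app_dev_create
 *	#define surf_init	app_surf_init
 * i.e. each compilation unit keeps its short local names while only prefixed
 * identifiers ever reach the global namespace.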
 * XXX: you may have to track the dependencies of vk objs in order to be
 * able to deal with any of their brutal state changes:
 * - a dev can be lost (e.g. power management evt or bad hardware)
 * - a surf can be lost (e.g. power management evt or something went
 *   wrong)
 * - a swpchn can become out-of-date. for instance the win system did resz
 *   the parent surf of the swpchn... if you allowed that to happen
 * here we choose that any of those evts will be fatal, for simplicity. for
 * instance, if you chose to support the swpchn out-of-date state and your
 * rendering state was too much "pre-configured", you would have to
 * "re-pre-configure" everything... or you would drop "pre-configuring" and
 * program everything *again* for each swpchn img you draw.
 * display programming is demonstrated "officially" in khronos vk cube.c and
 * there is a tutorial slide deck "1-Vulkan-Tutorial.pdf" (just abstract away
 * the c++ cr*p) which is "the programming manual" on top of the specs. this
 * code is:
 * - only 1 "main" synchronous loop
 * - only xcb wsi. xcb is a client library on top of the x11 protocol.
 *   we know wayland ("x12") should be added.
 * - dynamic loading of xcb (see the sketch right below this list).
 * - no need for vk headers (the ABI is used directly with custom headers).
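 * a minimal sketch of what "dynamic loading of xcb" means (hedged: xcb_lib
 * and the exact symbol set are illustrative here, the real work is done by
 * the dl_ helpers of this project):
 *	xcb_lib = dlopen("libxcb.so.1", RTLD_NOW);
 *	dl_xcb_wait_for_event = dlsym(xcb_lib, "xcb_wait_for_event");
 *	dl_xcb_poll_for_event = dlsym(xcb_lib, "xcb_poll_for_event");
 * so the binary has no hard link-time dependency on libxcb.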
 * WARNING: vk core q fam props discovery is not used for the discovery of
 * q fams able to support disp, aka wsi. This is the case because disp
 * handling (wsi) is done as an ext and is not core (vk can be used without
 * a disp at all).
 * a phydev must have a q with gfx and compute. additionally, any q with gfx or
 * compute does implicitly support transfer. basically, it is possible to have
 * qs with only transfer support, and we are guaranteed to have a q with gfx
 * and compute and transfer support. Keep in mind that many vk resources must
 * pay the cost of transferring from 1 q fam to another q fam: so think twice
 * about how you want to spread your workload over the q fams.
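 * to illustrate that cost (a hedged sketch only: this app never does it since
 * it sticks to a single q fam, and q_fam_a/q_fam_b/some_img/the two cbs are
 * hypothetical), an explicit ownership transfer needs a release barrier
 * recorded on a cb of the source fam and a matching acquire barrier on a cb
 * of the destination fam:
 *	struct vk_img_mem_barrier_t b;
 *	memset(&b, 0, sizeof(b));
 *	b.type = vk_struct_type_img_mem_barrier;
 *	b.src_q_fam = q_fam_a;
 *	b.dst_q_fam = q_fam_b;
 *	b.img = some_img;
 *	b.subrsrc_range.aspect = vk_img_aspect_color_bit;
 *	b.subrsrc_range.lvls_n = 1;
 *	b.subrsrc_range.array_layers_n = 1;
 *	vk_cmd_pl_barrier(cb_on_q_fam_a, &b);	(release on the source q fam)
 *	vk_cmd_pl_barrier(cb_on_q_fam_b, &b);	(acquire on the destination q fam)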
 * for proper keyboard support, the joypad way and/or the text input way, read
 * the included KEYBOARD file. here, since we use basic layout-independent
 * standard keys, the x11 core keyboard protocol is quite enough.
 * TODO: use as few device memory objects as possible, namely try to allocate
 * one big chunk and manage the alignment constraints ourselves. the vk api
 * does provide a way to query for the memory alignment constraints.
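 * a minimal sketch of such a sub-allocator (hedged: next_offset and
 * dev_mem_offset are hypothetical fields, but the rqmts fields are the ones
 * queried by tmp_cpu_img_mem_rqmts_get() below):
 *	next_offset = (next_offset + rqmts->core.alignment - 1)
 *				& ~(rqmts->core.alignment - 1);
 *	surf_g.dev.cpu_imgs[i].dev_mem_offset = next_offset;
 *	next_offset += rqmts->core.sz;
 * then a single vk_alloc_mem() of the final next_offset could back all the
 * imgs, each bound at its own dev_mem_offset.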
#include "app_core_types.h"
#include "nyanvk/consts.h"
#include "nyanvk/types.h"
#include "app_state_types.h"
#include "app_state.c"
/*---------------------------------------------------------------------------*/
#include "namespace/app.c"
#include "namespace/vk_syms.c"
#include "namespace/app_state_types.h"
#include "namespace/app_state.c"
/*---------------------------------------------------------------------------*/
#define VK_FATAL(fmt, ...) \
	LOG(fmt, ##__VA_ARGS__);\

#define FATAL(fmt, ...) \
	LOG(fmt, ##__VA_ARGS__);\
/* the phydev q fam selected */
static void dev_create(void)
	struct vk_dev_create_info_t info;
	struct vk_dev_q_create_info_t q_info;
	static u8 *exts[] = {
		"VK_KHR_bind_memory2",
		"VK_KHR_get_memory_requirements2",
	memset(&info, 0, sizeof(info));
	memset(&q_info, 0, sizeof(q_info));
	/*--------------------------------------------------------------------*/
	q_info.type = vk_struct_type_dev_q_create_info;
	q_info.q_fam = surf_g.dev.phydev.q_fam;
	q_info.q_prios = &q_prio;
	/*--------------------------------------------------------------------*/
	info.type = vk_struct_type_dev_create_info;
	info.q_create_infos_n = 1;
	info.q_create_infos = &q_info;
	info.enabled_exts_n = ARRAY_N(exts);
	info.enabled_ext_names = exts;
	vk_create_dev(&info);
	VK_FATAL("0:MAIN:FATAL:%d:physical device:%p:unable to create a vulkan device\n", r, surf_g.dev.phydev.vk)
	LOG("0:MAIN:physical device:%p:vulkan device created with one proper queue:%p\n", surf_g.dev.phydev.vk, surf_g.dev.vk);
static void instance_create(void)
	struct vk_instance_create_info_t info;
	static u8 *exts[] = {
		 * XXX: not 1.1 promoted, should not use it, but it is fixing
		 * some inconsistencies from 1.0
		"VK_KHR_get_surface_capabilities2",
		"VK_KHR_get_physical_device_properties2",
		"VK_KHR_xcb_surface",
		if (i == ARRAY_N(exts))
		LOG("0:MAIN:will use vulkan instance_g extension %s\n", exts[i]);
	memset(&info, 0, sizeof(info));
	info.type = vk_struct_type_instance_create_info;
	info.enabled_exts_n = ARRAY_N(exts);
	info.enabled_ext_names = exts;
	vk_create_instance(&info);
	VK_FATAL("0:MAIN:FATAL:%d:unable to create a vulkan instance_g\n", r)
	LOG("0:MAIN:vulkan instance_g handle %p\n", instance_g);
/* in theory, this could change on the fly */
static void instance_exts_dump(void)
#define EXTS_N_MAX 512
	struct vk_ext_props_t exts[EXTS_N_MAX];
	memset(exts, 0, sizeof(exts));
	vk_enumerate_instance_ext_props(&n, exts);
	if (r != vk_success && r != vk_incomplete) {
		LOG("0:MAIN:ERROR:%d:unable to enumerate instance_g extension(s)\n", r);
	if (r == vk_incomplete) {
		LOG("0:MAIN:ERROR:too many extensions (%u/%u), dumping disabled", n, EXTS_N_MAX);
	LOG("0:MAIN:have %u instance_g extension(s)\n", n);
		LOG("0:MAIN:instance_g extension:name=%s:specification version=%u\n", exts[n - 1].name, exts[n - 1].spec_version);
/* in theory, this could change on the fly */
static void instance_layers_dump(void)
#define LAYERS_N_MAX 32
	struct vk_layer_props_t layers[LAYERS_N_MAX];
	memset(layers, 0, sizeof(layers));
	vk_enumerate_instance_layer_props(&n, layers);
	if (r != vk_success && r != vk_incomplete) {
		LOG("0:MAIN:ERROR:%d:unable to enumerate instance_g layer(s)\n", r);
	if (r == vk_incomplete) {
		LOG("0:MAIN:ERROR:too many layers (%u/%u), dumping disabled", n, LAYERS_N_MAX);
	LOG("0:MAIN:have %u instance_g layer(s)\n", n);
		LOG("0:MAIN:instance_g layer:%u:name=%s:specification version=%u:implementation version=%u:description=%s\n", n, layers[n].name, layers[n].spec_version, layers[n].implementation_version, layers[n].desc);
static void tmp_phydevs_get(void)
	void *phydevs[tmp_phydevs_n_max];
	memset(phydevs, 0, sizeof(phydevs));
	n = tmp_phydevs_n_max;
	vk_enumerate_phydevs(&n, phydevs);
	if (r != vk_success && r != vk_incomplete)
		FATAL("0:MAIN:FATAL:%d:unable to enumerate physical devices\n", r)
	if (r == vk_incomplete)
		FATAL("0:MAIN:FATAL:too many vulkan physical devices %u/%u for our temporary storage\n", n, tmp_phydevs_n_max)
	LOG("0:MAIN:detected %u physical devices\n", n);
		FATAL("0:MAIN:no vulkan physical devices, exiting\n")
	memset(tmp_phydevs_g, 0, sizeof(tmp_phydevs_g));
		if (n == tmp_phydevs_n_g)
		tmp_phydevs_g[n].vk = phydevs[n];
static void phydev_exts_dump(void *phydev)
#define EXTS_N_MAX 512
	struct vk_ext_props_t exts[EXTS_N_MAX];
	memset(exts, 0, sizeof(exts));
	vk_enumerate_dev_ext_props(phydev, &n, exts);
	if (r != vk_success && r != vk_incomplete) {
		LOG("0:MAIN:ERROR:physical device:%p:%d:unable to enumerate device extension(s)\n", phydev, r);
	if (r == vk_incomplete) {
		LOG("0:MAIN:ERROR:physical device:%p:too many extensions (%u/%u), dumping disabled", phydev, n, EXTS_N_MAX);
	LOG("0:MAIN:physical device:%p:have %u device extension(s)\n", phydev, n);
		LOG("0:MAIN:physical device:%p:device extension:name=%s:specification version=%u\n", phydev, exts[n - 1].name, exts[n - 1].spec_version);
static void tmp_phydevs_exts_dump(void)
		if (i == tmp_phydevs_n_g)
		phydev_exts_dump(tmp_phydevs_g[i].vk);
static u8 *dev_type_str(u32 type)
	case vk_phydev_type_other:
	case vk_phydev_type_integrated_gpu:
		return "integrated gpu";
	case vk_phydev_type_discrete_gpu:
		return "discrete gpu";
	case vk_phydev_type_virtual_gpu:
		return "virtual gpu";
	case vk_phydev_type_cpu:
static u8 *uuid_str(u8 *uuid)
	static u8 uuid_str[VK_UUID_SZ * 2 + 1];
	memset(uuid_str, 0, sizeof(uuid_str));
		/* XXX: always write a terminating 0, truncated or not */
		snprintf(uuid_str + i * 2, 3, "%02x", uuid[i]);
static void tmp_phydevs_props_dump(void)
	struct vk_phydev_props_t props;
	struct tmp_phydev_t *p;
		if (i == tmp_phydevs_n_g)
		p = &tmp_phydevs_g[i];
		memset(&props, 0, sizeof(props));
		props.type = vk_struct_type_phydev_props;
		vk_get_phydev_props(p->vk, &props);
		LOG("0:MAIN:physical device:%p:properties:api version=%#x=%u.%u.%u\n", p->vk, props.core.api_version, VK_VERSION_MAJOR(props.core.api_version), VK_VERSION_MINOR(props.core.api_version), VK_VERSION_PATCH(props.core.api_version));
		LOG("0:MAIN:physical device:%p:properties:driver version=%#x=%u.%u.%u\n", p->vk, props.core.driver_version, VK_VERSION_MAJOR(props.core.driver_version), VK_VERSION_MINOR(props.core.driver_version), VK_VERSION_PATCH(props.core.driver_version));
		LOG("0:MAIN:physical device:%p:properties:vendor id=%#x\n", p->vk, props.core.vendor_id);
		LOG("0:MAIN:physical device:%p:properties:device id=%#x\n", p->vk, props.core.dev_id);
		LOG("0:MAIN:physical device:%p:properties:type=%s\n", p->vk, dev_type_str(props.core.dev_type));
		if (props.core.dev_type == vk_phydev_type_discrete_gpu)
			p->is_discret_gpu = true;
			p->is_discret_gpu = false;
		LOG("0:MAIN:physical device:%p:properties:name=%s\n", p->vk, props.core.name);
		LOG("0:MAIN:physical device:%p:properties:pipeline cache uuid=%s\n", p->vk, uuid_str(props.core.pl_cache_uuid));
		/* display the limits and sparse props at log level 1, if needed */
static void tmp_phydev_q_fams_get(struct tmp_phydev_t *p)
	vk_get_phydev_q_fam_props(p->vk, &n, 0);
	if (n > tmp_phydev_q_fams_n_max)
		FATAL("0:MAIN:FATAL:physical device:%p:too many queue families %u/%u\n", p->vk, n, tmp_phydev_q_fams_n_max)
	memset(p->q_fams, 0, sizeof(p->q_fams));
		if (i == tmp_phydev_q_fams_n_max)
		p->q_fams[i].type = vk_struct_type_q_fam_props;
	vk_get_phydev_q_fam_props(p->vk, &n, p->q_fams);
	LOG("0:MAIN:physical device:%p:have %u queue families\n", p->vk, p->q_fams_n);
static void tmp_phydevs_q_fams_get(void)
		if (i == tmp_phydevs_n_g)
		tmp_phydev_q_fams_get(&tmp_phydevs_g[i]);
static void tmp_phydev_q_fams_dump(struct tmp_phydev_t *p)
		if (i == p->q_fams_n)
		if ((p->q_fams[i].core.flags & vk_q_gfx_bit) != 0)
			LOG("0:MAIN:physical device:%p:queue family:%u:flags:graphics\n", p->vk, i);
		if ((p->q_fams[i].core.flags & vk_q_compute_bit) != 0)
			LOG("0:MAIN:physical device:%p:queue family:%u:flags:compute\n", p->vk, i);
		if ((p->q_fams[i].core.flags & vk_q_transfer_bit) != 0)
			LOG("0:MAIN:physical device:%p:queue family:%u:flags:transfer\n", p->vk, i);
		if ((p->q_fams[i].core.flags & vk_q_sparse_binding_bit) != 0)
			LOG("0:MAIN:physical device:%p:queue family:%u:flags:sparse binding\n", p->vk, i);
		if ((p->q_fams[i].core.flags & vk_q_protected_bit) != 0)
			LOG("0:MAIN:physical device:%p:queue family:%u:flags:protected\n", p->vk, i);
		LOG("0:MAIN:physical device:%p:queue family:%u:%u queues\n", p->vk, i, p->q_fams[i].core.qs_n);
		LOG("0:MAIN:physical device:%p:queue family:%u:%u bits timestamps\n", p->vk, i, p->q_fams[i].core.timestamp_valid_bits);
		LOG("0:MAIN:physical device:%p:queue family:%u:(width=%u,height=%u,depth=%u) minimum image transfer granularity\n", p->vk, i, p->q_fams[i].core.min_img_transfer_granularity.width, p->q_fams[i].core.min_img_transfer_granularity.height, p->q_fams[i].core.min_img_transfer_granularity.depth);
static void cp_create(void)
	struct vk_cp_create_info_t info;
	memset(&info, 0, sizeof(info));
	info.type = vk_struct_type_cp_create_info;
	info.flags = vk_cp_create_reset_cb_bit;
	info.q_fam = surf_g.dev.phydev.q_fam;
	VK_FATAL("0:MAIN:FATAL:%d:unable to create the command pool\n", r)
	LOG("0:MAIN:device:%p:queue family:%u:created command pool %p\n", surf_g.dev.vk, surf_g.dev.phydev.q_fam, surf_g.dev.cp);
static void tmp_phydevs_q_fams_dump(void)
		if (i == tmp_phydevs_n_g)
		tmp_phydev_q_fams_dump(&tmp_phydevs_g[i]);
static void q_get(void)
	LOG("0:MAIN:device:%p:getting queue:family=%u queue=0\n", surf_g.dev.vk, surf_g.dev.phydev.q_fam);
	LOG("0:MAIN:device:%p:got queue:%p\n", surf_g.dev.vk, surf_g.dev.q);
static void check_vk_version(void)
	vk_enumerate_instance_version(&api_version);
		FATAL("0:MAIN:FATAL:%d:unable to enumerate instance_g version\n", r)
	LOG("0:MAIN:vulkan instance_g version %#x = %u.%u.%u\n", api_version, VK_VERSION_MAJOR(api_version), VK_VERSION_MINOR(api_version), VK_VERSION_PATCH(api_version));
	if (VK_VERSION_MAJOR(api_version) == 1
				&& VK_VERSION_MINOR(api_version) == 0)
		FATAL("0:MAIN:FATAL:vulkan instance_g version too old\n")
 * the major obj to use in the vk abstraction of gfx hardware is the q. In this
 * abstraction, many core objs like bufs/imgs are "owned" by a specific q, and
 * transfer of such ownership to other qs can be expensive. we know it's not
 * really the case on AMD hardware, but if the vk abstraction insists on this,
 * it probably means it is important on some hardware of other vendors.
static void tmp_phydevs_q_fams_surf_support_get(void)
	struct tmp_phydev_t *p;
		if (i == tmp_phydevs_n_g)
		p = &tmp_phydevs_g[i];
			if (j == p->q_fams_n)
			supported = vk_false;
			vk_get_phydev_surf_support(p->vk, j, &supported);
			VK_FATAL("0:MAIN:FATAL:%d:physical device:%p:queue family:%u:surface:%p:unable to query queue family wsi/(image presentation to our surface) support\n", r, p->vk, j, surf_g.vk)
			if (supported == vk_true) {
				LOG("0:MAIN:physical device:%p:queue family:%u:surface:%p:does support wsi/(image presentation to our surface) \n", p->vk, j, surf_g.vk);
				p->q_fams_surf_support[j] = true;
				LOG("0:MAIN:physical device:%p:queue family:%u:surface:%p:does not support wsi/(image presentation to our surface)\n", p->vk, j, surf_g.vk);
				p->q_fams_surf_support[j] = false;
static void tmp_selected_phydev_cherry_pick(u8 i)
	struct tmp_phydev_t *p;
	p = &tmp_phydevs_g[i];
	surf_g.dev.phydev.vk = p->vk;
	surf_g.dev.phydev.is_discret_gpu = p->is_discret_gpu;
	surf_g.dev.phydev.mem_types_n = p->mem_props.core.mem_types_n;
	memcpy(surf_g.dev.phydev.mem_types, p->mem_props.core.mem_types,
					sizeof(surf_g.dev.phydev.mem_types));
 * we ask the qs of the phydevs which one is able to present imgs to the
 * external pe surf_g. additionally we require this q to support gfx. we
 * select basically the first q from the first phydev fitting what we are
 * looking for.
static void tmp_phydev_and_q_fam_select(void)
	struct tmp_phydev_t *p;
		if (i == tmp_phydevs_n_g)
		p = &tmp_phydevs_g[i];
			if (j == p->q_fams_n)
			 * we are looking for a q fam with:
			 * - img presentation to our surf_g
			 * - transfer (implicit with gfx)
			if (p->q_fams_surf_support[j]
				&& (p->q_fams[j].core.flags & vk_q_gfx_bit)
				surf_g.dev.phydev.q_fam = j;
				tmp_selected_phydev_cherry_pick(i);
				LOG("0:MAIN:physical device %p selected for (wsi/image presentation to our surface %p) using its queue family %u\n", surf_g.dev.phydev.vk, surf_g.vk, surf_g.dev.phydev.q_fam);
 * XXX: the surf_g is an obj at the instance_g lvl, NOT THE [PHYSICAL]
 * DEVICE lvl.
static void surf_create(void)
	struct vk_xcb_surf_create_info_t xcb_info;
	memset(&surf_g, 0, sizeof(surf_g));
	memset(&xcb_info, 0, sizeof(xcb_info));
	xcb_info.type = vk_struct_type_xcb_surf_create_info;
	xcb_info.c = app_xcb.c;
	xcb_info.win = app_xcb.win_id;
	vk_create_xcb_surf(&xcb_info);
	VK_FATAL("0:MAIN:FATAL:%d:xcb:%s:screen:%d:root window id:%#x:window id:%#x:unable to create a vulkan surface from this x11 window\n", r, app_xcb.disp_env, app_xcb.scr_idx, app_xcb.scr->root, app_xcb.win_id)
	LOG("0:MAIN:xcb:'%s':screen:%d:root window id:%#x:window id:%#x:created vk_surface=%p\n", app_xcb.disp_env, app_xcb.scr_idx, app_xcb.scr->root, app_xcb.win_id, surf_g.vk);
static void texel_mem_blk_confs_dump(u32 confs_n,
				struct vk_surf_texel_mem_blk_conf_t *confs)
		LOG("0:MAIN:physical device:%p:surface:%p:texel memory block configuration:format=%u color_space=%u\n", surf_g.dev.phydev.vk, surf_g.vk, confs[i].core.fmt, confs[i].core.color_space);
 * we only know this phydev/q is "able to present imgs" to the external
 * pe surf_g. Here we choose the conf of the texel mem blk
#define CONFS_N_MAX 1024
static void texel_mem_blk_conf_select(void)
	struct vk_phydev_surf_info_t info;
	struct vk_surf_texel_mem_blk_conf_t confs[CONFS_N_MAX];
	struct vk_surf_texel_mem_blk_conf_core_t *cc;
	memset(&info, 0, sizeof(info));
	info.type = vk_struct_type_phydev_surf_info;
	info.surf = surf_g.vk;
	vk_get_phydev_surf_texel_mem_blk_confs(&info, &confs_n, 0);
	VK_FATAL("0:MAIN:FATAL:%d:physical device:%p:surface:%p:unable to get the count of valid surface texel memory block configurations\n", r, surf_g.dev.phydev.vk, surf_g.vk)
	if (confs_n > CONFS_N_MAX)
		FATAL("0:MAIN:FATAL:physical device:%p:surface:%p:too many surface texel memory block configurations %u/%u\n", surf_g.dev.phydev.vk, surf_g.vk, confs_n, CONFS_N_MAX)
	memset(confs, 0, sizeof(confs[0]) * confs_n);
		confs[i].type = vk_struct_type_surf_texel_mem_blk_conf;
	vk_get_phydev_surf_texel_mem_blk_confs(&info, &confs_n, confs);
	VK_FATAL("0:MAIN:FATAL:%d:physical device:%p:surface:%p:unable to get the valid surface texel memory block configurations\n", r, surf_g.dev.phydev.vk, surf_g.vk)
		FATAL("0:MAIN:FATAL:physical device:%p:surface:%p:no valid surface texel memory block configuration\n", surf_g.dev.phydev.vk, surf_g.vk)
	texel_mem_blk_confs_dump(confs_n, confs);
	cc = &surf_g.dev.phydev.selected_texel_mem_blk_conf_core;
	if ((confs_n == 1) && (confs[0].core.fmt
				== vk_texel_mem_blk_fmt_undefined)) {
		/* this means the dev lets us choose the fmt */
		cc->fmt = vk_texel_mem_blk_fmt_b8g8r8a8_srgb;
		LOG("0:MAIN:physical device:%p:surface:%p:using our surface texel memory block format %u\n", surf_g.dev.phydev.vk, surf_g.vk, cc->fmt);
		cc->color_space = vk_color_space_srgb_nonlinear;
		LOG("0:MAIN:physical device:%p:surface:%p:using preferred surface texel memory block color space %u\n", surf_g.dev.phydev.vk, surf_g.vk, cc->color_space);
		/* the first valid fmt is the preferred fmt */
		surf_g.dev.phydev.selected_texel_mem_blk_conf_core.fmt =
							confs[0].core.fmt;
		LOG("0:MAIN:physical device:%p:surface:%p:using preferred surface texel memory block format %u\n", surf_g.dev.phydev.vk, surf_g.vk, surf_g.dev.phydev.selected_texel_mem_blk_conf_core.fmt);
		cc->color_space = confs[0].core.color_space;
		LOG("0:MAIN:physical device:%p:surface:%p:using preferred surface texel memory block color space %u\n", surf_g.dev.phydev.vk, surf_g.vk, cc->color_space);
static void tmp_phydev_mem_props_get(struct tmp_phydev_t *p)
	memset(&p->mem_props, 0, sizeof(p->mem_props));
	p->mem_props.type = vk_struct_type_phydev_mem_props;
	vk_get_phydev_mem_props(p->vk, &p->mem_props);
static void tmp_phydevs_mem_props_get(void)
		if (i == tmp_phydevs_n_g)
		tmp_phydev_mem_props_get(&tmp_phydevs_g[i]);
static void phydev_mem_heap_dump(void *phydev, u8 i,
						struct vk_mem_heap_t *heap)
	LOG("0:MAIN:physical device:%p:memory heap:%u:size:%u bytes\n", phydev, i, heap->sz);
	LOG("0:MAIN:physical device:%p:memory heap:%u:flags:%#08x\n", phydev, i, heap->flags);
	if ((heap->flags & vk_mem_heap_dev_local_bit) != 0)
		LOG("0:MAIN:physical device:%p:memory heap:%u:device local\n", phydev, i);
	if ((heap->flags & vk_mem_heap_multi_instance_bit) != 0)
		LOG("0:MAIN:physical device:%p:memory heap:%u:multi instance\n", phydev, i);
static void phydev_mem_type_dump(void *phydev, u8 i,
						struct vk_mem_type_t *type)
	LOG("0:MAIN:physical device:%p:memory type:%u:heap:%u\n", phydev, i, type->heap);
	LOG("0:MAIN:physical device:%p:memory type:%u:flags:%#08x\n", phydev, i, type->prop_flags);
	if ((type->prop_flags & vk_mem_prop_dev_local_bit) != 0)
		LOG("0:MAIN:physical device:%p:memory type:%u:device local\n", phydev, i);
	if ((type->prop_flags & vk_mem_prop_host_visible_bit) != 0)
		LOG("0:MAIN:physical device:%p:memory type:%u:host visible\n", phydev, i);
	if ((type->prop_flags & vk_mem_prop_host_cached_bit) != 0)
		LOG("0:MAIN:physical device:%p:memory type:%u:host cached\n", phydev, i);
static void tmp_phydev_mem_types_dump(struct tmp_phydev_t *p)
	LOG("0:MAIN:physical device:%p:%u memory types\n", p->vk, p->mem_props.core.mem_types_n);
		if (i == p->mem_props.core.mem_types_n)
		phydev_mem_type_dump(p->vk, i,
					&p->mem_props.core.mem_types[i]);
static void tmp_phydev_mem_heaps_dump(struct tmp_phydev_t *p)
	LOG("0:MAIN:physical device:%p:%u memory heaps\n", p->vk, p->mem_props.core.mem_heaps_n);
		if (i == p->mem_props.core.mem_heaps_n)
		phydev_mem_heap_dump(p->vk, i,
					&p->mem_props.core.mem_heaps[i]);
static void tmp_phydev_mem_props_dump(struct tmp_phydev_t *p)
	tmp_phydev_mem_types_dump(p);
	tmp_phydev_mem_heaps_dump(p);
static void tmp_phydevs_mem_props_dump(void)
		if (i == tmp_phydevs_n_g)
		tmp_phydev_mem_props_dump(&tmp_phydevs_g[i]);
static void tmp_surf_caps_get(void)
	struct vk_phydev_surf_info_t info;
	memset(&info, 0, sizeof(info));
	info.type = vk_struct_type_phydev_surf_info;
	info.surf = surf_g.vk;
	memset(&tmp_surf_caps_g, 0, sizeof(tmp_surf_caps_g));
	tmp_surf_caps_g.type = vk_struct_type_surf_caps;
	vk_get_phydev_surf_caps(&info, &tmp_surf_caps_g);
	VK_FATAL("0:MAIN:FATAL:%d:physical device:%p:surface:%p:unable to get our surface capabilities in the context of the selected physical device\n", r, surf_g.dev.phydev.vk, surf_g.vk)
	/* we have room for a maximum of 3 images per swapchain */
	if (tmp_surf_caps_g.core.imgs_n_min > swpchn_imgs_n_max)
		FATAL("0:MAIN:FATAL:physical device:%p:surface:%p:we have room for %u images per swapchain, but this swapchain requires a minimum of %u images\n", surf_g.dev.phydev.vk, surf_g.vk, swpchn_imgs_n_max, tmp_surf_caps_g.core.imgs_n_min)
static void tmp_surf_caps_dump(void)
	LOG("0:MAIN:physical device:%p:surface:%p:imgs_n_min=%u\n", surf_g.dev.phydev.vk, surf_g.vk, app_tmp_surf_caps.core.imgs_n_min);
	LOG("0:MAIN:physical device:%p:surface:%p:imgs_n_max=%u\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_surf_caps_g.core.imgs_n_max);
	LOG("0:MAIN:physical device:%p:surface:%p:current extent=(width=%u, height=%u)\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_surf_caps_g.core.current_extent.width, tmp_surf_caps_g.core.current_extent.height);
	LOG("0:MAIN:physical device:%p:surface:%p:minimal extent=(width=%u, height=%u)\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_surf_caps_g.core.img_extent_min.width, tmp_surf_caps_g.core.img_extent_min.height);
	LOG("0:MAIN:physical device:%p:surface:%p:maximal extent=(width=%u, height=%u)\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_surf_caps_g.core.img_extent_max.width, tmp_surf_caps_g.core.img_extent_max.height);
	LOG("0:MAIN:physical device:%p:surface:%p:img_array_layers_n_max=%u\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_surf_caps_g.core.img_array_layers_n_max);
	LOG("0:MAIN:physical device:%p:surface:%p:supported_transforms=%#08x\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_surf_caps_g.core.supported_transforms);
	LOG("0:MAIN:physical device:%p:surface:%p:current_transform=%#08x\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_surf_caps_g.core.current_transform);
	LOG("0:MAIN:physical device:%p:surface:%p:supported_composite_alpha=%#08x\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_surf_caps_g.core.supported_composite_alpha);
	LOG("0:MAIN:physical device:%p:surface:%p:supported_img_usage_flags=%#08x\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_surf_caps_g.core.supported_img_usage_flags);
static void swpchn_imgs_get(void)
	surf_g.dev.swpchn.imgs_n = swpchn_imgs_n_max;
	vk_get_swpchn_imgs();
	VK_FATAL("0:MAIN:FATAL:%d:device:%p:surface:%p:swapchain:%p:unable to get the swapchain images\n", r, surf_g.dev.vk, surf_g.vk, surf_g.dev.swpchn.vk)
	LOG("0:MAIN:device:%p:surface:%p:swapchain:%p:got %u swapchain images\n", surf_g.dev.vk, surf_g.vk, surf_g.dev.swpchn.vk, surf_g.dev.swpchn.imgs_n);
static void swpchn_init(void)
	struct vk_swpchn_create_info_t info;
	memset(&info, 0, sizeof(info));
	p = &surf_g.dev.phydev;
	info.type = vk_struct_type_swpchn_create_info;
	info.surf = surf_g.vk;
	info.imgs_n_min = tmp_surf_caps_g.core.imgs_n_min;
	info.img_texel_mem_blk_fmt = p->selected_texel_mem_blk_conf_core.fmt;
	info.img_color_space = p->selected_texel_mem_blk_conf_core.color_space;
	memcpy(&info.img_extent, &tmp_surf_caps_g.core.current_extent,
						sizeof(info.img_extent));
	info.img_layers_n = 1;
	info.img_usage = vk_img_usage_color_attachment_bit
					| vk_img_usage_transfer_dst_bit;
	info.img_sharing_mode = vk_sharing_mode_exclusive;
	info.pre_transform = vk_surf_transform_identity_bit;
	info.composite_alpha = vk_composite_alpha_opaque_bit;
	info.present_mode = vk_present_mode_fifo;
	info.clipped = vk_true;
	vk_create_swpchn(&info);
	VK_FATAL("0:MAIN:FATAL:%d:device:%p:surface:%p:unable to create the initial swapchain\n", r, surf_g.dev.vk, surf_g.vk)
	LOG("0:MAIN:device:%p:surface:%p:swapchain created %p\n", surf_g.dev.vk, surf_g.vk, surf_g.dev.swpchn.vk);
static void tmp_present_modes_get(void)
	tmp_present_modes_n_g = tmp_present_modes_n_max;
	vk_get_phydev_surf_present_modes();
	VK_FATAL("0:MAIN:FATAL:%d:physical device:%p:surface:%p:unable to get the physical device present mode for our surface\n", r, surf_g.dev.phydev.vk, surf_g.vk)
static u8 *present_mode_to_str(u32 mode)
	case vk_present_mode_immediate:
	case vk_present_mode_mailbox:
	case vk_present_mode_fifo:
	case vk_present_mode_fifo_relaxed:
		return "fifo relaxed";
static void tmp_present_modes_dump(void)
	LOG("0:MAIN:physical device:%p:surface:%p:%u present modes\n", surf_g.dev.phydev.vk, surf_g.vk, tmp_present_modes_n_g);
		if (i == (u8)tmp_present_modes_n_g)
		LOG("0:MAIN:physical device:%p:surface:%p:present mode=%s\n", surf_g.dev.phydev.vk, surf_g.vk, present_mode_to_str(tmp_present_modes_g[i]));
static void cpu_img_create(u8 i)
	struct vk_img_create_info_t info;
	memset(&info, 0, sizeof(info));
	info.type = vk_struct_type_img_create_info;
	info.flags = vk_img_create_flag_2d_array_compatible_bit;
	info.img_type = vk_img_type_2d;
	info.texel_mem_blk_fmt = vk_texel_mem_blk_fmt_b8g8r8a8_unorm;
	info.extent.width = APP_CPU_IMG_WIDTH;
	info.extent.height = APP_CPU_IMG_HEIGHT;
	info.extent.depth = 1;
	info.samples_n = vk_samples_n_1_bit;
	info.array_layers_n = 1;
	info.img_tiling = vk_img_tiling_linear;
	info.usage = vk_img_usage_transfer_src_bit;
	info.initial_layout = vk_img_layout_undefined;
	vk_create_img(&info, &surf_g.dev.cpu_imgs[i].vk);
	VK_FATAL("0:MAIN:FATAL:%d:device:%p:unable to create swapchain cpu image %u\n", r, surf_g.dev.vk, i)
	LOG("0:MAIN:device:%p:swapchain cpu image %u created %p\n", surf_g.dev.vk, i, surf_g.dev.cpu_imgs[i].vk);
static void cpu_imgs_create(void)
		if (i == surf_g.dev.swpchn.imgs_n)
static void img_mem_barrier_run_once(u8 i, struct vk_img_mem_barrier_t *b)
	struct vk_cb_begin_info_t begin_info;
	struct vk_submit_info_t submit_info;
	memset(&begin_info, 0, sizeof(begin_info));
	begin_info.type = vk_struct_type_cb_begin_info;
	begin_info.flags = vk_cb_usage_one_time_submit_bit;
	vk_begin_cb(surf_g.dev.cbs[i], &begin_info);
	VK_FATAL("0:MAIN:FATAL:%d:command buffer:%p:unable to begin recording the initial layout transition command buffer\n", r, surf_g.dev.cbs[i])
	/*--------------------------------------------------------------------*/
	vk_cmd_pl_barrier(app_surf.dev.cbs[i], b);
	/*--------------------------------------------------------------------*/
	vk_end_cb(surf_g.dev.cbs[i]);
	VK_FATAL("0:MAIN:FATAL:%d:command buffer:%p:unable to end recording of the initial layout transition command buffer\n", r, surf_g.dev.cbs[i])
	/*--------------------------------------------------------------------*/
	memset(&submit_info, 0, sizeof(submit_info));
	submit_info.type = vk_struct_type_submit_info;
	submit_info.cbs_n = 1;
	submit_info.cbs = &surf_g.dev.cbs[i];
	vk_q_submit(&submit_info);
	VK_FATAL("0:MAIN:FATAL:%d:queue:%p:unable to submit the initial layout transition command buffer\n", r, surf_g.dev.q)
	/*--------------------------------------------------------------------*/
	VK_FATAL("0:MAIN:FATAL:%d:queue:%p:unable to wait for idle or completion of initial layout transition command buffer\n", r, surf_g.dev.q)
	/*--------------------------------------------------------------------*/
	 * since it is tagged to run once, its state is now invalid; we need to
	 * reset it to the initial state
	vk_reset_cb(surf_g.dev.cbs[i]);
	VK_FATAL("0:MAIN:FATAL:%d:command buffer:%p:unable to reset the initial layout transition command buffer\n", r, surf_g.dev.cbs[i])
static void cpu_img_layout_to_general(u8 i)
	struct vk_img_mem_barrier_t b;
	struct vk_img_subrsrc_range_t *r;
	memset(&b, 0, sizeof(b));
	b.type = vk_struct_type_img_mem_barrier;
	b.old_layout = vk_img_layout_undefined;
	b.new_layout = vk_img_layout_general;
	b.src_q_fam = vk_q_fam_ignored;
	b.dst_q_fam = vk_q_fam_ignored;
	b.img = surf_g.dev.cpu_imgs[i].vk;
	r = &b.subrsrc_range;
	r->aspect = vk_img_aspect_color_bit;
	r->array_layers_n = 1;
	img_mem_barrier_run_once(i, &b);
	LOG("0:MAIN:cpu image:%p[%u]:transition to general layout successful\n", surf_g.dev.cpu_imgs[i].vk, i);
/* once in general layout, the dev sees the img */
static void cpu_imgs_layout_to_general(void)
		if (i == surf_g.dev.swpchn.imgs_n)
		cpu_img_layout_to_general(i);
static void tmp_cpu_img_mem_rqmts_get(u8 i)
	struct vk_img_mem_rqmts_info_t info;
	struct vk_mem_rqmts_t *rqmts;
	memset(&info, 0, sizeof(info));
	info.type = vk_struct_type_img_mem_rqmts_info;
	info.img = surf_g.dev.cpu_imgs[i].vk;
	rqmts = &tmp_mem_rqmts_g[i];
	memset(rqmts, 0, sizeof(*rqmts));
	rqmts->type = vk_struct_type_mem_rqmts;
	vk_get_img_mem_rqmts(&info, rqmts);
	VK_FATAL("0:MAIN:FATAL:%d:device:%p:unable to get memory requirements for cpu image %u\n", r, surf_g.dev.vk, i)
	LOG("0:MAIN:device:%p:cpu image %u core requirements are size=%lu bytes, alignment=%lu bytes, memory type=%#08x\n", surf_g.dev.vk, i, (long)rqmts->core.sz, (long)rqmts->core.alignment, rqmts->core.mem_type_bits);
static void tmp_cpu_imgs_mem_rqmts_get(void)
		if (i == surf_g.dev.swpchn.imgs_n)
		tmp_cpu_img_mem_rqmts_get(i);
#define WANTED_MEM_PROPS (vk_mem_prop_host_visible_bit \
				| vk_mem_prop_host_cached_bit)
#define IS_DEV_LOCAL(x) (((x)->prop_flags & vk_mem_prop_dev_local_bit) != 0)
static bool match_mem_type(u8 mem_type_idx,
		struct vk_mem_rqmts_t *img_rqmts, bool ignore_gpu_is_discret)
	struct vk_mem_type_t *mem_type;
	/* first check this mem type is in our img rqmts */
	if (((1 << mem_type_idx) & img_rqmts->core.mem_type_bits) == 0)
	mem_type = &surf_g.dev.phydev.mem_types[mem_type_idx];
	if (!ignore_gpu_is_discret)
		if (surf_g.dev.phydev.is_discret_gpu && IS_DEV_LOCAL(mem_type))
	if ((mem_type->prop_flags & WANTED_MEM_PROPS) == WANTED_MEM_PROPS)
#undef WANTED_MEM_PROPS
static bool try_alloc_cpu_img_dev_mem(u8 i,
		struct vk_mem_rqmts_t *img_rqmts, u8 mem_type_idx)
	struct vk_mem_alloc_info_t info;
	memset(&info, 0, sizeof(info));
	info.type = vk_struct_type_mem_alloc_info;
	info.sz = img_rqmts->core.sz;
	info.mem_type_idx = mem_type_idx;
	vk_alloc_mem(&info, &surf_g.dev.cpu_imgs[i].dev_mem);
		LOG("0:MAIN:WARNING:%d:device:%p:cpu image:%u:unable to allocate %lu bytes from physical dev %p memory type %u\n", r, surf_g.dev.vk, i, img_rqmts->core.sz, surf_g.dev.phydev.vk, mem_type_idx);
	LOG("0:MAIN:device:%p:physical device:%p:cpu image:%u:%lu bytes allocated from memory type %u\n", surf_g.dev.vk, surf_g.dev.phydev.vk, i, img_rqmts->core.sz, mem_type_idx);
 * we are looking for host visible and host cached mem. on a discrete gpu we
 * would like non dev local mem in order to avoid wasting video ram. if we have
 * a discrete gpu but could not find a mem type without dev local mem, let's
 * retry with only host visible and host cached mem.
#define IGNORE_GPU_IS_DISCRET true
static void cpu_img_dev_mem_alloc(u8 i)
	struct vk_mem_rqmts_t *img_rqmts;
	img_rqmts = &tmp_mem_rqmts_g[i];
		if (mem_type == surf_g.dev.phydev.mem_types_n)
		if (match_mem_type(mem_type, img_rqmts,
						!IGNORE_GPU_IS_DISCRET)) {
			if (try_alloc_cpu_img_dev_mem(i, img_rqmts,
	if (!surf_g.dev.phydev.is_discret_gpu)
		FATAL("0:MAIN:FATAL:physical device:%p:cpu image:%u:unable to find proper memory type or to allocate memory\n", surf_g.dev.phydev.vk, i)
	 * lookup again, but relax the match based on discret gpu constraint for
		if (mem_type == surf_g.dev.phydev.mem_types_n)
		if (match_mem_type(mem_type, img_rqmts, IGNORE_GPU_IS_DISCRET)
			&& try_alloc_cpu_img_dev_mem(i, img_rqmts, mem_type))
	FATAL("0:MAIN:FATAL:physical device:%p:cpu image:%u:unable to find proper memory type or to allocate memory\n", surf_g.dev.phydev.vk, i)
#undef IGNORE_GPU_IS_DISCRET
static void cpu_imgs_dev_mem_alloc(void)
		if (i == surf_g.dev.swpchn.imgs_n)
		cpu_img_dev_mem_alloc(i);
static void cpu_imgs_dev_mem_bind(void)
	struct vk_bind_img_mem_info_t infos[swpchn_imgs_n_max];
	memset(&infos, 0, sizeof(infos[0]) * surf_g.dev.swpchn.imgs_n);
		if (i == surf_g.dev.swpchn.imgs_n)
		infos[i].type = vk_struct_type_bind_img_mem_info;
		infos[i].img = surf_g.dev.cpu_imgs[i].vk;
		infos[i].mem = surf_g.dev.cpu_imgs[i].dev_mem;
	vk_bind_img_mem(infos);
	VK_FATAL("0:MAIN:FATAL:%d:device:%p:cpu images:unable to bind device memory to images\n", r, surf_g.dev.vk)
	LOG("0:MAIN:device:%p:cpu images:bound device memory to images\n", surf_g.dev.vk);
static void cpu_imgs_dev_mem_map(void)
		if (i == surf_g.dev.swpchn.imgs_n)
		vk_map_mem(surf_g.dev.cpu_imgs[i].dev_mem,
					&surf_g.dev.cpu_imgs[i].data);
		VK_FATAL("0:MAIN:FATAL:%d:device:%p:cpu image:%u:unable to map image memory\n", r, surf_g.dev.vk, i)
		LOG("0:MAIN:device:%p:cpu image:%u:image memory mapped\n", surf_g.dev.vk, i);
static void cpu_img_subrsrc_layout_get(u8 i)
	struct vk_img_subrsrc_t s;
	memset(&s, 0, sizeof(s));
	/* 1 subrsrc = uniq color plane of mip lvl 0 and array 0 */
	s.aspect = vk_img_aspect_color_bit;
	vk_get_img_subrsrc_layout(surf_g.dev.cpu_imgs[i].vk, &s,
					&surf_g.dev.cpu_imgs[i].layout);
	LOG("0:MAIN:device:%p:cpu image:%u:layout:offset=%lu bytes size=%lu bytes row_pitch=%lu bytes array_pitch=%lu bytes depth_pitch=%lu bytes\n", surf_g.dev.vk, i, surf_g.dev.cpu_imgs[i].layout.offset, surf_g.dev.cpu_imgs[i].layout.sz, surf_g.dev.cpu_imgs[i].layout.row_pitch, surf_g.dev.cpu_imgs[i].layout.array_pitch, surf_g.dev.cpu_imgs[i].layout.depth_pitch);
static void cpu_imgs_subrsrc_layout_get(void)
		if (i == surf_g.dev.swpchn.imgs_n)
		cpu_img_subrsrc_layout_get(i);
static void sems_create(void)
	struct vk_sem_create_info_t info;
	memset(&info, 0, sizeof(info));
	info.type = vk_struct_type_sem_create_info;
	vk_create_sem(&info, &surf_g.dev.sems[sem]);
	VK_FATAL("0:MAIN:FATAL:%d:device:%p:unable to create a semaphore %u for our swapchain\n", r, surf_g.dev.vk, sem)
	LOG("0:MAIN:device:%p:semaphore %u for our swapchain created %p\n", surf_g.dev.vk, sem, surf_g.dev.sems[sem]);
static void cbs_create(void)
	struct vk_cb_alloc_info_t alloc_info;
	memset(&alloc_info, 0, sizeof(alloc_info));
	alloc_info.type = vk_struct_type_cb_alloc_info;
	alloc_info.cp = surf_g.dev.cp;
	alloc_info.lvl = vk_cb_lvl_primary;
	alloc_info.cbs_n = surf_g.dev.swpchn.imgs_n;
	vk_alloc_cbs(&alloc_info);
	VK_FATAL("0:MAIN:FATAL:%d:device:%p:unable to allocate command buffers for our swapchain images from %p command pool\n", r, surf_g.dev.vk, surf_g.dev.cp)
	LOG("0:MAIN:device:%p:allocated %u command buffers for our swapchain images from %p command pool\n", surf_g.dev.vk, surf_g.dev.swpchn.imgs_n, surf_g.dev.cp);
static void cb_rec(u8 i)
	struct vk_cb_begin_info_t begin_info;
	struct vk_img_mem_barrier_t b;
	struct vk_img_blit_t region;
	/*--------------------------------------------------------------------*/
	memset(&begin_info, 0, sizeof(begin_info));
	begin_info.type = vk_struct_type_cb_begin_info;
	vk_begin_cb(surf_g.dev.cbs[i], &begin_info);
	VK_FATAL("0:MAIN:FATAL:%d:swapchain img:%u:command buffer:%p:unable to begin recording\n", r, i, surf_g.dev.cbs[i])
	/*--------------------------------------------------------------------*/
	/* acquired img (undefined layout) to presentation layout */
	memset(&b, 0, sizeof(b));
	b.type = vk_struct_type_img_mem_barrier;
	b.old_layout = vk_img_layout_undefined;
	b.new_layout = vk_img_layout_present;
	b.src_q_fam = vk_q_fam_ignored;
	b.dst_q_fam = vk_q_fam_ignored;
	b.img = surf_g.dev.swpchn.imgs[i];
	b.subrsrc_range.aspect = vk_img_aspect_color_bit;
	b.subrsrc_range.lvls_n = 1;
	b.subrsrc_range.array_layers_n = 1;
	vk_cmd_pl_barrier(surf_g.dev.cbs[i], &b);
	/*--------------------------------------------------------------------*/
	/* blit from cpu img to pe img */
	memset(&region, 0, sizeof(region));
	region.src_subrsrc.aspect = vk_img_aspect_color_bit;
	region.src_subrsrc.array_layers_n = 1;
	region.src_offsets[1].x = APP_CPU_IMG_WIDTH;
	region.src_offsets[1].y = APP_CPU_IMG_HEIGHT;
	region.dst_subrsrc.aspect = vk_img_aspect_color_bit;
	region.dst_subrsrc.array_layers_n = 1;
	/* XXX: it is a scaling blit: you can use APP_WIN_WIDTH/APP_WIN_HEIGHT */
	region.dst_offsets[1].x = APP_CPU_IMG_WIDTH;
	region.dst_offsets[1].y = APP_CPU_IMG_HEIGHT;
	vk_cmd_blit_img(surf_g.dev.cbs[i], surf_g.dev.cpu_imgs[i].vk,
				surf_g.dev.swpchn.imgs[i], &region);
	/*--------------------------------------------------------------------*/
	vk_end_cb(surf_g.dev.cbs[i]);
	VK_FATAL("0:MAIN:FATAL:%d:swapchain img:%u:command buffer:%p:unable to end recording\n", r, i, surf_g.dev.cbs[i])
static void cbs_rec(void)
		if (i == surf_g.dev.swpchn.imgs_n)
static void phydev_init(void)
	/*--------------------------------------------------------------------*/
	tmp_phydevs_exts_dump();
	tmp_phydevs_props_dump();
	tmp_phydevs_mem_props_get();
	tmp_phydevs_mem_props_dump();
	/*--------------------------------------------------------------------*/
	tmp_phydevs_q_fams_get();
	tmp_phydevs_q_fams_dump();
	tmp_phydevs_q_fams_surf_support_get();
	/*--------------------------------------------------------------------*/
	tmp_phydev_and_q_fam_select();
	/*--------------------------------------------------------------------*/
	texel_mem_blk_conf_select();
	/*--------------------------------------------------------------------*/
	tmp_surf_caps_get();
	tmp_surf_caps_dump();
	/*--------------------------------------------------------------------*/
	tmp_present_modes_get();
	tmp_present_modes_dump();
static void dev_init(void)
	/*--------------------------------------------------------------------*/
static void surf_init(void)
	/* our cpu imgs for swpchn imgs */
	cpu_imgs_layout_to_general();
	cpu_imgs_subrsrc_layout_get();
	tmp_cpu_imgs_mem_rqmts_get();
	cpu_imgs_dev_mem_alloc();
	cpu_imgs_dev_mem_bind();
	cpu_imgs_dev_mem_map();
static void init_vk(void)
	instance_static_syms();
	instance_exts_dump();
	instance_layers_dump();
	/*--------------------------------------------------------------------*/
	/*--------------------------------------------------------------------*/
static void swpchn_acquire_next_img(u32 *i)
	struct vk_acquire_next_img_info_t info;
	memset(&info, 0, sizeof(info));
	info.type = vk_struct_type_acquire_next_img_info;
	info.swpchn = surf_g.dev.swpchn.vk;
	info.timeout = u64_max; /* infinite */
	info.devs = 0x00000001; /* no device group then 1 */
	info.sem = surf_g.dev.sems[sem_acquire_img_done];
	vk_acquire_next_img(&info, i);
	VK_FATAL("0:MAIN:FATAL:%d:device:%p:unable to acquire next image from swapchain %p\n", r, surf_g.dev.vk, surf_g.dev.swpchn.vk)
	LOG("0:MAIN:device:%p:swapchain:%p:acquired image %u\n", surf_g.dev.vk, surf_g.dev.swpchn.vk, *i);
static void cpu_img_draw(u8 i)
	texel = (u32*)surf_g.dev.cpu_imgs[i].data;
		if (row == APP_CPU_IMG_HEIGHT)
			struct vk_subrsrc_layout_t *l;
			u64 o; /* _byte_ offset */
			u64 o_w; /* _32 bits_ word offset */
			if (col == APP_CPU_IMG_WIDTH)
			l = &surf_g.dev.cpu_imgs[i].layout;
			o = row * l->row_pitch + col * sizeof(*texel);
			o_w = o / sizeof(*texel);
			texel[o_w] = fill_texel_g;
static void cpu_img_to_pe(u8 i)
	struct vk_submit_info_t submit_info;
	struct vk_present_info_t present_info;
	memset(&submit_info, 0, sizeof(submit_info));
	submit_info.type = vk_struct_type_submit_info;
	submit_info.wait_sems_n = 1;
	submit_info.wait_sems = &surf_g.dev.sems[sem_acquire_img_done];
	wait_dst_stage = vk_pl_stage_bottom_of_pipe_bit;
	submit_info.wait_dst_stages = &wait_dst_stage;
	submit_info.cbs_n = 1;
	submit_info.cbs = &surf_g.dev.cbs[i];
	submit_info.signal_sems_n = 1;
	submit_info.signal_sems = &surf_g.dev.sems[app_sem_blit_done];
	LOG("MAIN:queue:%p\n", surf_g.dev.q);
	vk_q_submit(&submit_info);
	VK_FATAL("0:MAIN:FATAL:%d:queue:%p:unable to submit the image pre-recorded command buffer\n", r, surf_g.dev.q)
	/*--------------------------------------------------------------------*/
	memset(&present_info, 0, sizeof(present_info));
	present_info.type = vk_struct_type_present_info;
	present_info.wait_sems_n = 1;
	present_info.wait_sems = &surf_g.dev.sems[sem_blit_done];
	present_info.swpchns_n = 1;
	present_info.swpchns = &surf_g.dev.swpchn.vk;
	present_info.idxs = idxs;
	present_info.results = 0;
	vk_q_present(&present_info);
	VK_FATAL("0:MAIN:FATAL:%d:queue:%p:unable to submit the image %u to the presentation engine\n", r, surf_g.dev.q, i)
static void render(void)
	swpchn_acquire_next_img(&i);
	cpu_img_draw(i); /* cpu rendering */
	do_render_g = false;
	if (fill_texel_g == 0x0000ff00)
		fill_texel_g = 0x00ff0000;
		fill_texel_g = 0x0000ff00;
static void run(void)
	state_g = state_run;
		xcb_generic_event_t *e;
		do_render_g = false;
		/* "evts which could lead to change what we display" */
		e = dl_xcb_wait_for_event(app_xcb.c);
		if (e == 0) { /* i/o err */
			LOG("0:MAIN:xcb:'%s':connection:%p:event:input/output error | x11 server connection lost\n", app_xcb.disp_env, app_xcb.c);
		loop { /* drain evts */
			app_xcb_evt_handle(e);
			if (state_g == state_quit)
			e = dl_xcb_poll_for_event(app_xcb.c);
		/* synchronous rendering */
	LOG("0:starting app\n");
	fill_texel_g = 0x0000ff00;
	LOG("0:exiting app\n");
/*---------------------------------------------------------------------------*/
#include "namespace/app.c"
#include "namespace/vk_syms.c"
#include "namespace/app_state_types.h"
#include "namespace/app_state.c"
/*---------------------------------------------------------------------------*/