radv: Implement & enable VK_EXT_texel_buffer_alignment.
[mesa.git] / src / amd / vulkan / radv_device.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 */
27
28 #include <stdbool.h>
29 #include <string.h>
30 #include <unistd.h>
31 #include <fcntl.h>
32 #include <llvm/Config/llvm-config.h>
33 #include "radv_debug.h"
34 #include "radv_private.h"
35 #include "radv_shader.h"
36 #include "radv_cs.h"
37 #include "util/disk_cache.h"
38 #include "util/strtod.h"
39 #include "vk_util.h"
40 #include <xf86drm.h>
41 #include <amdgpu.h>
42 #include <amdgpu_drm.h>
43 #include "winsys/amdgpu/radv_amdgpu_winsys_public.h"
44 #include "ac_llvm_util.h"
45 #include "vk_format.h"
46 #include "sid.h"
47 #include "git_sha1.h"
48 #include "util/build_id.h"
49 #include "util/debug.h"
50 #include "util/mesa-sha1.h"
51 #include "compiler/glsl_types.h"
52 #include "util/xmlpool.h"
53
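/* The cache UUID identifies the radv and LLVM builds (via
 * disk_cache_get_function_identifier), plus the GPU family and pointer size,
 * so cached shaders are invalidated whenever any of these change. */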
54 static int
55 radv_device_get_cache_uuid(enum radeon_family family, void *uuid)
56 {
57 struct mesa_sha1 ctx;
58 unsigned char sha1[20];
59 unsigned ptr_size = sizeof(void*);
60
61 memset(uuid, 0, VK_UUID_SIZE);
62 _mesa_sha1_init(&ctx);
63
64 if (!disk_cache_get_function_identifier(radv_device_get_cache_uuid, &ctx) ||
65 !disk_cache_get_function_identifier(LLVMInitializeAMDGPUTargetInfo, &ctx))
66 return -1;
67
68 _mesa_sha1_update(&ctx, &family, sizeof(family));
69 _mesa_sha1_update(&ctx, &ptr_size, sizeof(ptr_size));
70 _mesa_sha1_final(&ctx, sha1);
71
72 memcpy(uuid, sha1, VK_UUID_SIZE);
73 return 0;
74 }
75
76 static void
77 radv_get_driver_uuid(void *uuid)
78 {
79 ac_compute_driver_uuid(uuid, VK_UUID_SIZE);
80 }
81
82 static void
83 radv_get_device_uuid(struct radeon_info *info, void *uuid)
84 {
85 ac_compute_device_uuid(info, uuid, VK_UUID_SIZE);
86 }
87
88 static uint64_t
89 radv_get_visible_vram_size(struct radv_physical_device *device)
90 {
91 return MIN2(device->rad_info.vram_size, device->rad_info.vram_vis_size);
92 }
93
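/* VRAM that is not CPU-visible; the CPU-visible portion is exposed as a
 * separate heap by radv_physical_device_init_mem_types(). */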
94 static uint64_t
95 radv_get_vram_size(struct radv_physical_device *device)
96 {
97 return device->rad_info.vram_size - radv_get_visible_vram_size(device);
98 }
99
100 static void
101 radv_physical_device_init_mem_types(struct radv_physical_device *device)
102 {
103 STATIC_ASSERT(RADV_MEM_HEAP_COUNT <= VK_MAX_MEMORY_HEAPS);
104 uint64_t visible_vram_size = radv_get_visible_vram_size(device);
105 uint64_t vram_size = radv_get_vram_size(device);
106 int vram_index = -1, visible_vram_index = -1, gart_index = -1;
107 device->memory_properties.memoryHeapCount = 0;
108 if (vram_size > 0) {
109 vram_index = device->memory_properties.memoryHeapCount++;
110 device->memory_properties.memoryHeaps[vram_index] = (VkMemoryHeap) {
111 .size = vram_size,
112 .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
113 };
114 }
115 if (visible_vram_size) {
116 visible_vram_index = device->memory_properties.memoryHeapCount++;
117 device->memory_properties.memoryHeaps[visible_vram_index] = (VkMemoryHeap) {
118 .size = visible_vram_size,
119 .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
120 };
121 }
122 if (device->rad_info.gart_size > 0) {
123 gart_index = device->memory_properties.memoryHeapCount++;
124 device->memory_properties.memoryHeaps[gart_index] = (VkMemoryHeap) {
125 .size = device->rad_info.gart_size,
126 .flags = device->rad_info.has_dedicated_vram ? 0 : VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
127 };
128 }
129
130 STATIC_ASSERT(RADV_MEM_TYPE_COUNT <= VK_MAX_MEMORY_TYPES);
131 unsigned type_count = 0;
132 if (vram_index >= 0) {
133 device->mem_type_indices[type_count] = RADV_MEM_TYPE_VRAM;
134 device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
135 .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
136 .heapIndex = vram_index,
137 };
138 }
139 if (gart_index >= 0 && device->rad_info.has_dedicated_vram) {
140 device->mem_type_indices[type_count] = RADV_MEM_TYPE_GTT_WRITE_COMBINE;
141 device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
142 .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
143 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
144 .heapIndex = gart_index,
145 };
146 }
147 if (visible_vram_index >= 0) {
148 device->mem_type_indices[type_count] = RADV_MEM_TYPE_VRAM_CPU_ACCESS;
149 device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
150 .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
151 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
152 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
153 .heapIndex = visible_vram_index,
154 };
155 }
156 if (gart_index >= 0 && !device->rad_info.has_dedicated_vram) {
157 /* Put GTT after visible VRAM for GPUs without dedicated VRAM
158 * as they have identical property flags, and according to the
159 * spec, for types with identical flags, the one with greater
160 * performance must be given a lower index. */
161 device->mem_type_indices[type_count] = RADV_MEM_TYPE_GTT_WRITE_COMBINE;
162 device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
163 .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
164 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
165 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
166 .heapIndex = gart_index,
167 };
168 }
169 if (gart_index >= 0) {
170 device->mem_type_indices[type_count] = RADV_MEM_TYPE_GTT_CACHED;
171 device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
172 .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
173 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
174 VK_MEMORY_PROPERTY_HOST_CACHED_BIT |
175 (device->rad_info.has_dedicated_vram ? 0 : VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT),
176 .heapIndex = gart_index,
177 };
178 }
179 device->memory_properties.memoryTypeCount = type_count;
180 }
181
182 static void
183 radv_handle_env_var_force_family(struct radv_physical_device *device)
184 {
185 const char *family = getenv("RADV_FORCE_FAMILY");
186 unsigned i;
187
188 if (!family)
189 return;
190
191 for (i = CHIP_TAHITI; i < CHIP_LAST; i++) {
192 if (!strcmp(family, ac_get_llvm_processor_name(i))) {
193 /* Override family and chip_class. */
194 device->rad_info.family = i;
195
196 if (i >= CHIP_NAVI10)
197 device->rad_info.chip_class = GFX10;
198 else if (i >= CHIP_VEGA10)
199 device->rad_info.chip_class = GFX9;
200 else if (i >= CHIP_TONGA)
201 device->rad_info.chip_class = GFX8;
202 else if (i >= CHIP_BONAIRE)
203 device->rad_info.chip_class = GFX7;
204 else
205 device->rad_info.chip_class = GFX6;
206
207 return;
208 }
209 }
210
211 fprintf(stderr, "radv: Unknown family: %s\n", family);
212 exit(1);
213 }
214
215 static VkResult
216 radv_physical_device_init(struct radv_physical_device *device,
217 struct radv_instance *instance,
218 drmDevicePtr drm_device)
219 {
220 const char *path = drm_device->nodes[DRM_NODE_RENDER];
221 VkResult result;
222 drmVersionPtr version;
223 int fd;
224 int master_fd = -1;
225
226 fd = open(path, O_RDWR | O_CLOEXEC);
227 if (fd < 0) {
228 if (instance->debug_flags & RADV_DEBUG_STARTUP)
229 radv_logi("Could not open device '%s'", path);
230
231 return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
232 }
233
234 version = drmGetVersion(fd);
235 if (!version) {
236 close(fd);
237
238 if (instance->debug_flags & RADV_DEBUG_STARTUP)
239 radv_logi("Could not get the kernel driver version for device '%s'", path);
240
241 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
242 "failed to get version %s: %m", path);
243 }
244
245 if (strcmp(version->name, "amdgpu")) {
246 drmFreeVersion(version);
247 close(fd);
248
249 if (instance->debug_flags & RADV_DEBUG_STARTUP)
250 radv_logi("Device '%s' is not using the amdgpu kernel driver.", path);
251
252 return VK_ERROR_INCOMPATIBLE_DRIVER;
253 }
254 drmFreeVersion(version);
255
256 if (instance->debug_flags & RADV_DEBUG_STARTUP)
257 radv_logi("Found compatible device '%s'.", path);
258
259 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
260 device->instance = instance;
261
262 device->ws = radv_amdgpu_winsys_create(fd, instance->debug_flags,
263 instance->perftest_flags);
264 if (!device->ws) {
265 result = vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
266 goto fail;
267 }
268
269 if (instance->enabled_extensions.KHR_display) {
270 master_fd = open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
271 if (master_fd >= 0) {
272 uint32_t accel_working = 0;
273 struct drm_amdgpu_info request = {
274 .return_pointer = (uintptr_t)&accel_working,
275 .return_size = sizeof(accel_working),
276 .query = AMDGPU_INFO_ACCEL_WORKING
277 };
278
279 if (drmCommandWrite(master_fd, DRM_AMDGPU_INFO, &request, sizeof (struct drm_amdgpu_info)) < 0 || !accel_working) {
280 close(master_fd);
281 master_fd = -1;
282 }
283 }
284 }
285
286 device->master_fd = master_fd;
287 device->local_fd = fd;
288 device->ws->query_info(device->ws, &device->rad_info);
289
290 radv_handle_env_var_force_family(device);
291
292 device->use_aco = instance->perftest_flags & RADV_PERFTEST_ACO;
293 if ((device->rad_info.chip_class < GFX8 ||
294 device->rad_info.chip_class > GFX9) && device->use_aco) {
295 fprintf(stderr, "WARNING: disabling ACO on unsupported GPUs.\n");
296 device->use_aco = false;
297 }
298
299 snprintf(device->name, sizeof(device->name),
300 "AMD RADV%s %s (LLVM " MESA_LLVM_VERSION_STRING ")", device->use_aco ? "/ACO" : "",
301 device->rad_info.name);
302
303 if (radv_device_get_cache_uuid(device->rad_info.family, device->cache_uuid)) {
304 device->ws->destroy(device->ws);
305 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
306 "cannot generate UUID");
307 goto fail;
308 }
309
310 /* These flags affect shader compilation. */
311 uint64_t shader_env_flags =
312 (device->instance->perftest_flags & RADV_PERFTEST_SISCHED ? 0x1 : 0) |
313 (device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH ? 0x2 : 0) |
314 (device->use_aco ? 0x4 : 0);
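/* These bits are passed as driver_flags to disk_cache_create() below, so
 * shaders compiled with different options do not share disk cache entries. */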
315
316 /* The GPU ID is already embedded in the UUID, so we just pass "radv"
317 * when creating the cache.
318 */
319 char buf[VK_UUID_SIZE * 2 + 1];
320 disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
321 device->disk_cache = disk_cache_create(device->name, buf, shader_env_flags);
322
323 if (device->rad_info.chip_class < GFX8 ||
324 device->rad_info.chip_class > GFX9)
325 fprintf(stderr, "WARNING: radv is not a conformant vulkan implementation, testing use only.\n");
326
327 radv_get_driver_uuid(&device->driver_uuid);
328 radv_get_device_uuid(&device->rad_info, &device->device_uuid);
329
330 device->out_of_order_rast_allowed = device->rad_info.has_out_of_order_rast &&
331 !(device->instance->debug_flags & RADV_DEBUG_NO_OUT_OF_ORDER);
332
333 device->dcc_msaa_allowed =
334 (device->instance->perftest_flags & RADV_PERFTEST_DCC_MSAA);
335
336 device->use_shader_ballot = device->rad_info.chip_class >= GFX8 &&
337 (device->use_aco || device->instance->perftest_flags & RADV_PERFTEST_SHADER_BALLOT);
338
339 device->use_ngg = device->rad_info.chip_class >= GFX10 &&
340 device->rad_info.family != CHIP_NAVI14 &&
341 !(device->instance->debug_flags & RADV_DEBUG_NO_NGG);
342
343 device->use_ngg_streamout = false;
344
345 /* Determine the number of threads per wave for all stages. */
346 device->cs_wave_size = 64;
347 device->ps_wave_size = 64;
348 device->ge_wave_size = 64;
349
350 if (device->rad_info.chip_class >= GFX10) {
351 if (device->instance->perftest_flags & RADV_PERFTEST_CS_WAVE_32)
352 device->cs_wave_size = 32;
353
354 /* For pixel shaders, wave64 is recommended. */
355 if (device->instance->perftest_flags & RADV_PERFTEST_PS_WAVE_32)
356 device->ps_wave_size = 32;
357
358 if (device->instance->perftest_flags & RADV_PERFTEST_GE_WAVE_32)
359 device->ge_wave_size = 32;
360 }
361
362 radv_physical_device_init_mem_types(device);
363 radv_fill_device_extension_table(device, &device->supported_extensions);
364
365 device->bus_info = *drm_device->businfo.pci;
366
367 if ((device->instance->debug_flags & RADV_DEBUG_INFO))
368 ac_print_gpu_info(&device->rad_info);
369
370 /* The WSI is structured as a layer on top of the driver, so this has
371 * to be the last part of initialization (at least until we get other
372 * semi-layers).
373 */
374 result = radv_init_wsi(device);
375 if (result != VK_SUCCESS) {
376 device->ws->destroy(device->ws);
377 vk_error(instance, result);
378 goto fail;
379 }
380
381 return VK_SUCCESS;
382
383 fail:
384 close(fd);
385 if (master_fd != -1)
386 close(master_fd);
387 return result;
388 }
389
390 static void
391 radv_physical_device_finish(struct radv_physical_device *device)
392 {
393 radv_finish_wsi(device);
394 device->ws->destroy(device->ws);
395 disk_cache_destroy(device->disk_cache);
396 close(device->local_fd);
397 if (device->master_fd != -1)
398 close(device->master_fd);
399 }
400
401 static void *
402 default_alloc_func(void *pUserData, size_t size, size_t align,
403 VkSystemAllocationScope allocationScope)
404 {
405 return malloc(size);
406 }
407
408 static void *
409 default_realloc_func(void *pUserData, void *pOriginal, size_t size,
410 size_t align, VkSystemAllocationScope allocationScope)
411 {
412 return realloc(pOriginal, size);
413 }
414
415 static void
416 default_free_func(void *pUserData, void *pMemory)
417 {
418 free(pMemory);
419 }
420
421 static const VkAllocationCallbacks default_alloc = {
422 .pUserData = NULL,
423 .pfnAllocation = default_alloc_func,
424 .pfnReallocation = default_realloc_func,
425 .pfnFree = default_free_func,
426 };
427
428 static const struct debug_control radv_debug_options[] = {
429 {"nofastclears", RADV_DEBUG_NO_FAST_CLEARS},
430 {"nodcc", RADV_DEBUG_NO_DCC},
431 {"shaders", RADV_DEBUG_DUMP_SHADERS},
432 {"nocache", RADV_DEBUG_NO_CACHE},
433 {"shaderstats", RADV_DEBUG_DUMP_SHADER_STATS},
434 {"nohiz", RADV_DEBUG_NO_HIZ},
435 {"nocompute", RADV_DEBUG_NO_COMPUTE_QUEUE},
436 {"unsafemath", RADV_DEBUG_UNSAFE_MATH},
437 {"allbos", RADV_DEBUG_ALL_BOS},
438 {"noibs", RADV_DEBUG_NO_IBS},
439 {"spirv", RADV_DEBUG_DUMP_SPIRV},
440 {"vmfaults", RADV_DEBUG_VM_FAULTS},
441 {"zerovram", RADV_DEBUG_ZERO_VRAM},
442 {"syncshaders", RADV_DEBUG_SYNC_SHADERS},
443 {"nosisched", RADV_DEBUG_NO_SISCHED},
444 {"preoptir", RADV_DEBUG_PREOPTIR},
445 {"nodynamicbounds", RADV_DEBUG_NO_DYNAMIC_BOUNDS},
446 {"nooutoforder", RADV_DEBUG_NO_OUT_OF_ORDER},
447 {"info", RADV_DEBUG_INFO},
448 {"errors", RADV_DEBUG_ERRORS},
449 {"startup", RADV_DEBUG_STARTUP},
450 {"checkir", RADV_DEBUG_CHECKIR},
451 {"nothreadllvm", RADV_DEBUG_NOTHREADLLVM},
452 {"nobinning", RADV_DEBUG_NOBINNING},
453 {"noloadstoreopt", RADV_DEBUG_NO_LOAD_STORE_OPT},
454 {"nongg", RADV_DEBUG_NO_NGG},
455 {"noshaderballot", RADV_DEBUG_NO_SHADER_BALLOT},
456 {"allentrypoints", RADV_DEBUG_ALL_ENTRYPOINTS},
457 {"metashaders", RADV_DEBUG_DUMP_META_SHADERS},
458 {NULL, 0}
459 };
460
461 const char *
462 radv_get_debug_option_name(int id)
463 {
464 assert(id < ARRAY_SIZE(radv_debug_options) - 1);
465 return radv_debug_options[id].string;
466 }
467
468 static const struct debug_control radv_perftest_options[] = {
469 {"nobatchchain", RADV_PERFTEST_NO_BATCHCHAIN},
470 {"sisched", RADV_PERFTEST_SISCHED},
471 {"localbos", RADV_PERFTEST_LOCAL_BOS},
472 {"dccmsaa", RADV_PERFTEST_DCC_MSAA},
473 {"bolist", RADV_PERFTEST_BO_LIST},
474 {"shader_ballot", RADV_PERFTEST_SHADER_BALLOT},
475 {"tccompatcmask", RADV_PERFTEST_TC_COMPAT_CMASK},
476 {"cswave32", RADV_PERFTEST_CS_WAVE_32},
477 {"pswave32", RADV_PERFTEST_PS_WAVE_32},
478 {"gewave32", RADV_PERFTEST_GE_WAVE_32},
479 {"dfsm", RADV_PERFTEST_DFSM},
480 {"aco", RADV_PERFTEST_ACO},
481 {NULL, 0}
482 };
483
484 const char *
485 radv_get_perftest_option_name(int id)
486 {
487 assert(id < ARRAY_SIZE(radv_perftest_options) - 1);
488 return radv_perftest_options[id].string;
489 }
490
491 static void
492 radv_handle_per_app_options(struct radv_instance *instance,
493 const VkApplicationInfo *info)
494 {
495 const char *name = info ? info->pApplicationName : NULL;
496
497 if (!name)
498 return;
499
500 if (!strcmp(name, "Talos - Linux - 32bit") ||
501 !strcmp(name, "Talos - Linux - 64bit")) {
502 if (!(instance->debug_flags & RADV_DEBUG_NO_SISCHED)) {
503 /* Force enable LLVM sisched for Talos because it looks
504 * safe and it gives a few more FPS.
505 */
506 instance->perftest_flags |= RADV_PERFTEST_SISCHED;
507 }
508 } else if (!strcmp(name, "DOOM_VFR")) {
509 /* Work around a Doom VFR game bug */
510 instance->debug_flags |= RADV_DEBUG_NO_DYNAMIC_BOUNDS;
511 } else if (!strcmp(name, "MonsterHunterWorld.exe")) {
512 /* Workaround for a WaW hazard when LLVM moves/merges
513 * load/store memory operations.
514 * See https://reviews.llvm.org/D61313
515 */
516 if (LLVM_VERSION_MAJOR < 9)
517 instance->debug_flags |= RADV_DEBUG_NO_LOAD_STORE_OPT;
518 } else if (!strcmp(name, "Wolfenstein: Youngblood")) {
519 if (!(instance->debug_flags & RADV_DEBUG_NO_SHADER_BALLOT)) {
520 /* Force enable VK_AMD_shader_ballot because it looks
521 * safe and it gives a nice boost (+20% on Vega 56 at
522 * this time).
523 */
524 instance->perftest_flags |= RADV_PERFTEST_SHADER_BALLOT;
525 }
526 } else if (!strcmp(name, "Fledge")) {
527 /*
528 * Zero VRAM for "The Surge 2"
529 *
530 * This avoids a hang when rendering any level. Likely caused by
531 * uninitialized data in an indirect draw.
532 */
533 instance->debug_flags |= RADV_DEBUG_ZERO_VRAM;
534 }
535 }
536
537 static int radv_get_instance_extension_index(const char *name)
538 {
539 for (unsigned i = 0; i < RADV_INSTANCE_EXTENSION_COUNT; ++i) {
540 if (strcmp(name, radv_instance_extensions[i].extensionName) == 0)
541 return i;
542 }
543 return -1;
544 }
545
546 static const char radv_dri_options_xml[] =
547 DRI_CONF_BEGIN
548 DRI_CONF_SECTION_PERFORMANCE
549 DRI_CONF_ADAPTIVE_SYNC("true")
550 DRI_CONF_VK_X11_OVERRIDE_MIN_IMAGE_COUNT(0)
551 DRI_CONF_VK_X11_STRICT_IMAGE_COUNT("false")
552 DRI_CONF_SECTION_END
553 DRI_CONF_END;
554
555 static void radv_init_dri_options(struct radv_instance *instance)
556 {
557 driParseOptionInfo(&instance->available_dri_options, radv_dri_options_xml);
558 driParseConfigFiles(&instance->dri_options,
559 &instance->available_dri_options,
560 0, "radv", NULL,
561 instance->engineName,
562 instance->engineVersion);
563 }
564
565 VkResult radv_CreateInstance(
566 const VkInstanceCreateInfo* pCreateInfo,
567 const VkAllocationCallbacks* pAllocator,
568 VkInstance* pInstance)
569 {
570 struct radv_instance *instance;
571 VkResult result;
572
573 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
574
575 uint32_t client_version;
576 if (pCreateInfo->pApplicationInfo &&
577 pCreateInfo->pApplicationInfo->apiVersion != 0) {
578 client_version = pCreateInfo->pApplicationInfo->apiVersion;
579 } else {
580 client_version = VK_API_VERSION_1_0;
581 }
582
583 const char *engine_name = NULL;
584 uint32_t engine_version = 0;
585 if (pCreateInfo->pApplicationInfo) {
586 engine_name = pCreateInfo->pApplicationInfo->pEngineName;
587 engine_version = pCreateInfo->pApplicationInfo->engineVersion;
588 }
589
590 instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
591 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
592 if (!instance)
593 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
594
595 instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
596
597 if (pAllocator)
598 instance->alloc = *pAllocator;
599 else
600 instance->alloc = default_alloc;
601
602 instance->apiVersion = client_version;
603 instance->physicalDeviceCount = -1;
604
605 instance->debug_flags = parse_debug_string(getenv("RADV_DEBUG"),
606 radv_debug_options);
607
608 instance->perftest_flags = parse_debug_string(getenv("RADV_PERFTEST"),
609 radv_perftest_options);
610
611 if (instance->perftest_flags & RADV_PERFTEST_ACO)
612 fprintf(stderr, "WARNING: Experimental compiler backend enabled. Here be dragons! Incorrect rendering, GPU hangs and/or resets are likely\n");
613
614 if (instance->debug_flags & RADV_DEBUG_STARTUP)
615 radv_logi("Created an instance");
616
617 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
618 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
619 int index = radv_get_instance_extension_index(ext_name);
620
621 if (index < 0 || !radv_supported_instance_extensions.extensions[index]) {
622 vk_free2(&default_alloc, pAllocator, instance);
623 return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
624 }
625
626 instance->enabled_extensions.extensions[index] = true;
627 }
628
629 result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
630 if (result != VK_SUCCESS) {
631 vk_free2(&default_alloc, pAllocator, instance);
632 return vk_error(instance, result);
633 }
634
635 instance->engineName = vk_strdup(&instance->alloc, engine_name,
636 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
637 instance->engineVersion = engine_version;
638
639 _mesa_locale_init();
640 glsl_type_singleton_init_or_ref();
641
642 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
643
644 radv_init_dri_options(instance);
645 radv_handle_per_app_options(instance, pCreateInfo->pApplicationInfo);
646
647 *pInstance = radv_instance_to_handle(instance);
648
649 return VK_SUCCESS;
650 }
651
652 void radv_DestroyInstance(
653 VkInstance _instance,
654 const VkAllocationCallbacks* pAllocator)
655 {
656 RADV_FROM_HANDLE(radv_instance, instance, _instance);
657
658 if (!instance)
659 return;
660
661 for (int i = 0; i < instance->physicalDeviceCount; ++i) {
662 radv_physical_device_finish(instance->physicalDevices + i);
663 }
664
665 vk_free(&instance->alloc, instance->engineName);
666
667 VG(VALGRIND_DESTROY_MEMPOOL(instance));
668
669 glsl_type_singleton_decref();
670 _mesa_locale_fini();
671
672 driDestroyOptionCache(&instance->dri_options);
673 driDestroyOptionInfo(&instance->available_dri_options);
674
675 vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
676
677 vk_free(&instance->alloc, instance);
678 }
679
680 static VkResult
681 radv_enumerate_devices(struct radv_instance *instance)
682 {
683 /* TODO: Check for more devices? */
684 drmDevicePtr devices[8];
685 VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
686 int max_devices;
687
688 instance->physicalDeviceCount = 0;
689
690 max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
691
692 if (instance->debug_flags & RADV_DEBUG_STARTUP)
693 radv_logi("Found %d drm nodes", max_devices);
694
695 if (max_devices < 1)
696 return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
697
698 for (unsigned i = 0; i < (unsigned)max_devices; i++) {
699 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
700 devices[i]->bustype == DRM_BUS_PCI &&
701 devices[i]->deviceinfo.pci->vendor_id == ATI_VENDOR_ID) {
702
703 result = radv_physical_device_init(instance->physicalDevices +
704 instance->physicalDeviceCount,
705 instance,
706 devices[i]);
707 if (result == VK_SUCCESS)
708 ++instance->physicalDeviceCount;
709 else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
710 break;
711 }
712 }
713 drmFreeDevices(devices, max_devices);
714
715 return result;
716 }
717
718 VkResult radv_EnumeratePhysicalDevices(
719 VkInstance _instance,
720 uint32_t* pPhysicalDeviceCount,
721 VkPhysicalDevice* pPhysicalDevices)
722 {
723 RADV_FROM_HANDLE(radv_instance, instance, _instance);
724 VkResult result;
725
726 if (instance->physicalDeviceCount < 0) {
727 result = radv_enumerate_devices(instance);
728 if (result != VK_SUCCESS &&
729 result != VK_ERROR_INCOMPATIBLE_DRIVER)
730 return result;
731 }
732
733 if (!pPhysicalDevices) {
734 *pPhysicalDeviceCount = instance->physicalDeviceCount;
735 } else {
736 *pPhysicalDeviceCount = MIN2(*pPhysicalDeviceCount, instance->physicalDeviceCount);
737 for (unsigned i = 0; i < *pPhysicalDeviceCount; ++i)
738 pPhysicalDevices[i] = radv_physical_device_to_handle(instance->physicalDevices + i);
739 }
740
741 return *pPhysicalDeviceCount < instance->physicalDeviceCount ? VK_INCOMPLETE
742 : VK_SUCCESS;
743 }
744
745 VkResult radv_EnumeratePhysicalDeviceGroups(
746 VkInstance _instance,
747 uint32_t* pPhysicalDeviceGroupCount,
748 VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties)
749 {
750 RADV_FROM_HANDLE(radv_instance, instance, _instance);
751 VkResult result;
752
753 if (instance->physicalDeviceCount < 0) {
754 result = radv_enumerate_devices(instance);
755 if (result != VK_SUCCESS &&
756 result != VK_ERROR_INCOMPATIBLE_DRIVER)
757 return result;
758 }
759
760 if (!pPhysicalDeviceGroupProperties) {
761 *pPhysicalDeviceGroupCount = instance->physicalDeviceCount;
762 } else {
763 *pPhysicalDeviceGroupCount = MIN2(*pPhysicalDeviceGroupCount, instance->physicalDeviceCount);
764 for (unsigned i = 0; i < *pPhysicalDeviceGroupCount; ++i) {
765 pPhysicalDeviceGroupProperties[i].physicalDeviceCount = 1;
766 pPhysicalDeviceGroupProperties[i].physicalDevices[0] = radv_physical_device_to_handle(instance->physicalDevices + i);
767 pPhysicalDeviceGroupProperties[i].subsetAllocation = false;
768 }
769 }
770 return *pPhysicalDeviceGroupCount < instance->physicalDeviceCount ? VK_INCOMPLETE
771 : VK_SUCCESS;
772 }
773
774 void radv_GetPhysicalDeviceFeatures(
775 VkPhysicalDevice physicalDevice,
776 VkPhysicalDeviceFeatures* pFeatures)
777 {
778 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
779 memset(pFeatures, 0, sizeof(*pFeatures));
780
781 *pFeatures = (VkPhysicalDeviceFeatures) {
782 .robustBufferAccess = true,
783 .fullDrawIndexUint32 = true,
784 .imageCubeArray = true,
785 .independentBlend = true,
786 .geometryShader = true,
787 .tessellationShader = true,
788 .sampleRateShading = true,
789 .dualSrcBlend = true,
790 .logicOp = true,
791 .multiDrawIndirect = true,
792 .drawIndirectFirstInstance = true,
793 .depthClamp = true,
794 .depthBiasClamp = true,
795 .fillModeNonSolid = true,
796 .depthBounds = true,
797 .wideLines = true,
798 .largePoints = true,
799 .alphaToOne = true,
800 .multiViewport = true,
801 .samplerAnisotropy = true,
802 .textureCompressionETC2 = radv_device_supports_etc(pdevice),
803 .textureCompressionASTC_LDR = false,
804 .textureCompressionBC = true,
805 .occlusionQueryPrecise = true,
806 .pipelineStatisticsQuery = true,
807 .vertexPipelineStoresAndAtomics = true,
808 .fragmentStoresAndAtomics = true,
809 .shaderTessellationAndGeometryPointSize = true,
810 .shaderImageGatherExtended = true,
811 .shaderStorageImageExtendedFormats = true,
812 .shaderStorageImageMultisample = pdevice->rad_info.chip_class >= GFX8,
813 .shaderUniformBufferArrayDynamicIndexing = true,
814 .shaderSampledImageArrayDynamicIndexing = true,
815 .shaderStorageBufferArrayDynamicIndexing = true,
816 .shaderStorageImageArrayDynamicIndexing = true,
817 .shaderStorageImageReadWithoutFormat = true,
818 .shaderStorageImageWriteWithoutFormat = true,
819 .shaderClipDistance = true,
820 .shaderCullDistance = true,
821 .shaderFloat64 = true,
822 .shaderInt64 = true,
823 .shaderInt16 = pdevice->rad_info.chip_class >= GFX9 && !pdevice->use_aco,
824 .sparseBinding = true,
825 .variableMultisampleRate = true,
826 .inheritedQueries = true,
827 };
828 }
829
830 void radv_GetPhysicalDeviceFeatures2(
831 VkPhysicalDevice physicalDevice,
832 VkPhysicalDeviceFeatures2 *pFeatures)
833 {
834 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
835 vk_foreach_struct(ext, pFeatures->pNext) {
836 switch (ext->sType) {
837 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
838 VkPhysicalDeviceVariablePointersFeatures *features = (void *)ext;
839 features->variablePointersStorageBuffer = true;
840 features->variablePointers = true;
841 break;
842 }
843 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
844 VkPhysicalDeviceMultiviewFeatures *features = (VkPhysicalDeviceMultiviewFeatures*)ext;
845 features->multiview = true;
846 features->multiviewGeometryShader = true;
847 features->multiviewTessellationShader = true;
848 break;
849 }
850 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: {
851 VkPhysicalDeviceShaderDrawParametersFeatures *features =
852 (VkPhysicalDeviceShaderDrawParametersFeatures*)ext;
853 features->shaderDrawParameters = true;
854 break;
855 }
856 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
857 VkPhysicalDeviceProtectedMemoryFeatures *features =
858 (VkPhysicalDeviceProtectedMemoryFeatures*)ext;
859 features->protectedMemory = false;
860 break;
861 }
862 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
863 VkPhysicalDevice16BitStorageFeatures *features =
864 (VkPhysicalDevice16BitStorageFeatures*)ext;
865 bool enabled = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_aco;
866 features->storageBuffer16BitAccess = enabled;
867 features->uniformAndStorageBuffer16BitAccess = enabled;
868 features->storagePushConstant16 = enabled;
869 features->storageInputOutput16 = enabled && LLVM_VERSION_MAJOR >= 9;
870 break;
871 }
872 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
873 VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
874 (VkPhysicalDeviceSamplerYcbcrConversionFeatures*)ext;
875 features->samplerYcbcrConversion = true;
876 break;
877 }
878 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
879 VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
880 (VkPhysicalDeviceDescriptorIndexingFeaturesEXT*)ext;
881 features->shaderInputAttachmentArrayDynamicIndexing = true;
882 features->shaderUniformTexelBufferArrayDynamicIndexing = true;
883 features->shaderStorageTexelBufferArrayDynamicIndexing = true;
884 features->shaderUniformBufferArrayNonUniformIndexing = true;
885 features->shaderSampledImageArrayNonUniformIndexing = true;
886 features->shaderStorageBufferArrayNonUniformIndexing = true;
887 features->shaderStorageImageArrayNonUniformIndexing = true;
888 features->shaderInputAttachmentArrayNonUniformIndexing = true;
889 features->shaderUniformTexelBufferArrayNonUniformIndexing = true;
890 features->shaderStorageTexelBufferArrayNonUniformIndexing = true;
891 features->descriptorBindingUniformBufferUpdateAfterBind = true;
892 features->descriptorBindingSampledImageUpdateAfterBind = true;
893 features->descriptorBindingStorageImageUpdateAfterBind = true;
894 features->descriptorBindingStorageBufferUpdateAfterBind = true;
895 features->descriptorBindingUniformTexelBufferUpdateAfterBind = true;
896 features->descriptorBindingStorageTexelBufferUpdateAfterBind = true;
897 features->descriptorBindingUpdateUnusedWhilePending = true;
898 features->descriptorBindingPartiallyBound = true;
899 features->descriptorBindingVariableDescriptorCount = true;
900 features->runtimeDescriptorArray = true;
901 break;
902 }
903 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
904 VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
905 (VkPhysicalDeviceConditionalRenderingFeaturesEXT*)ext;
906 features->conditionalRendering = true;
907 features->inheritedConditionalRendering = false;
908 break;
909 }
910 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT: {
911 VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *features =
912 (VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *)ext;
913 features->vertexAttributeInstanceRateDivisor = VK_TRUE;
914 features->vertexAttributeInstanceRateZeroDivisor = VK_TRUE;
915 break;
916 }
917 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT: {
918 VkPhysicalDeviceTransformFeedbackFeaturesEXT *features =
919 (VkPhysicalDeviceTransformFeedbackFeaturesEXT*)ext;
920 features->transformFeedback = true;
921 features->geometryStreams = !pdevice->use_ngg_streamout;
922 break;
923 }
924 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES_EXT: {
925 VkPhysicalDeviceScalarBlockLayoutFeaturesEXT *features =
926 (VkPhysicalDeviceScalarBlockLayoutFeaturesEXT *)ext;
927 features->scalarBlockLayout = pdevice->rad_info.chip_class >= GFX7;
928 break;
929 }
930 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT: {
931 VkPhysicalDeviceMemoryPriorityFeaturesEXT *features =
932 (VkPhysicalDeviceMemoryPriorityFeaturesEXT *)ext;
933 features->memoryPriority = VK_TRUE;
934 break;
935 }
936 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT: {
937 VkPhysicalDeviceBufferDeviceAddressFeaturesEXT *features =
938 (VkPhysicalDeviceBufferDeviceAddressFeaturesEXT *)ext;
939 features->bufferDeviceAddress = true;
940 features->bufferDeviceAddressCaptureReplay = false;
941 features->bufferDeviceAddressMultiDevice = false;
942 break;
943 }
944 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT: {
945 VkPhysicalDeviceDepthClipEnableFeaturesEXT *features =
946 (VkPhysicalDeviceDepthClipEnableFeaturesEXT *)ext;
947 features->depthClipEnable = true;
948 break;
949 }
950 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT: {
951 VkPhysicalDeviceHostQueryResetFeaturesEXT *features =
952 (VkPhysicalDeviceHostQueryResetFeaturesEXT *)ext;
953 features->hostQueryReset = true;
954 break;
955 }
956 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR: {
957 VkPhysicalDevice8BitStorageFeaturesKHR *features =
958 (VkPhysicalDevice8BitStorageFeaturesKHR*)ext;
959 bool enabled = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_aco;
960 features->storageBuffer8BitAccess = enabled;
961 features->uniformAndStorageBuffer8BitAccess = enabled;
962 features->storagePushConstant8 = enabled;
963 break;
964 }
965 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR: {
966 VkPhysicalDeviceFloat16Int8FeaturesKHR *features =
967 (VkPhysicalDeviceFloat16Int8FeaturesKHR*)ext;
968 features->shaderFloat16 = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_aco;
969 features->shaderInt8 = !pdevice->use_aco;
970 break;
971 }
972 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR: {
973 VkPhysicalDeviceShaderAtomicInt64FeaturesKHR *features =
974 (VkPhysicalDeviceShaderAtomicInt64FeaturesKHR *)ext;
975 features->shaderBufferInt64Atomics = LLVM_VERSION_MAJOR >= 9;
976 features->shaderSharedInt64Atomics = LLVM_VERSION_MAJOR >= 9;
977 break;
978 }
979 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT: {
980 VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT *features =
981 (VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT *)ext;
982 features->shaderDemoteToHelperInvocation = pdevice->use_aco;
983 break;
984 }
985 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT: {
986 VkPhysicalDeviceInlineUniformBlockFeaturesEXT *features =
987 (VkPhysicalDeviceInlineUniformBlockFeaturesEXT *)ext;
988
989 features->inlineUniformBlock = true;
990 features->descriptorBindingInlineUniformBlockUpdateAfterBind = true;
991 break;
992 }
993 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV: {
994 VkPhysicalDeviceComputeShaderDerivativesFeaturesNV *features =
995 (VkPhysicalDeviceComputeShaderDerivativesFeaturesNV *)ext;
996 features->computeDerivativeGroupQuads = false;
997 features->computeDerivativeGroupLinear = true;
998 break;
999 }
1000 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT: {
1001 VkPhysicalDeviceYcbcrImageArraysFeaturesEXT *features =
1002 (VkPhysicalDeviceYcbcrImageArraysFeaturesEXT*)ext;
1003 features->ycbcrImageArrays = true;
1004 break;
1005 }
1006 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES_KHR: {
1007 VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR *features =
1008 (VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR *)ext;
1009 features->uniformBufferStandardLayout = true;
1010 break;
1011 }
1012 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT: {
1013 VkPhysicalDeviceIndexTypeUint8FeaturesEXT *features =
1014 (VkPhysicalDeviceIndexTypeUint8FeaturesEXT *)ext;
1015 features->indexTypeUint8 = pdevice->rad_info.chip_class >= GFX8;
1016 break;
1017 }
1018 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES_KHR: {
1019 VkPhysicalDeviceImagelessFramebufferFeaturesKHR *features =
1020 (VkPhysicalDeviceImagelessFramebufferFeaturesKHR *)ext;
1021 features->imagelessFramebuffer = true;
1022 break;
1023 }
1024 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR: {
1025 VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR *features =
1026 (VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR *)ext;
1027 features->pipelineExecutableInfo = true;
1028 break;
1029 }
1030 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR: {
1031 VkPhysicalDeviceShaderClockFeaturesKHR *features =
1032 (VkPhysicalDeviceShaderClockFeaturesKHR *)ext;
1033 features->shaderSubgroupClock = true;
1034 features->shaderDeviceClock = false;
1035 break;
1036 }
1037 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT: {
1038 VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT *features =
1039 (VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT *)ext;
1040 features->texelBufferAlignment = true;
1041 break;
1042 }
1043 default:
1044 break;
1045 }
1046 }
1047 return radv_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
1048 }
1049
1050 void radv_GetPhysicalDeviceProperties(
1051 VkPhysicalDevice physicalDevice,
1052 VkPhysicalDeviceProperties* pProperties)
1053 {
1054 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
1055 VkSampleCountFlags sample_counts = 0xf;
1056
1057 /* Make sure that the entire descriptor set is addressable with a signed
1058 * 32-bit int, so the sum of all limits scaled by descriptor size has to
1059 * be at most 2 GiB. A combined image & sampler counts as one of
1060 * each. This limit is for the pipeline layout, not for the set layout, but
1061 * there is no set limit, so we just set a pipeline limit. No app is
1062 * likely to hit this soon. */
1063 size_t max_descriptor_set_size = ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
1064 (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
1065 32 /* storage buffer, 32 due to potential space wasted on alignment */ +
1066 32 /* sampler, largest when combined with image */ +
1067 64 /* sampled image */ +
1068 64 /* storage image */);
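/* With the 224 bytes per descriptor assumed above, this works out to roughly
 * 9.5 million descriptors of each type per stage. */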
1069
1070 VkPhysicalDeviceLimits limits = {
1071 .maxImageDimension1D = (1 << 14),
1072 .maxImageDimension2D = (1 << 14),
1073 .maxImageDimension3D = (1 << 11),
1074 .maxImageDimensionCube = (1 << 14),
1075 .maxImageArrayLayers = (1 << 11),
1076 .maxTexelBufferElements = 128 * 1024 * 1024,
1077 .maxUniformBufferRange = UINT32_MAX,
1078 .maxStorageBufferRange = UINT32_MAX,
1079 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
1080 .maxMemoryAllocationCount = UINT32_MAX,
1081 .maxSamplerAllocationCount = 64 * 1024,
1082 .bufferImageGranularity = 64, /* A cache line */
1083 .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
1084 .maxBoundDescriptorSets = MAX_SETS,
1085 .maxPerStageDescriptorSamplers = max_descriptor_set_size,
1086 .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
1087 .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
1088 .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
1089 .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
1090 .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
1091 .maxPerStageResources = max_descriptor_set_size,
1092 .maxDescriptorSetSamplers = max_descriptor_set_size,
1093 .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
1094 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
1095 .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
1096 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
1097 .maxDescriptorSetSampledImages = max_descriptor_set_size,
1098 .maxDescriptorSetStorageImages = max_descriptor_set_size,
1099 .maxDescriptorSetInputAttachments = max_descriptor_set_size,
1100 .maxVertexInputAttributes = MAX_VERTEX_ATTRIBS,
1101 .maxVertexInputBindings = MAX_VBS,
1102 .maxVertexInputAttributeOffset = 2047,
1103 .maxVertexInputBindingStride = 2048,
1104 .maxVertexOutputComponents = 128,
1105 .maxTessellationGenerationLevel = 64,
1106 .maxTessellationPatchSize = 32,
1107 .maxTessellationControlPerVertexInputComponents = 128,
1108 .maxTessellationControlPerVertexOutputComponents = 128,
1109 .maxTessellationControlPerPatchOutputComponents = 120,
1110 .maxTessellationControlTotalOutputComponents = 4096,
1111 .maxTessellationEvaluationInputComponents = 128,
1112 .maxTessellationEvaluationOutputComponents = 128,
1113 .maxGeometryShaderInvocations = 127,
1114 .maxGeometryInputComponents = 64,
1115 .maxGeometryOutputComponents = 128,
1116 .maxGeometryOutputVertices = 256,
1117 .maxGeometryTotalOutputComponents = 1024,
1118 .maxFragmentInputComponents = 128,
1119 .maxFragmentOutputAttachments = 8,
1120 .maxFragmentDualSrcAttachments = 1,
1121 .maxFragmentCombinedOutputResources = 8,
1122 .maxComputeSharedMemorySize = 32768,
1123 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
1124 .maxComputeWorkGroupInvocations = 2048,
1125 .maxComputeWorkGroupSize = {
1126 2048,
1127 2048,
1128 2048
1129 },
1130 .subPixelPrecisionBits = 8,
1131 .subTexelPrecisionBits = 8,
1132 .mipmapPrecisionBits = 8,
1133 .maxDrawIndexedIndexValue = UINT32_MAX,
1134 .maxDrawIndirectCount = UINT32_MAX,
1135 .maxSamplerLodBias = 16,
1136 .maxSamplerAnisotropy = 16,
1137 .maxViewports = MAX_VIEWPORTS,
1138 .maxViewportDimensions = { (1 << 14), (1 << 14) },
1139 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
1140 .viewportSubPixelBits = 8,
1141 .minMemoryMapAlignment = 4096, /* A page */
1142 .minTexelBufferOffsetAlignment = 4,
1143 .minUniformBufferOffsetAlignment = 4,
1144 .minStorageBufferOffsetAlignment = 4,
1145 .minTexelOffset = -32,
1146 .maxTexelOffset = 31,
1147 .minTexelGatherOffset = -32,
1148 .maxTexelGatherOffset = 31,
1149 .minInterpolationOffset = -2,
1150 .maxInterpolationOffset = 2,
1151 .subPixelInterpolationOffsetBits = 8,
1152 .maxFramebufferWidth = (1 << 14),
1153 .maxFramebufferHeight = (1 << 14),
1154 .maxFramebufferLayers = (1 << 10),
1155 .framebufferColorSampleCounts = sample_counts,
1156 .framebufferDepthSampleCounts = sample_counts,
1157 .framebufferStencilSampleCounts = sample_counts,
1158 .framebufferNoAttachmentsSampleCounts = sample_counts,
1159 .maxColorAttachments = MAX_RTS,
1160 .sampledImageColorSampleCounts = sample_counts,
1161 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
1162 .sampledImageDepthSampleCounts = sample_counts,
1163 .sampledImageStencilSampleCounts = sample_counts,
1164 .storageImageSampleCounts = pdevice->rad_info.chip_class >= GFX8 ? sample_counts : VK_SAMPLE_COUNT_1_BIT,
1165 .maxSampleMaskWords = 1,
1166 .timestampComputeAndGraphics = true,
1167 .timestampPeriod = 1000000.0 / pdevice->rad_info.clock_crystal_freq,
1168 .maxClipDistances = 8,
1169 .maxCullDistances = 8,
1170 .maxCombinedClipAndCullDistances = 8,
1171 .discreteQueuePriorities = 2,
1172 .pointSizeRange = { 0.0, 8192.0 },
1173 .lineWidthRange = { 0.0, 7.9921875 },
1174 .pointSizeGranularity = (1.0 / 8.0),
1175 .lineWidthGranularity = (1.0 / 128.0),
1176 .strictLines = false, /* FINISHME */
1177 .standardSampleLocations = true,
1178 .optimalBufferCopyOffsetAlignment = 128,
1179 .optimalBufferCopyRowPitchAlignment = 128,
1180 .nonCoherentAtomSize = 64,
1181 };
1182
1183 *pProperties = (VkPhysicalDeviceProperties) {
1184 .apiVersion = radv_physical_device_api_version(pdevice),
1185 .driverVersion = vk_get_driver_version(),
1186 .vendorID = ATI_VENDOR_ID,
1187 .deviceID = pdevice->rad_info.pci_id,
1188 .deviceType = pdevice->rad_info.has_dedicated_vram ? VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU : VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
1189 .limits = limits,
1190 .sparseProperties = {0},
1191 };
1192
1193 strcpy(pProperties->deviceName, pdevice->name);
1194 memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
1195 }
1196
1197 void radv_GetPhysicalDeviceProperties2(
1198 VkPhysicalDevice physicalDevice,
1199 VkPhysicalDeviceProperties2 *pProperties)
1200 {
1201 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
1202 radv_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
1203
1204 vk_foreach_struct(ext, pProperties->pNext) {
1205 switch (ext->sType) {
1206 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
1207 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
1208 (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
1209 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
1210 break;
1211 }
1212 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
1213 VkPhysicalDeviceIDProperties *properties = (VkPhysicalDeviceIDProperties*)ext;
1214 memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
1215 memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
1216 properties->deviceLUIDValid = false;
1217 break;
1218 }
1219 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
1220 VkPhysicalDeviceMultiviewProperties *properties = (VkPhysicalDeviceMultiviewProperties*)ext;
1221 properties->maxMultiviewViewCount = MAX_VIEWS;
1222 properties->maxMultiviewInstanceIndex = INT_MAX;
1223 break;
1224 }
1225 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
1226 VkPhysicalDevicePointClippingProperties *properties =
1227 (VkPhysicalDevicePointClippingProperties*)ext;
1228 properties->pointClippingBehavior = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
1229 break;
1230 }
1231 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT: {
1232 VkPhysicalDeviceDiscardRectanglePropertiesEXT *properties =
1233 (VkPhysicalDeviceDiscardRectanglePropertiesEXT*)ext;
1234 properties->maxDiscardRectangles = MAX_DISCARD_RECTANGLES;
1235 break;
1236 }
1237 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT: {
1238 VkPhysicalDeviceExternalMemoryHostPropertiesEXT *properties =
1239 (VkPhysicalDeviceExternalMemoryHostPropertiesEXT *) ext;
1240 properties->minImportedHostPointerAlignment = 4096;
1241 break;
1242 }
1243 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES: {
1244 VkPhysicalDeviceSubgroupProperties *properties =
1245 (VkPhysicalDeviceSubgroupProperties*)ext;
1246 properties->subgroupSize = 64;
1247 properties->supportedStages = VK_SHADER_STAGE_ALL;
1248 properties->supportedOperations =
1249 VK_SUBGROUP_FEATURE_BASIC_BIT |
1250 VK_SUBGROUP_FEATURE_BALLOT_BIT |
1251 VK_SUBGROUP_FEATURE_QUAD_BIT |
1252 VK_SUBGROUP_FEATURE_VOTE_BIT;
1253 if (pdevice->rad_info.chip_class >= GFX8) {
1254 properties->supportedOperations |=
1255 VK_SUBGROUP_FEATURE_ARITHMETIC_BIT |
1256 VK_SUBGROUP_FEATURE_CLUSTERED_BIT |
1257 VK_SUBGROUP_FEATURE_SHUFFLE_BIT |
1258 VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT;
1259 }
1260 properties->quadOperationsInAllStages = true;
1261 break;
1262 }
1263 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
1264 VkPhysicalDeviceMaintenance3Properties *properties =
1265 (VkPhysicalDeviceMaintenance3Properties*)ext;
1266 /* Make sure everything is addressable by a signed 32-bit int, and
1267 * our largest descriptors are 96 bytes. */
1268 properties->maxPerSetDescriptors = (1ull << 31) / 96;
1269 /* Our buffer size fields allow only this much */
1270 properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
1271 break;
1272 }
1273 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT: {
1274 VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT *properties =
1275 (VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT *)ext;
1276 /* GFX6-8 only support single channel min/max filter. */
1277 properties->filterMinmaxImageComponentMapping = pdevice->rad_info.chip_class >= GFX9;
1278 properties->filterMinmaxSingleComponentFormats = true;
1279 break;
1280 }
1281 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD: {
1282 VkPhysicalDeviceShaderCorePropertiesAMD *properties =
1283 (VkPhysicalDeviceShaderCorePropertiesAMD *)ext;
1284
1285 /* Shader engines. */
1286 properties->shaderEngineCount =
1287 pdevice->rad_info.max_se;
1288 properties->shaderArraysPerEngineCount =
1289 pdevice->rad_info.max_sh_per_se;
1290 properties->computeUnitsPerShaderArray =
1291 pdevice->rad_info.num_good_cu_per_sh;
1292 properties->simdPerComputeUnit = 4;
1293 properties->wavefrontsPerSimd =
1294 pdevice->rad_info.family == CHIP_TONGA ||
1295 pdevice->rad_info.family == CHIP_ICELAND ||
1296 pdevice->rad_info.family == CHIP_POLARIS10 ||
1297 pdevice->rad_info.family == CHIP_POLARIS11 ||
1298 pdevice->rad_info.family == CHIP_POLARIS12 ||
1299 pdevice->rad_info.family == CHIP_VEGAM ? 8 : 10;
1300 properties->wavefrontSize = 64;
1301
1302 /* SGPR. */
1303 properties->sgprsPerSimd =
1304 pdevice->rad_info.num_physical_sgprs_per_simd;
1305 properties->minSgprAllocation =
1306 pdevice->rad_info.chip_class >= GFX8 ? 16 : 8;
1307 properties->maxSgprAllocation =
1308 pdevice->rad_info.family == CHIP_TONGA ||
1309 pdevice->rad_info.family == CHIP_ICELAND ? 96 : 104;
1310 properties->sgprAllocationGranularity =
1311 pdevice->rad_info.chip_class >= GFX8 ? 16 : 8;
1312
1313 /* VGPR. */
1314 properties->vgprsPerSimd = RADV_NUM_PHYSICAL_VGPRS;
1315 properties->minVgprAllocation = 4;
1316 properties->maxVgprAllocation = 256;
1317 properties->vgprAllocationGranularity = 4;
1318 break;
1319 }
1320 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD: {
1321 VkPhysicalDeviceShaderCoreProperties2AMD *properties =
1322 (VkPhysicalDeviceShaderCoreProperties2AMD *)ext;
1323
1324 properties->shaderCoreFeatures = 0;
1325 properties->activeComputeUnitCount =
1326 pdevice->rad_info.num_good_compute_units;
1327 break;
1328 }
1329 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT: {
1330 VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *properties =
1331 (VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *)ext;
1332 properties->maxVertexAttribDivisor = UINT32_MAX;
1333 break;
1334 }
1335 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT: {
1336 VkPhysicalDeviceDescriptorIndexingPropertiesEXT *properties =
1337 (VkPhysicalDeviceDescriptorIndexingPropertiesEXT*)ext;
1338 properties->maxUpdateAfterBindDescriptorsInAllPools = UINT32_MAX / 64;
1339 properties->shaderUniformBufferArrayNonUniformIndexingNative = false;
1340 properties->shaderSampledImageArrayNonUniformIndexingNative = false;
1341 properties->shaderStorageBufferArrayNonUniformIndexingNative = false;
1342 properties->shaderStorageImageArrayNonUniformIndexingNative = false;
1343 properties->shaderInputAttachmentArrayNonUniformIndexingNative = false;
1344 properties->robustBufferAccessUpdateAfterBind = false;
1345 properties->quadDivergentImplicitLod = false;
1346
1347 size_t max_descriptor_set_size = ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS -
1348 MAX_INLINE_UNIFORM_BLOCK_SIZE * MAX_INLINE_UNIFORM_BLOCK_COUNT) /
1349 (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
1350 32 /* storage buffer, 32 due to potential space wasted on alignment */ +
1351 32 /* sampler, largest when combined with image */ +
1352 64 /* sampled image */ +
1353 64 /* storage image */);
1354 properties->maxPerStageDescriptorUpdateAfterBindSamplers = max_descriptor_set_size;
1355 properties->maxPerStageDescriptorUpdateAfterBindUniformBuffers = max_descriptor_set_size;
1356 properties->maxPerStageDescriptorUpdateAfterBindStorageBuffers = max_descriptor_set_size;
1357 properties->maxPerStageDescriptorUpdateAfterBindSampledImages = max_descriptor_set_size;
1358 properties->maxPerStageDescriptorUpdateAfterBindStorageImages = max_descriptor_set_size;
1359 properties->maxPerStageDescriptorUpdateAfterBindInputAttachments = max_descriptor_set_size;
1360 properties->maxPerStageUpdateAfterBindResources = max_descriptor_set_size;
1361 properties->maxDescriptorSetUpdateAfterBindSamplers = max_descriptor_set_size;
1362 properties->maxDescriptorSetUpdateAfterBindUniformBuffers = max_descriptor_set_size;
1363 properties->maxDescriptorSetUpdateAfterBindUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS;
1364 properties->maxDescriptorSetUpdateAfterBindStorageBuffers = max_descriptor_set_size;
1365 properties->maxDescriptorSetUpdateAfterBindStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS;
1366 properties->maxDescriptorSetUpdateAfterBindSampledImages = max_descriptor_set_size;
1367 properties->maxDescriptorSetUpdateAfterBindStorageImages = max_descriptor_set_size;
1368 properties->maxDescriptorSetUpdateAfterBindInputAttachments = max_descriptor_set_size;
1369 break;
1370 }
1371 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES: {
1372 VkPhysicalDeviceProtectedMemoryProperties *properties =
1373 (VkPhysicalDeviceProtectedMemoryProperties *)ext;
1374 properties->protectedNoFault = false;
1375 break;
1376 }
1377 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT: {
1378 VkPhysicalDeviceConservativeRasterizationPropertiesEXT *properties =
1379 (VkPhysicalDeviceConservativeRasterizationPropertiesEXT *)ext;
1380 properties->primitiveOverestimationSize = 0;
1381 properties->maxExtraPrimitiveOverestimationSize = 0;
1382 properties->extraPrimitiveOverestimationSizeGranularity = 0;
1383 properties->primitiveUnderestimation = VK_FALSE;
1384 properties->conservativePointAndLineRasterization = VK_FALSE;
1385 properties->degenerateTrianglesRasterized = VK_FALSE;
1386 properties->degenerateLinesRasterized = VK_FALSE;
1387 properties->fullyCoveredFragmentShaderInputVariable = VK_FALSE;
1388 properties->conservativeRasterizationPostDepthCoverage = VK_FALSE;
1389 break;
1390 }
1391 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT: {
1392 VkPhysicalDevicePCIBusInfoPropertiesEXT *properties =
1393 (VkPhysicalDevicePCIBusInfoPropertiesEXT *)ext;
1394 properties->pciDomain = pdevice->bus_info.domain;
1395 properties->pciBus = pdevice->bus_info.bus;
1396 properties->pciDevice = pdevice->bus_info.dev;
1397 properties->pciFunction = pdevice->bus_info.func;
1398 break;
1399 }
1400 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR: {
1401 VkPhysicalDeviceDriverPropertiesKHR *driver_props =
1402 (VkPhysicalDeviceDriverPropertiesKHR *) ext;
1403
1404 driver_props->driverID = VK_DRIVER_ID_MESA_RADV_KHR;
1405 snprintf(driver_props->driverName, VK_MAX_DRIVER_NAME_SIZE_KHR, "radv");
1406 snprintf(driver_props->driverInfo, VK_MAX_DRIVER_INFO_SIZE_KHR,
1407 "Mesa " PACKAGE_VERSION MESA_GIT_SHA1
1408 " (LLVM " MESA_LLVM_VERSION_STRING ")");
1409
1410 driver_props->conformanceVersion = (VkConformanceVersionKHR) {
1411 .major = 1,
1412 .minor = 1,
1413 .subminor = 2,
1414 .patch = 0,
1415 };
1416 break;
1417 }
1418 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT: {
1419 VkPhysicalDeviceTransformFeedbackPropertiesEXT *properties =
1420 (VkPhysicalDeviceTransformFeedbackPropertiesEXT *)ext;
1421 properties->maxTransformFeedbackStreams = MAX_SO_STREAMS;
1422 properties->maxTransformFeedbackBuffers = MAX_SO_BUFFERS;
1423 properties->maxTransformFeedbackBufferSize = UINT32_MAX;
1424 properties->maxTransformFeedbackStreamDataSize = 512;
1425 properties->maxTransformFeedbackBufferDataSize = UINT32_MAX;
1426 properties->maxTransformFeedbackBufferDataStride = 512;
1427 properties->transformFeedbackQueries = !pdevice->use_ngg_streamout;
1428 properties->transformFeedbackStreamsLinesTriangles = !pdevice->use_ngg_streamout;
1429 properties->transformFeedbackRasterizationStreamSelect = false;
1430 properties->transformFeedbackDraw = true;
1431 break;
1432 }
1433 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT: {
1434 VkPhysicalDeviceInlineUniformBlockPropertiesEXT *props =
1435 (VkPhysicalDeviceInlineUniformBlockPropertiesEXT *)ext;
1436
1437 props->maxInlineUniformBlockSize = MAX_INLINE_UNIFORM_BLOCK_SIZE;
1438 props->maxPerStageDescriptorInlineUniformBlocks = MAX_INLINE_UNIFORM_BLOCK_SIZE * MAX_SETS;
1439 props->maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks = MAX_INLINE_UNIFORM_BLOCK_SIZE * MAX_SETS;
1440 props->maxDescriptorSetInlineUniformBlocks = MAX_INLINE_UNIFORM_BLOCK_COUNT;
1441 props->maxDescriptorSetUpdateAfterBindInlineUniformBlocks = MAX_INLINE_UNIFORM_BLOCK_COUNT;
1442 break;
1443 }
1444 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT: {
1445 VkPhysicalDeviceSampleLocationsPropertiesEXT *properties =
1446 (VkPhysicalDeviceSampleLocationsPropertiesEXT *)ext;
1447 properties->sampleLocationSampleCounts = VK_SAMPLE_COUNT_2_BIT |
1448 VK_SAMPLE_COUNT_4_BIT |
1449 VK_SAMPLE_COUNT_8_BIT;
1450 properties->maxSampleLocationGridSize = (VkExtent2D){ 2 , 2 };
1451 properties->sampleLocationCoordinateRange[0] = 0.0f;
1452 properties->sampleLocationCoordinateRange[1] = 0.9375f;
1453 properties->sampleLocationSubPixelBits = 4;
1454 properties->variableSampleLocations = VK_FALSE;
1455 break;
1456 }
1457 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES_KHR: {
1458 VkPhysicalDeviceDepthStencilResolvePropertiesKHR *properties =
1459 (VkPhysicalDeviceDepthStencilResolvePropertiesKHR *)ext;
1460
1461 /* We support all of the depth resolve modes */
1462 properties->supportedDepthResolveModes =
1463 VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR |
1464 VK_RESOLVE_MODE_AVERAGE_BIT_KHR |
1465 VK_RESOLVE_MODE_MIN_BIT_KHR |
1466 VK_RESOLVE_MODE_MAX_BIT_KHR;
1467
1468 /* Average doesn't make sense for stencil so we don't support that */
1469 properties->supportedStencilResolveModes =
1470 VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR |
1471 VK_RESOLVE_MODE_MIN_BIT_KHR |
1472 VK_RESOLVE_MODE_MAX_BIT_KHR;
1473
1474 properties->independentResolveNone = VK_TRUE;
1475 properties->independentResolve = VK_TRUE;
1476 break;
1477 }
1478 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT: {
1479 VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT *properties =
1480 (VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT *)ext;
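/* Reporting single-texel alignment means that, per VK_EXT_texel_buffer_alignment,
 * the effective offset requirement for a buffer view is the minimum of the value
 * below and the size of a single texel (or component) of the view's format, so
 * tightly packed views of small formats remain legal.
 */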
1481 properties->storageTexelBufferOffsetAlignmentBytes = 4;
1482 properties->storageTexelBufferOffsetSingleTexelAlignment = true;
1483 properties->uniformTexelBufferOffsetAlignmentBytes = 4;
1484 properties->uniformTexelBufferOffsetSingleTexelAlignment = true;
1485 break;
1486 }
1487 default:
1488 break;
1489 }
1490 }
1491 }
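/* Illustrative only, not driver code: an application reads the limits reported
 * above by chaining the extension struct through vkGetPhysicalDeviceProperties2
 * (physical_device here is a placeholder handle):
 *
 *    VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT align_props = {
 *            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT,
 *    };
 *    VkPhysicalDeviceProperties2 props2 = {
 *            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
 *            .pNext = &align_props,
 *    };
 *    vkGetPhysicalDeviceProperties2(physical_device, &props2);
 *
 * After the call, align_props holds the values filled in by the switch above.
 */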
1492
1493 static void radv_get_physical_device_queue_family_properties(
1494 struct radv_physical_device* pdevice,
1495 uint32_t* pCount,
1496 VkQueueFamilyProperties** pQueueFamilyProperties)
1497 {
1498 int num_queue_families = 1;
1499 int idx;
1500 if (pdevice->rad_info.num_compute_rings > 0 &&
1501 !(pdevice->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE))
1502 num_queue_families++;
1503
1504 if (pQueueFamilyProperties == NULL) {
1505 *pCount = num_queue_families;
1506 return;
1507 }
1508
1509 if (!*pCount)
1510 return;
1511
1512 idx = 0;
1513 if (*pCount >= 1) {
1514 *pQueueFamilyProperties[idx] = (VkQueueFamilyProperties) {
1515 .queueFlags = VK_QUEUE_GRAPHICS_BIT |
1516 VK_QUEUE_COMPUTE_BIT |
1517 VK_QUEUE_TRANSFER_BIT |
1518 VK_QUEUE_SPARSE_BINDING_BIT,
1519 .queueCount = 1,
1520 .timestampValidBits = 64,
1521 .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
1522 };
1523 idx++;
1524 }
1525
1526 if (pdevice->rad_info.num_compute_rings > 0 &&
1527 !(pdevice->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE)) {
1528 if (*pCount > idx) {
1529 *pQueueFamilyProperties[idx] = (VkQueueFamilyProperties) {
1530 .queueFlags = VK_QUEUE_COMPUTE_BIT |
1531 VK_QUEUE_TRANSFER_BIT |
1532 VK_QUEUE_SPARSE_BINDING_BIT,
1533 .queueCount = pdevice->rad_info.num_compute_rings,
1534 .timestampValidBits = 64,
1535 .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
1536 };
1537 idx++;
1538 }
1539 }
1540 *pCount = idx;
1541 }
1542
1543 void radv_GetPhysicalDeviceQueueFamilyProperties(
1544 VkPhysicalDevice physicalDevice,
1545 uint32_t* pCount,
1546 VkQueueFamilyProperties* pQueueFamilyProperties)
1547 {
1548 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
1549 if (!pQueueFamilyProperties) {
1550 radv_get_physical_device_queue_family_properties(pdevice, pCount, NULL);
1551 return;
1552 }
1553 VkQueueFamilyProperties *properties[] = {
1554 pQueueFamilyProperties + 0,
1555 pQueueFamilyProperties + 1,
1556 pQueueFamilyProperties + 2,
1557 };
1558 radv_get_physical_device_queue_family_properties(pdevice, pCount, properties);
1559 assert(*pCount <= 3);
1560 }
1561
1562 void radv_GetPhysicalDeviceQueueFamilyProperties2(
1563 VkPhysicalDevice physicalDevice,
1564 uint32_t* pCount,
1565 VkQueueFamilyProperties2 *pQueueFamilyProperties)
1566 {
1567 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
1568 if (!pQueueFamilyProperties) {
1569 radv_get_physical_device_queue_family_properties(pdevice, pCount, NULL);
1570 return;
1571 }
1572 VkQueueFamilyProperties *properties[] = {
1573 &pQueueFamilyProperties[0].queueFamilyProperties,
1574 &pQueueFamilyProperties[1].queueFamilyProperties,
1575 &pQueueFamilyProperties[2].queueFamilyProperties,
1576 };
1577 radv_get_physical_device_queue_family_properties(pdevice, pCount, properties);
1578 assert(*pCount <= 3);
1579 }
1580
1581 void radv_GetPhysicalDeviceMemoryProperties(
1582 VkPhysicalDevice physicalDevice,
1583 VkPhysicalDeviceMemoryProperties *pMemoryProperties)
1584 {
1585 RADV_FROM_HANDLE(radv_physical_device, physical_device, physicalDevice);
1586
1587 *pMemoryProperties = physical_device->memory_properties;
1588 }
1589
1590 static void
1591 radv_get_memory_budget_properties(VkPhysicalDevice physicalDevice,
1592 VkPhysicalDeviceMemoryBudgetPropertiesEXT *memoryBudget)
1593 {
1594 RADV_FROM_HANDLE(radv_physical_device, device, physicalDevice);
1595 VkPhysicalDeviceMemoryProperties *memory_properties = &device->memory_properties;
1596 uint64_t visible_vram_size = radv_get_visible_vram_size(device);
1597 uint64_t vram_size = radv_get_vram_size(device);
1598 uint64_t gtt_size = device->rad_info.gart_size;
1599 uint64_t heap_budget, heap_usage;
1600
1601 /* For all memory heaps, the budget is computed as follows:
1602 * heap_budget = heap_size - global_heap_usage + app_heap_usage
1603 *
1604 * The Vulkan spec 1.1.97 says that the budget should include any
1605 * currently allocated device memory.
1606 *
1607 * Note that the application heap usages are not really accurate (e.g.
1608 * in the presence of shared buffers).
1609 */
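/* Worked example: for a heap of 8 GiB with 3 GiB allocated system-wide, of
 * which 1 GiB belongs to this application, the reported budget is
 * 8 - 3 + 1 = 6 GiB and the reported usage is 1 GiB.
 */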
1610 for (int i = 0; i < device->memory_properties.memoryTypeCount; i++) {
1611 uint32_t heap_index = device->memory_properties.memoryTypes[i].heapIndex;
1612
1613 switch (device->mem_type_indices[i]) {
1614 case RADV_MEM_TYPE_VRAM:
1615 heap_usage = device->ws->query_value(device->ws,
1616 RADEON_ALLOCATED_VRAM);
1617
1618 heap_budget = vram_size -
1619 device->ws->query_value(device->ws, RADEON_VRAM_USAGE) +
1620 heap_usage;
1621
1622 memoryBudget->heapBudget[heap_index] = heap_budget;
1623 memoryBudget->heapUsage[heap_index] = heap_usage;
1624 break;
1625 case RADV_MEM_TYPE_VRAM_CPU_ACCESS:
1626 heap_usage = device->ws->query_value(device->ws,
1627 RADEON_ALLOCATED_VRAM_VIS);
1628
1629 heap_budget = visible_vram_size -
1630 device->ws->query_value(device->ws, RADEON_VRAM_VIS_USAGE) +
1631 heap_usage;
1632
1633 memoryBudget->heapBudget[heap_index] = heap_budget;
1634 memoryBudget->heapUsage[heap_index] = heap_usage;
1635 break;
1636 case RADV_MEM_TYPE_GTT_WRITE_COMBINE:
1637 heap_usage = device->ws->query_value(device->ws,
1638 RADEON_ALLOCATED_GTT);
1639
1640 heap_budget = gtt_size -
1641 device->ws->query_value(device->ws, RADEON_GTT_USAGE) +
1642 heap_usage;
1643
1644 memoryBudget->heapBudget[heap_index] = heap_budget;
1645 memoryBudget->heapUsage[heap_index] = heap_usage;
1646 break;
1647 default:
1648 break;
1649 }
1650 }
1651
1652 /* The heapBudget and heapUsage values must be zero for array elements
1653 * greater than or equal to
1654 * VkPhysicalDeviceMemoryProperties::memoryHeapCount.
1655 */
1656 for (uint32_t i = memory_properties->memoryHeapCount; i < VK_MAX_MEMORY_HEAPS; i++) {
1657 memoryBudget->heapBudget[i] = 0;
1658 memoryBudget->heapUsage[i] = 0;
1659 }
1660 }
1661
1662 void radv_GetPhysicalDeviceMemoryProperties2(
1663 VkPhysicalDevice physicalDevice,
1664 VkPhysicalDeviceMemoryProperties2 *pMemoryProperties)
1665 {
1666 radv_GetPhysicalDeviceMemoryProperties(physicalDevice,
1667 &pMemoryProperties->memoryProperties);
1668
1669 VkPhysicalDeviceMemoryBudgetPropertiesEXT *memory_budget =
1670 vk_find_struct(pMemoryProperties->pNext,
1671 PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT);
1672 if (memory_budget)
1673 radv_get_memory_budget_properties(physicalDevice, memory_budget);
1674 }
1675
1676 VkResult radv_GetMemoryHostPointerPropertiesEXT(
1677 VkDevice _device,
1678 VkExternalMemoryHandleTypeFlagBits handleType,
1679 const void *pHostPointer,
1680 VkMemoryHostPointerPropertiesEXT *pMemoryHostPointerProperties)
1681 {
1682 RADV_FROM_HANDLE(radv_device, device, _device);
1683
1684 switch (handleType)
1685 {
1686 case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT: {
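/* Imported host allocations are only exposed through the cacheable GTT
 * memory type, so restrict memoryTypeBits to that single type.
 */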
1687 const struct radv_physical_device *physical_device = device->physical_device;
1688 uint32_t memoryTypeBits = 0;
1689 for (int i = 0; i < physical_device->memory_properties.memoryTypeCount; i++) {
1690 if (physical_device->mem_type_indices[i] == RADV_MEM_TYPE_GTT_CACHED) {
1691 memoryTypeBits = (1 << i);
1692 break;
1693 }
1694 }
1695 pMemoryHostPointerProperties->memoryTypeBits = memoryTypeBits;
1696 return VK_SUCCESS;
1697 }
1698 default:
1699 return VK_ERROR_INVALID_EXTERNAL_HANDLE;
1700 }
1701 }
1702
1703 static enum radeon_ctx_priority
1704 radv_get_queue_global_priority(const VkDeviceQueueGlobalPriorityCreateInfoEXT *pObj)
1705 {
1706 /* Default to MEDIUM when a specific global priority isn't requested */
1707 if (!pObj)
1708 return RADEON_CTX_PRIORITY_MEDIUM;
1709
1710 switch(pObj->globalPriority) {
1711 case VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT:
1712 return RADEON_CTX_PRIORITY_REALTIME;
1713 case VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT:
1714 return RADEON_CTX_PRIORITY_HIGH;
1715 case VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT:
1716 return RADEON_CTX_PRIORITY_MEDIUM;
1717 case VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT:
1718 return RADEON_CTX_PRIORITY_LOW;
1719 default:
1720 unreachable("Illegal global priority value");
1721 return RADEON_CTX_PRIORITY_INVALID;
1722 }
1723 }
1724
1725 static int
1726 radv_queue_init(struct radv_device *device, struct radv_queue *queue,
1727 uint32_t queue_family_index, int idx,
1728 VkDeviceQueueCreateFlags flags,
1729 const VkDeviceQueueGlobalPriorityCreateInfoEXT *global_priority)
1730 {
1731 queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1732 queue->device = device;
1733 queue->queue_family_index = queue_family_index;
1734 queue->queue_idx = idx;
1735 queue->priority = radv_get_queue_global_priority(global_priority);
1736 queue->flags = flags;
1737
1738 queue->hw_ctx = device->ws->ctx_create(device->ws, queue->priority);
1739 if (!queue->hw_ctx)
1740 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1741
1742 return VK_SUCCESS;
1743 }
1744
1745 static void
1746 radv_queue_finish(struct radv_queue *queue)
1747 {
1748 if (queue->hw_ctx)
1749 queue->device->ws->ctx_destroy(queue->hw_ctx);
1750
1751 if (queue->initial_full_flush_preamble_cs)
1752 queue->device->ws->cs_destroy(queue->initial_full_flush_preamble_cs);
1753 if (queue->initial_preamble_cs)
1754 queue->device->ws->cs_destroy(queue->initial_preamble_cs);
1755 if (queue->continue_preamble_cs)
1756 queue->device->ws->cs_destroy(queue->continue_preamble_cs);
1757 if (queue->descriptor_bo)
1758 queue->device->ws->buffer_destroy(queue->descriptor_bo);
1759 if (queue->scratch_bo)
1760 queue->device->ws->buffer_destroy(queue->scratch_bo);
1761 if (queue->esgs_ring_bo)
1762 queue->device->ws->buffer_destroy(queue->esgs_ring_bo);
1763 if (queue->gsvs_ring_bo)
1764 queue->device->ws->buffer_destroy(queue->gsvs_ring_bo);
1765 if (queue->tess_rings_bo)
1766 queue->device->ws->buffer_destroy(queue->tess_rings_bo);
1767 if (queue->gds_bo)
1768 queue->device->ws->buffer_destroy(queue->gds_bo);
1769 if (queue->gds_oa_bo)
1770 queue->device->ws->buffer_destroy(queue->gds_oa_bo);
1771 if (queue->compute_scratch_bo)
1772 queue->device->ws->buffer_destroy(queue->compute_scratch_bo);
1773 }
1774
1775 static void
1776 radv_bo_list_init(struct radv_bo_list *bo_list)
1777 {
1778 pthread_mutex_init(&bo_list->mutex, NULL);
1779 bo_list->list.count = bo_list->capacity = 0;
1780 bo_list->list.bos = NULL;
1781 }
1782
1783 static void
1784 radv_bo_list_finish(struct radv_bo_list *bo_list)
1785 {
1786 free(bo_list->list.bos);
1787 pthread_mutex_destroy(&bo_list->mutex);
1788 }
1789
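/* The global BO list is a flat array guarded by a mutex. It is only
 * maintained when use_global_bo_list is set, and BOs created as local are
 * skipped since the winsys keeps them resident without an explicit list
 * entry.
 */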
1790 static VkResult radv_bo_list_add(struct radv_device *device,
1791 struct radeon_winsys_bo *bo)
1792 {
1793 struct radv_bo_list *bo_list = &device->bo_list;
1794
1795 if (bo->is_local)
1796 return VK_SUCCESS;
1797
1798 if (unlikely(!device->use_global_bo_list))
1799 return VK_SUCCESS;
1800
1801 pthread_mutex_lock(&bo_list->mutex);
1802 if (bo_list->list.count == bo_list->capacity) {
1803 unsigned capacity = MAX2(4, bo_list->capacity * 2);
1804 void *data = realloc(bo_list->list.bos, capacity * sizeof(struct radeon_winsys_bo*));
1805
1806 if (!data) {
1807 pthread_mutex_unlock(&bo_list->mutex);
1808 return VK_ERROR_OUT_OF_HOST_MEMORY;
1809 }
1810
1811 bo_list->list.bos = (struct radeon_winsys_bo**)data;
1812 bo_list->capacity = capacity;
1813 }
1814
1815 bo_list->list.bos[bo_list->list.count++] = bo;
1816 pthread_mutex_unlock(&bo_list->mutex);
1817 return VK_SUCCESS;
1818 }
1819
1820 static void radv_bo_list_remove(struct radv_device *device,
1821 struct radeon_winsys_bo *bo)
1822 {
1823 struct radv_bo_list *bo_list = &device->bo_list;
1824
1825 if (bo->is_local)
1826 return;
1827
1828 if (unlikely(!device->use_global_bo_list))
1829 return;
1830
1831 pthread_mutex_lock(&bo_list->mutex);
1832 for(unsigned i = 0; i < bo_list->list.count; ++i) {
1833 if (bo_list->list.bos[i] == bo) {
1834 bo_list->list.bos[i] = bo_list->list.bos[bo_list->list.count - 1];
1835 --bo_list->list.count;
1836 break;
1837 }
1838 }
1839 pthread_mutex_unlock(&bo_list->mutex);
1840 }
1841
1842 static void
1843 radv_device_init_gs_info(struct radv_device *device)
1844 {
1845 device->gs_table_depth = ac_get_gs_table_depth(device->physical_device->rad_info.chip_class,
1846 device->physical_device->rad_info.family);
1847 }
1848
1849 static int radv_get_device_extension_index(const char *name)
1850 {
1851 for (unsigned i = 0; i < RADV_DEVICE_EXTENSION_COUNT; ++i) {
1852 if (strcmp(name, radv_device_extensions[i].extensionName) == 0)
1853 return i;
1854 }
1855 return -1;
1856 }
1857
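/* Parse an integer debug option from the environment. strtol() is called
 * with base 0, so decimal, hexadecimal (0x...) and octal values are all
 * accepted; default_value is returned when the variable is unset or
 * contains no number.
 */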
1858 static int
1859 radv_get_int_debug_option(const char *name, int default_value)
1860 {
1861 const char *str;
1862 int result;
1863
1864 str = getenv(name);
1865 if (!str) {
1866 result = default_value;
1867 } else {
1868 char *endptr;
1869
1870 result = strtol(str, &endptr, 0);
1871 if (str == endptr) {
1872 /* No digits found. */
1873 result = default_value;
1874 }
1875 }
1876
1877 return result;
1878 }
1879
1880 VkResult radv_CreateDevice(
1881 VkPhysicalDevice physicalDevice,
1882 const VkDeviceCreateInfo* pCreateInfo,
1883 const VkAllocationCallbacks* pAllocator,
1884 VkDevice* pDevice)
1885 {
1886 RADV_FROM_HANDLE(radv_physical_device, physical_device, physicalDevice);
1887 VkResult result;
1888 struct radv_device *device;
1889
1890 bool keep_shader_info = false;
1891
1892 /* Check enabled features */
1893 if (pCreateInfo->pEnabledFeatures) {
1894 VkPhysicalDeviceFeatures supported_features;
1895 radv_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
1896 VkBool32 *supported_feature = (VkBool32 *)&supported_features;
1897 VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
1898 unsigned num_features = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
1899 for (uint32_t i = 0; i < num_features; i++) {
1900 if (enabled_feature[i] && !supported_feature[i])
1901 return vk_error(physical_device->instance, VK_ERROR_FEATURE_NOT_PRESENT);
1902 }
1903 }
1904
1905 device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
1906 sizeof(*device), 8,
1907 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1908 if (!device)
1909 return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1910
1911 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1912 device->instance = physical_device->instance;
1913 device->physical_device = physical_device;
1914
1915 device->ws = physical_device->ws;
1916 if (pAllocator)
1917 device->alloc = *pAllocator;
1918 else
1919 device->alloc = physical_device->instance->alloc;
1920
1921 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
1922 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
1923 int index = radv_get_device_extension_index(ext_name);
1924 if (index < 0 || !physical_device->supported_extensions.extensions[index]) {
1925 vk_free(&device->alloc, device);
1926 return vk_error(physical_device->instance, VK_ERROR_EXTENSION_NOT_PRESENT);
1927 }
1928
1929 device->enabled_extensions.extensions[index] = true;
1930 }
1931
1932 keep_shader_info = device->enabled_extensions.AMD_shader_info;
1933
1934 /* With update-after-bind we can no longer attach BOs to the command buffer
1935 * from the descriptor set, so we have to use a global BO list.
1936 */
1937 device->use_global_bo_list =
1938 (device->instance->perftest_flags & RADV_PERFTEST_BO_LIST) ||
1939 device->enabled_extensions.EXT_descriptor_indexing ||
1940 device->enabled_extensions.EXT_buffer_device_address;
1941
1942 device->robust_buffer_access = pCreateInfo->pEnabledFeatures &&
1943 pCreateInfo->pEnabledFeatures->robustBufferAccess;
1944
1945 mtx_init(&device->shader_slab_mutex, mtx_plain);
1946 list_inithead(&device->shader_slabs);
1947
1948 radv_bo_list_init(&device->bo_list);
1949
1950 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
1951 const VkDeviceQueueCreateInfo *queue_create = &pCreateInfo->pQueueCreateInfos[i];
1952 uint32_t qfi = queue_create->queueFamilyIndex;
1953 const VkDeviceQueueGlobalPriorityCreateInfoEXT *global_priority =
1954 vk_find_struct_const(queue_create->pNext, DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT);
1955
1956 assert(!global_priority || device->physical_device->rad_info.has_ctx_priority);
1957
1958 device->queues[qfi] = vk_alloc(&device->alloc,
1959 queue_create->queueCount * sizeof(struct radv_queue), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1960 if (!device->queues[qfi]) {
1961 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1962 goto fail;
1963 }
1964
1965 memset(device->queues[qfi], 0, queue_create->queueCount * sizeof(struct radv_queue));
1966
1967 device->queue_count[qfi] = queue_create->queueCount;
1968
1969 for (unsigned q = 0; q < queue_create->queueCount; q++) {
1970 result = radv_queue_init(device, &device->queues[qfi][q],
1971 qfi, q, queue_create->flags,
1972 global_priority);
1973 if (result != VK_SUCCESS)
1974 goto fail;
1975 }
1976 }
1977
1978 device->pbb_allowed = device->physical_device->rad_info.chip_class >= GFX9 &&
1979 !(device->instance->debug_flags & RADV_DEBUG_NOBINNING);
1980
1981 /* Disable DFSM by default. As of 2019-09-15 Talos on Low is still 3% slower on Raven. */
1982 device->dfsm_allowed = device->pbb_allowed &&
1983 (device->instance->perftest_flags & RADV_PERFTEST_DFSM);
1984
1985 #ifdef ANDROID
1986 device->always_use_syncobj = device->physical_device->rad_info.has_syncobj_wait_for_submit;
1987 #endif
1988
1989 /* The maximum number of scratch waves. Scratch space isn't divided
1990 * evenly between CUs. The number is only a function of the number of CUs.
1991 * We can decrease the constant to decrease the scratch buffer size.
1992 *
1993 * sctx->scratch_waves must be >= the maximum possible size of
1994 * 1 threadgroup, so that the hw doesn't hang from being unable
1995 * to start any.
1996 *
1997 * The recommended value is 4 per CU at most. Higher numbers don't
1998 * bring much benefit, but they still occupy chip resources (think
1999 * async compute). I've seen ~2% performance difference between 4 and 32.
2000 */
2001 uint32_t max_threads_per_block = 2048;
2002 device->scratch_waves = MAX2(32 * physical_device->rad_info.num_good_compute_units,
2003 max_threads_per_block / 64);
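/* For example, a GPU with 40 CUs gets MAX2(32 * 40, 2048 / 64) = 1280 scratch waves. */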
2004
2005 device->dispatch_initiator = S_00B800_COMPUTE_SHADER_EN(1) |
2006 S_00B800_CS_W32_EN(device->physical_device->cs_wave_size == 32);
2007
2008 if (device->physical_device->rad_info.chip_class >= GFX7) {
2009 /* If the KMD allows it (there is a KMD hw register for it),
2010 * allow launching waves out-of-order.
2011 */
2012 device->dispatch_initiator |= S_00B800_ORDER_MODE(1);
2013 }
2014
2015 radv_device_init_gs_info(device);
2016
2017 device->tess_offchip_block_dw_size =
2018 device->physical_device->rad_info.family == CHIP_HAWAII ? 4096 : 8192;
2019
2020 if (getenv("RADV_TRACE_FILE")) {
2021 const char *filename = getenv("RADV_TRACE_FILE");
2022
2023 keep_shader_info = true;
2024
2025 if (!radv_init_trace(device))
2026 goto fail;
2027
2028 fprintf(stderr, "*****************************************************************************\n");
2029 fprintf(stderr, "* WARNING: RADV_TRACE_FILE is costly and should only be used for debugging! *\n");
2030 fprintf(stderr, "*****************************************************************************\n");
2031
2032 fprintf(stderr, "Trace file will be dumped to %s\n", filename);
2033 radv_dump_enabled_options(device, stderr);
2034 }
2035
2036 device->keep_shader_info = keep_shader_info;
2037
2038 result = radv_device_init_meta(device);
2039 if (result != VK_SUCCESS)
2040 goto fail;
2041
2042 radv_device_init_msaa(device);
2043
2044 for (int family = 0; family < RADV_MAX_QUEUE_FAMILIES; ++family) {
2045 device->empty_cs[family] = device->ws->cs_create(device->ws, family);
2046 switch (family) {
2047 case RADV_QUEUE_GENERAL:
2048 /* Since amdgpu version 3.6.0, CONTEXT_CONTROL is emitted by the kernel */
2049 if (device->physical_device->rad_info.drm_minor < 6) {
2050 radeon_emit(device->empty_cs[family], PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
2051 radeon_emit(device->empty_cs[family], CONTEXT_CONTROL_LOAD_ENABLE(1));
2052 radeon_emit(device->empty_cs[family], CONTEXT_CONTROL_SHADOW_ENABLE(1));
2053 }
2054 break;
2055 case RADV_QUEUE_COMPUTE:
2056 radeon_emit(device->empty_cs[family], PKT3(PKT3_NOP, 0, 0));
2057 radeon_emit(device->empty_cs[family], 0);
2058 break;
2059 }
2060 device->ws->cs_finalize(device->empty_cs[family]);
2061 }
2062
2063 if (device->physical_device->rad_info.chip_class >= GFX7)
2064 cik_create_gfx_config(device);
2065
2066 VkPipelineCacheCreateInfo ci;
2067 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
2068 ci.pNext = NULL;
2069 ci.flags = 0;
2070 ci.pInitialData = NULL;
2071 ci.initialDataSize = 0;
2072 VkPipelineCache pc;
2073 result = radv_CreatePipelineCache(radv_device_to_handle(device),
2074 &ci, NULL, &pc);
2075 if (result != VK_SUCCESS)
2076 goto fail_meta;
2077
2078 device->mem_cache = radv_pipeline_cache_from_handle(pc);
2079
2080 device->force_aniso =
2081 MIN2(16, radv_get_int_debug_option("RADV_TEX_ANISO", -1));
2082 if (device->force_aniso >= 0) {
2083 fprintf(stderr, "radv: Forcing anisotropy filter to %ix\n",
2084 1 << util_logbase2(device->force_aniso));
2085 }
2086
2087 *pDevice = radv_device_to_handle(device);
2088 return VK_SUCCESS;
2089
2090 fail_meta:
2091 radv_device_finish_meta(device);
2092 fail:
2093 radv_bo_list_finish(&device->bo_list);
2094
2095 if (device->trace_bo)
2096 device->ws->buffer_destroy(device->trace_bo);
2097
2098 if (device->gfx_init)
2099 device->ws->buffer_destroy(device->gfx_init);
2100
2101 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
2102 for (unsigned q = 0; q < device->queue_count[i]; q++)
2103 radv_queue_finish(&device->queues[i][q]);
2104 if (device->queue_count[i])
2105 vk_free(&device->alloc, device->queues[i]);
2106 }
2107
2108 vk_free(&device->alloc, device);
2109 return result;
2110 }
2111
2112 void radv_DestroyDevice(
2113 VkDevice _device,
2114 const VkAllocationCallbacks* pAllocator)
2115 {
2116 RADV_FROM_HANDLE(radv_device, device, _device);
2117
2118 if (!device)
2119 return;
2120
2121 if (device->trace_bo)
2122 device->ws->buffer_destroy(device->trace_bo);
2123
2124 if (device->gfx_init)
2125 device->ws->buffer_destroy(device->gfx_init);
2126
2127 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
2128 for (unsigned q = 0; q < device->queue_count[i]; q++)
2129 radv_queue_finish(&device->queues[i][q]);
2130 if (device->queue_count[i])
2131 vk_free(&device->alloc, device->queues[i]);
2132 if (device->empty_cs[i])
2133 device->ws->cs_destroy(device->empty_cs[i]);
2134 }
2135 radv_device_finish_meta(device);
2136
2137 VkPipelineCache pc = radv_pipeline_cache_to_handle(device->mem_cache);
2138 radv_DestroyPipelineCache(radv_device_to_handle(device), pc, NULL);
2139
2140 radv_destroy_shader_slabs(device);
2141
2142 radv_bo_list_finish(&device->bo_list);
2143 vk_free(&device->alloc, device);
2144 }
2145
2146 VkResult radv_EnumerateInstanceLayerProperties(
2147 uint32_t* pPropertyCount,
2148 VkLayerProperties* pProperties)
2149 {
2150 if (pProperties == NULL) {
2151 *pPropertyCount = 0;
2152 return VK_SUCCESS;
2153 }
2154
2155 /* None supported at this time */
2156 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
2157 }
2158
2159 VkResult radv_EnumerateDeviceLayerProperties(
2160 VkPhysicalDevice physicalDevice,
2161 uint32_t* pPropertyCount,
2162 VkLayerProperties* pProperties)
2163 {
2164 if (pProperties == NULL) {
2165 *pPropertyCount = 0;
2166 return VK_SUCCESS;
2167 }
2168
2169 /* None supported at this time */
2170 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
2171 }
2172
2173 void radv_GetDeviceQueue2(
2174 VkDevice _device,
2175 const VkDeviceQueueInfo2* pQueueInfo,
2176 VkQueue* pQueue)
2177 {
2178 RADV_FROM_HANDLE(radv_device, device, _device);
2179 struct radv_queue *queue;
2180
2181 queue = &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
2182 if (pQueueInfo->flags != queue->flags) {
2183 /* From the Vulkan 1.1.70 spec:
2184 *
2185 * "The queue returned by vkGetDeviceQueue2 must have the same
2186 * flags value from this structure as that used at device
2187 * creation time in a VkDeviceQueueCreateInfo instance. If no
2188 * matching flags were specified at device creation time then
2189 * pQueue will return VK_NULL_HANDLE."
2190 */
2191 *pQueue = VK_NULL_HANDLE;
2192 return;
2193 }
2194
2195 *pQueue = radv_queue_to_handle(queue);
2196 }
2197
2198 void radv_GetDeviceQueue(
2199 VkDevice _device,
2200 uint32_t queueFamilyIndex,
2201 uint32_t queueIndex,
2202 VkQueue* pQueue)
2203 {
2204 const VkDeviceQueueInfo2 info = (VkDeviceQueueInfo2) {
2205 .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
2206 .queueFamilyIndex = queueFamilyIndex,
2207 .queueIndex = queueIndex
2208 };
2209
2210 radv_GetDeviceQueue2(_device, &info, pQueue);
2211 }
2212
2213 static void
2214 fill_geom_tess_rings(struct radv_queue *queue,
2215 uint32_t *map,
2216 bool add_sample_positions,
2217 uint32_t esgs_ring_size,
2218 struct radeon_winsys_bo *esgs_ring_bo,
2219 uint32_t gsvs_ring_size,
2220 struct radeon_winsys_bo *gsvs_ring_bo,
2221 uint32_t tess_factor_ring_size,
2222 uint32_t tess_offchip_ring_offset,
2223 uint32_t tess_offchip_ring_size,
2224 struct radeon_winsys_bo *tess_rings_bo)
2225 {
2226 uint32_t *desc = &map[4];
2227
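/* Each ring below is described by a 4-dword buffer descriptor (V#): dword 0
 * holds the low bits of the base address, dword 1 the high address bits plus
 * stride/swizzle, dword 2 the number of records, and dword 3 the destination
 * swizzles, format and index-stride/ADD_TID flags.
 */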
2228 if (esgs_ring_bo) {
2229 uint64_t esgs_va = radv_buffer_get_va(esgs_ring_bo);
2230
2231 /* stride 0, num records - size, add tid, swizzle, elsize4,
2232 index stride 64 */
2233 desc[0] = esgs_va;
2234 desc[1] = S_008F04_BASE_ADDRESS_HI(esgs_va >> 32) |
2235 S_008F04_SWIZZLE_ENABLE(true);
2236 desc[2] = esgs_ring_size;
2237 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
2238 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
2239 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
2240 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
2241 S_008F0C_INDEX_STRIDE(3) |
2242 S_008F0C_ADD_TID_ENABLE(1);
2243
2244 if (queue->device->physical_device->rad_info.chip_class >= GFX10) {
2245 desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
2246 S_008F0C_OOB_SELECT(2) |
2247 S_008F0C_RESOURCE_LEVEL(1);
2248 } else {
2249 desc[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
2250 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
2251 S_008F0C_ELEMENT_SIZE(1);
2252 }
2253
2254 /* GS entry for ES->GS ring */
2255 /* stride 0, num records - size, elsize0,
2256 index stride 0 */
2257 desc[4] = esgs_va;
2258 desc[5] = S_008F04_BASE_ADDRESS_HI(esgs_va >> 32);
2259 desc[6] = esgs_ring_size;
2260 desc[7] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
2261 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
2262 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
2263 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
2264
2265 if (queue->device->physical_device->rad_info.chip_class >= GFX10) {
2266 desc[7] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
2267 S_008F0C_OOB_SELECT(2) |
2268 S_008F0C_RESOURCE_LEVEL(1);
2269 } else {
2270 desc[7] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
2271 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
2272 }
2273 }
2274
2275 desc += 8;
2276
2277 if (gsvs_ring_bo) {
2278 uint64_t gsvs_va = radv_buffer_get_va(gsvs_ring_bo);
2279
2280 /* VS entry for GS->VS ring */
2281 /* stride 0, num records - size, elsize0,
2282 index stride 0 */
2283 desc[0] = gsvs_va;
2284 desc[1] = S_008F04_BASE_ADDRESS_HI(gsvs_va >> 32);
2285 desc[2] = gsvs_ring_size;
2286 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
2287 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
2288 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
2289 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
2290
2291 if (queue->device->physical_device->rad_info.chip_class >= GFX10) {
2292 desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
2293 S_008F0C_OOB_SELECT(2) |
2294 S_008F0C_RESOURCE_LEVEL(1);
2295 } else {
2296 desc[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
2297 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
2298 }
2299
2300 /* stride gsvs_itemsize, num records 64
2301 elsize 4, index stride 16 */
2302 /* shader will patch stride and desc[2] */
2303 desc[4] = gsvs_va;
2304 desc[5] = S_008F04_BASE_ADDRESS_HI(gsvs_va >> 32) |
2305 S_008F04_SWIZZLE_ENABLE(1);
2306 desc[6] = 0;
2307 desc[7] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
2308 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
2309 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
2310 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
2311 S_008F0C_INDEX_STRIDE(1) |
2312 S_008F0C_ADD_TID_ENABLE(true);
2313
2314 if (queue->device->physical_device->rad_info.chip_class >= GFX10) {
2315 desc[7] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
2316 S_008F0C_OOB_SELECT(2) |
2317 S_008F0C_RESOURCE_LEVEL(1);
2318 } else {
2319 desc[7] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
2320 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
2321 S_008F0C_ELEMENT_SIZE(1);
2322 }
2323
2324 }
2325
2326 desc += 8;
2327
2328 if (tess_rings_bo) {
2329 uint64_t tess_va = radv_buffer_get_va(tess_rings_bo);
2330 uint64_t tess_offchip_va = tess_va + tess_offchip_ring_offset;
2331
2332 desc[0] = tess_va;
2333 desc[1] = S_008F04_BASE_ADDRESS_HI(tess_va >> 32);
2334 desc[2] = tess_factor_ring_size;
2335 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
2336 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
2337 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
2338 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
2339
2340 if (queue->device->physical_device->rad_info.chip_class >= GFX10) {
2341 desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
2342 S_008F0C_OOB_SELECT(3) |
2343 S_008F0C_RESOURCE_LEVEL(1);
2344 } else {
2345 desc[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
2346 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
2347 }
2348
2349 desc[4] = tess_offchip_va;
2350 desc[5] = S_008F04_BASE_ADDRESS_HI(tess_offchip_va >> 32);
2351 desc[6] = tess_offchip_ring_size;
2352 desc[7] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
2353 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
2354 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
2355 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
2356
2357 if (queue->device->physical_device->rad_info.chip_class >= GFX10) {
2358 desc[7] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
2359 S_008F0C_OOB_SELECT(3) |
2360 S_008F0C_RESOURCE_LEVEL(1);
2361 } else {
2362 desc[7] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
2363 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
2364 }
2365 }
2366
2367 desc += 8;
2368
2369 if (add_sample_positions) {
2370 /* add sample positions after all rings */
2371 memcpy(desc, queue->device->sample_locations_1x, 8);
2372 desc += 2;
2373 memcpy(desc, queue->device->sample_locations_2x, 16);
2374 desc += 4;
2375 memcpy(desc, queue->device->sample_locations_4x, 32);
2376 desc += 8;
2377 memcpy(desc, queue->device->sample_locations_8x, 64);
2378 }
2379 }
2380
2381 static unsigned
2382 radv_get_hs_offchip_param(struct radv_device *device, uint32_t *max_offchip_buffers_p)
2383 {
2384 bool double_offchip_buffers = device->physical_device->rad_info.chip_class >= GFX7 &&
2385 device->physical_device->rad_info.family != CHIP_CARRIZO &&
2386 device->physical_device->rad_info.family != CHIP_STONEY;
2387 unsigned max_offchip_buffers_per_se = double_offchip_buffers ? 128 : 64;
2388 unsigned max_offchip_buffers;
2389 unsigned offchip_granularity;
2390 unsigned hs_offchip_param;
2391
2392 /*
2393 * Per RadeonSI:
2394 * This must be one less than the maximum number due to a hw limitation.
2395 * Various hardware bugs need this.
2396 *
2397 * Per AMDVLK:
2398 * Vega10 should limit max_offchip_buffers to 508 (4 * 127).
2399 * Gfx7 should limit max_offchip_buffers to 508
2400 * Gfx6 should limit max_offchip_buffers to 126 (2 * 63)
2401 *
2402 * Follow AMDVLK here.
2403 */
2404 if (device->physical_device->rad_info.chip_class >= GFX10) {
2405 max_offchip_buffers_per_se = 256;
2406 } else if (device->physical_device->rad_info.family == CHIP_VEGA10 ||
2407 device->physical_device->rad_info.chip_class == GFX7 ||
2408 device->physical_device->rad_info.chip_class == GFX6)
2409 --max_offchip_buffers_per_se;
2410
2411 max_offchip_buffers = max_offchip_buffers_per_se *
2412 device->physical_device->rad_info.max_se;
2413
2414 /* Hawaii has a bug with offchip buffers > 256 that can be worked
2415 * around by setting 4K granularity.
2416 */
2417 if (device->tess_offchip_block_dw_size == 4096) {
2418 assert(device->physical_device->rad_info.family == CHIP_HAWAII);
2419 offchip_granularity = V_03093C_X_4K_DWORDS;
2420 } else {
2421 assert(device->tess_offchip_block_dw_size == 8192);
2422 offchip_granularity = V_03093C_X_8K_DWORDS;
2423 }
2424
2425 switch (device->physical_device->rad_info.chip_class) {
2426 case GFX6:
2427 max_offchip_buffers = MIN2(max_offchip_buffers, 126);
2428 break;
2429 case GFX7:
2430 case GFX8:
2431 case GFX9:
2432 max_offchip_buffers = MIN2(max_offchip_buffers, 508);
2433 break;
2434 case GFX10:
2435 break;
2436 default:
2437 break;
2438 }
2439
2440 *max_offchip_buffers_p = max_offchip_buffers;
2441 if (device->physical_device->rad_info.chip_class >= GFX7) {
2442 if (device->physical_device->rad_info.chip_class >= GFX8)
2443 --max_offchip_buffers;
2444 hs_offchip_param =
2445 S_03093C_OFFCHIP_BUFFERING(max_offchip_buffers) |
2446 S_03093C_OFFCHIP_GRANULARITY(offchip_granularity);
2447 } else {
2448 hs_offchip_param =
2449 S_0089B0_OFFCHIP_BUFFERING(max_offchip_buffers);
2450 }
2451 return hs_offchip_param;
2452 }
2453
2454 static void
2455 radv_emit_gs_ring_sizes(struct radv_queue *queue, struct radeon_cmdbuf *cs,
2456 struct radeon_winsys_bo *esgs_ring_bo,
2457 uint32_t esgs_ring_size,
2458 struct radeon_winsys_bo *gsvs_ring_bo,
2459 uint32_t gsvs_ring_size)
2460 {
2461 if (!esgs_ring_bo && !gsvs_ring_bo)
2462 return;
2463
2464 if (esgs_ring_bo)
2465 radv_cs_add_buffer(queue->device->ws, cs, esgs_ring_bo);
2466
2467 if (gsvs_ring_bo)
2468 radv_cs_add_buffer(queue->device->ws, cs, gsvs_ring_bo);
2469
2470 if (queue->device->physical_device->rad_info.chip_class >= GFX7) {
2471 radeon_set_uconfig_reg_seq(cs, R_030900_VGT_ESGS_RING_SIZE, 2);
2472 radeon_emit(cs, esgs_ring_size >> 8);
2473 radeon_emit(cs, gsvs_ring_size >> 8);
2474 } else {
2475 radeon_set_config_reg_seq(cs, R_0088C8_VGT_ESGS_RING_SIZE, 2);
2476 radeon_emit(cs, esgs_ring_size >> 8);
2477 radeon_emit(cs, gsvs_ring_size >> 8);
2478 }
2479 }
2480
2481 static void
2482 radv_emit_tess_factor_ring(struct radv_queue *queue, struct radeon_cmdbuf *cs,
2483 unsigned hs_offchip_param, unsigned tf_ring_size,
2484 struct radeon_winsys_bo *tess_rings_bo)
2485 {
2486 uint64_t tf_va;
2487
2488 if (!tess_rings_bo)
2489 return;
2490
2491 tf_va = radv_buffer_get_va(tess_rings_bo);
2492
2493 radv_cs_add_buffer(queue->device->ws, cs, tess_rings_bo);
2494
2495 if (queue->device->physical_device->rad_info.chip_class >= GFX7) {
2496 radeon_set_uconfig_reg(cs, R_030938_VGT_TF_RING_SIZE,
2497 S_030938_SIZE(tf_ring_size / 4));
2498 radeon_set_uconfig_reg(cs, R_030940_VGT_TF_MEMORY_BASE,
2499 tf_va >> 8);
2500
2501 if (queue->device->physical_device->rad_info.chip_class >= GFX10) {
2502 radeon_set_uconfig_reg(cs, R_030984_VGT_TF_MEMORY_BASE_HI_UMD,
2503 S_030984_BASE_HI(tf_va >> 40));
2504 } else if (queue->device->physical_device->rad_info.chip_class == GFX9) {
2505 radeon_set_uconfig_reg(cs, R_030944_VGT_TF_MEMORY_BASE_HI,
2506 S_030944_BASE_HI(tf_va >> 40));
2507 }
2508 radeon_set_uconfig_reg(cs, R_03093C_VGT_HS_OFFCHIP_PARAM,
2509 hs_offchip_param);
2510 } else {
2511 radeon_set_config_reg(cs, R_008988_VGT_TF_RING_SIZE,
2512 S_008988_SIZE(tf_ring_size / 4));
2513 radeon_set_config_reg(cs, R_0089B8_VGT_TF_MEMORY_BASE,
2514 tf_va >> 8);
2515 radeon_set_config_reg(cs, R_0089B0_VGT_HS_OFFCHIP_PARAM,
2516 hs_offchip_param);
2517 }
2518 }
2519
2520 static void
2521 radv_emit_compute_scratch(struct radv_queue *queue, struct radeon_cmdbuf *cs,
2522 struct radeon_winsys_bo *compute_scratch_bo)
2523 {
2524 uint64_t scratch_va;
2525
2526 if (!compute_scratch_bo)
2527 return;
2528
2529 scratch_va = radv_buffer_get_va(compute_scratch_bo);
2530
2531 radv_cs_add_buffer(queue->device->ws, cs, compute_scratch_bo);
2532
2533 radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
2534 radeon_emit(cs, scratch_va);
2535 radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
2536 S_008F04_SWIZZLE_ENABLE(1));
2537 }
2538
2539 static void
2540 radv_emit_global_shader_pointers(struct radv_queue *queue,
2541 struct radeon_cmdbuf *cs,
2542 struct radeon_winsys_bo *descriptor_bo)
2543 {
2544 uint64_t va;
2545
2546 if (!descriptor_bo)
2547 return;
2548
2549 va = radv_buffer_get_va(descriptor_bo);
2550
2551 radv_cs_add_buffer(queue->device->ws, cs, descriptor_bo);
2552
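/* GFX9 merged the LS stage into HS and the ES stage into GS, so only the
 * PS/VS/GS/HS user-data base registers need to be written on GFX9 and GFX10;
 * older generations program all six hardware stages.
 */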
2553 if (queue->device->physical_device->rad_info.chip_class >= GFX10) {
2554 uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0,
2555 R_00B130_SPI_SHADER_USER_DATA_VS_0,
2556 R_00B208_SPI_SHADER_USER_DATA_ADDR_LO_GS,
2557 R_00B408_SPI_SHADER_USER_DATA_ADDR_LO_HS};
2558
2559 for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
2560 radv_emit_shader_pointer(queue->device, cs, regs[i],
2561 va, true);
2562 }
2563 } else if (queue->device->physical_device->rad_info.chip_class == GFX9) {
2564 uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0,
2565 R_00B130_SPI_SHADER_USER_DATA_VS_0,
2566 R_00B208_SPI_SHADER_USER_DATA_ADDR_LO_GS,
2567 R_00B408_SPI_SHADER_USER_DATA_ADDR_LO_HS};
2568
2569 for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
2570 radv_emit_shader_pointer(queue->device, cs, regs[i],
2571 va, true);
2572 }
2573 } else {
2574 uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0,
2575 R_00B130_SPI_SHADER_USER_DATA_VS_0,
2576 R_00B230_SPI_SHADER_USER_DATA_GS_0,
2577 R_00B330_SPI_SHADER_USER_DATA_ES_0,
2578 R_00B430_SPI_SHADER_USER_DATA_HS_0,
2579 R_00B530_SPI_SHADER_USER_DATA_LS_0};
2580
2581 for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
2582 radv_emit_shader_pointer(queue->device, cs, regs[i],
2583 va, true);
2584 }
2585 }
2586 }
2587
2588 static void
2589 radv_init_graphics_state(struct radeon_cmdbuf *cs, struct radv_queue *queue)
2590 {
2591 struct radv_device *device = queue->device;
2592
2593 if (device->gfx_init) {
2594 uint64_t va = radv_buffer_get_va(device->gfx_init);
2595
2596 radeon_emit(cs, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
2597 radeon_emit(cs, va);
2598 radeon_emit(cs, va >> 32);
2599 radeon_emit(cs, device->gfx_init_size_dw & 0xffff);
2600
2601 radv_cs_add_buffer(device->ws, cs, device->gfx_init);
2602 } else {
2603 struct radv_physical_device *physical_device = device->physical_device;
2604 si_emit_graphics(physical_device, cs);
2605 }
2606 }
2607
2608 static void
2609 radv_init_compute_state(struct radeon_cmdbuf *cs, struct radv_queue *queue)
2610 {
2611 struct radv_physical_device *physical_device = queue->device->physical_device;
2612 si_emit_compute(physical_device, cs);
2613 }
2614
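/* Allocates (or reuses) the per-queue scratch buffers and ring BOs and builds
 * three preambles: dest_cs[0] performs a full cache flush before the first IBs
 * of a submission, dest_cs[1] only invalidates caches, and dest_cs[2] carries
 * no flush and is used when continuing a chained submission.
 */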
2615 static VkResult
2616 radv_get_preamble_cs(struct radv_queue *queue,
2617 uint32_t scratch_size,
2618 uint32_t compute_scratch_size,
2619 uint32_t esgs_ring_size,
2620 uint32_t gsvs_ring_size,
2621 bool needs_tess_rings,
2622 bool needs_gds,
2623 bool needs_sample_positions,
2624 struct radeon_cmdbuf **initial_full_flush_preamble_cs,
2625 struct radeon_cmdbuf **initial_preamble_cs,
2626 struct radeon_cmdbuf **continue_preamble_cs)
2627 {
2628 struct radeon_winsys_bo *scratch_bo = NULL;
2629 struct radeon_winsys_bo *descriptor_bo = NULL;
2630 struct radeon_winsys_bo *compute_scratch_bo = NULL;
2631 struct radeon_winsys_bo *esgs_ring_bo = NULL;
2632 struct radeon_winsys_bo *gsvs_ring_bo = NULL;
2633 struct radeon_winsys_bo *tess_rings_bo = NULL;
2634 struct radeon_winsys_bo *gds_bo = NULL;
2635 struct radeon_winsys_bo *gds_oa_bo = NULL;
2636 struct radeon_cmdbuf *dest_cs[3] = {0};
2637 bool add_tess_rings = false, add_gds = false, add_sample_positions = false;
2638 unsigned tess_factor_ring_size = 0, tess_offchip_ring_size = 0;
2639 unsigned max_offchip_buffers;
2640 unsigned hs_offchip_param = 0;
2641 unsigned tess_offchip_ring_offset;
2642 uint32_t ring_bo_flags = RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING;
2643 if (!queue->has_tess_rings) {
2644 if (needs_tess_rings)
2645 add_tess_rings = true;
2646 }
2647 if (!queue->has_gds) {
2648 if (needs_gds)
2649 add_gds = true;
2650 }
2651 if (!queue->has_sample_positions) {
2652 if (needs_sample_positions)
2653 add_sample_positions = true;
2654 }
2655 tess_factor_ring_size = 32768 * queue->device->physical_device->rad_info.max_se;
2656 hs_offchip_param = radv_get_hs_offchip_param(queue->device,
2657 &max_offchip_buffers);
2658 tess_offchip_ring_offset = align(tess_factor_ring_size, 64 * 1024);
2659 tess_offchip_ring_size = max_offchip_buffers *
2660 queue->device->tess_offchip_block_dw_size * 4;
2661
2662 if (scratch_size <= queue->scratch_size &&
2663 compute_scratch_size <= queue->compute_scratch_size &&
2664 esgs_ring_size <= queue->esgs_ring_size &&
2665 gsvs_ring_size <= queue->gsvs_ring_size &&
2666 !add_tess_rings && !add_gds && !add_sample_positions &&
2667 queue->initial_preamble_cs) {
2668 *initial_full_flush_preamble_cs = queue->initial_full_flush_preamble_cs;
2669 *initial_preamble_cs = queue->initial_preamble_cs;
2670 *continue_preamble_cs = queue->continue_preamble_cs;
2671 if (!scratch_size && !compute_scratch_size && !esgs_ring_size && !gsvs_ring_size &&
2672 !needs_tess_rings && !needs_gds && !needs_sample_positions)
2673 *continue_preamble_cs = NULL;
2674 return VK_SUCCESS;
2675 }
2676
2677 if (scratch_size > queue->scratch_size) {
2678 scratch_bo = queue->device->ws->buffer_create(queue->device->ws,
2679 scratch_size,
2680 4096,
2681 RADEON_DOMAIN_VRAM,
2682 ring_bo_flags,
2683 RADV_BO_PRIORITY_SCRATCH);
2684 if (!scratch_bo)
2685 goto fail;
2686 } else
2687 scratch_bo = queue->scratch_bo;
2688
2689 if (compute_scratch_size > queue->compute_scratch_size) {
2690 compute_scratch_bo = queue->device->ws->buffer_create(queue->device->ws,
2691 compute_scratch_size,
2692 4096,
2693 RADEON_DOMAIN_VRAM,
2694 ring_bo_flags,
2695 RADV_BO_PRIORITY_SCRATCH);
2696 if (!compute_scratch_bo)
2697 goto fail;
2698
2699 } else
2700 compute_scratch_bo = queue->compute_scratch_bo;
2701
2702 if (esgs_ring_size > queue->esgs_ring_size) {
2703 esgs_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
2704 esgs_ring_size,
2705 4096,
2706 RADEON_DOMAIN_VRAM,
2707 ring_bo_flags,
2708 RADV_BO_PRIORITY_SCRATCH);
2709 if (!esgs_ring_bo)
2710 goto fail;
2711 } else {
2712 esgs_ring_bo = queue->esgs_ring_bo;
2713 esgs_ring_size = queue->esgs_ring_size;
2714 }
2715
2716 if (gsvs_ring_size > queue->gsvs_ring_size) {
2717 gsvs_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
2718 gsvs_ring_size,
2719 4096,
2720 RADEON_DOMAIN_VRAM,
2721 ring_bo_flags,
2722 RADV_BO_PRIORITY_SCRATCH);
2723 if (!gsvs_ring_bo)
2724 goto fail;
2725 } else {
2726 gsvs_ring_bo = queue->gsvs_ring_bo;
2727 gsvs_ring_size = queue->gsvs_ring_size;
2728 }
2729
2730 if (add_tess_rings) {
2731 tess_rings_bo = queue->device->ws->buffer_create(queue->device->ws,
2732 tess_offchip_ring_offset + tess_offchip_ring_size,
2733 256,
2734 RADEON_DOMAIN_VRAM,
2735 ring_bo_flags,
2736 RADV_BO_PRIORITY_SCRATCH);
2737 if (!tess_rings_bo)
2738 goto fail;
2739 } else {
2740 tess_rings_bo = queue->tess_rings_bo;
2741 }
2742
2743 if (add_gds) {
2744 assert(queue->device->physical_device->rad_info.chip_class >= GFX10);
2745
2746 /* 4 streamout GDS counters.
2747 * We need 256B (64 dw) of GDS, otherwise streamout hangs.
2748 */
2749 gds_bo = queue->device->ws->buffer_create(queue->device->ws,
2750 256, 4,
2751 RADEON_DOMAIN_GDS,
2752 ring_bo_flags,
2753 RADV_BO_PRIORITY_SCRATCH);
2754 if (!gds_bo)
2755 goto fail;
2756
2757 gds_oa_bo = queue->device->ws->buffer_create(queue->device->ws,
2758 4, 1,
2759 RADEON_DOMAIN_OA,
2760 ring_bo_flags,
2761 RADV_BO_PRIORITY_SCRATCH);
2762 if (!gds_oa_bo)
2763 goto fail;
2764 } else {
2765 gds_bo = queue->gds_bo;
2766 gds_oa_bo = queue->gds_oa_bo;
2767 }
2768
2769 if (scratch_bo != queue->scratch_bo ||
2770 esgs_ring_bo != queue->esgs_ring_bo ||
2771 gsvs_ring_bo != queue->gsvs_ring_bo ||
2772 tess_rings_bo != queue->tess_rings_bo ||
2773 add_sample_positions) {
2774 uint32_t size = 0;
2775 if (gsvs_ring_bo || esgs_ring_bo ||
2776 tess_rings_bo || add_sample_positions) {
2777 size = 112; /* 2 dword + 2 padding + 4 dword * 6 */
2778 if (add_sample_positions)
2779 size += 128; /* sample positions: 8+16+32+64 = 120 bytes, padded to 128 */
2780 }
2781 else if (scratch_bo)
2782 size = 8; /* 2 dword */
2783
2784 descriptor_bo = queue->device->ws->buffer_create(queue->device->ws,
2785 size,
2786 4096,
2787 RADEON_DOMAIN_VRAM,
2788 RADEON_FLAG_CPU_ACCESS |
2789 RADEON_FLAG_NO_INTERPROCESS_SHARING |
2790 RADEON_FLAG_READ_ONLY,
2791 RADV_BO_PRIORITY_DESCRIPTOR);
2792 if (!descriptor_bo)
2793 goto fail;
2794 } else
2795 descriptor_bo = queue->descriptor_bo;
2796
2797 if (descriptor_bo != queue->descriptor_bo) {
2798 uint32_t *map = (uint32_t*)queue->device->ws->buffer_map(descriptor_bo);
2799
2800 if (scratch_bo) {
2801 uint64_t scratch_va = radv_buffer_get_va(scratch_bo);
2802 uint32_t rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
2803 S_008F04_SWIZZLE_ENABLE(1);
2804 map[0] = scratch_va;
2805 map[1] = rsrc1;
2806 }
2807
2808 if (esgs_ring_bo || gsvs_ring_bo || tess_rings_bo || add_sample_positions)
2809 fill_geom_tess_rings(queue, map, add_sample_positions,
2810 esgs_ring_size, esgs_ring_bo,
2811 gsvs_ring_size, gsvs_ring_bo,
2812 tess_factor_ring_size,
2813 tess_offchip_ring_offset,
2814 tess_offchip_ring_size,
2815 tess_rings_bo);
2816
2817 queue->device->ws->buffer_unmap(descriptor_bo);
2818 }
2819
2820 for(int i = 0; i < 3; ++i) {
2821 struct radeon_cmdbuf *cs = NULL;
2822 cs = queue->device->ws->cs_create(queue->device->ws,
2823 queue->queue_family_index ? RING_COMPUTE : RING_GFX);
2824 if (!cs)
2825 goto fail;
2826
2827 dest_cs[i] = cs;
2828
2829 if (scratch_bo)
2830 radv_cs_add_buffer(queue->device->ws, cs, scratch_bo);
2831
2832 /* Emit initial configuration. */
2833 switch (queue->queue_family_index) {
2834 case RADV_QUEUE_GENERAL:
2835 radv_init_graphics_state(cs, queue);
2836 break;
2837 case RADV_QUEUE_COMPUTE:
2838 radv_init_compute_state(cs, queue);
2839 break;
2840 case RADV_QUEUE_TRANSFER:
2841 break;
2842 }
2843
2844 if (esgs_ring_bo || gsvs_ring_bo || tess_rings_bo) {
2845 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
2846 radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
2847
2848 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
2849 radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
2850 }
2851
2852 radv_emit_gs_ring_sizes(queue, cs, esgs_ring_bo, esgs_ring_size,
2853 gsvs_ring_bo, gsvs_ring_size);
2854 radv_emit_tess_factor_ring(queue, cs, hs_offchip_param,
2855 tess_factor_ring_size, tess_rings_bo);
2856 radv_emit_global_shader_pointers(queue, cs, descriptor_bo);
2857 radv_emit_compute_scratch(queue, cs, compute_scratch_bo);
2858
2859 if (gds_bo)
2860 radv_cs_add_buffer(queue->device->ws, cs, gds_bo);
2861 if (gds_oa_bo)
2862 radv_cs_add_buffer(queue->device->ws, cs, gds_oa_bo);
2863
2864 if (i == 0) {
2865 si_cs_emit_cache_flush(cs,
2866 queue->device->physical_device->rad_info.chip_class,
2867 NULL, 0,
2868 queue->queue_family_index == RING_COMPUTE &&
2869 queue->device->physical_device->rad_info.chip_class >= GFX7,
2870 (queue->queue_family_index == RADV_QUEUE_COMPUTE ? RADV_CMD_FLAG_CS_PARTIAL_FLUSH : (RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH)) |
2871 RADV_CMD_FLAG_INV_ICACHE |
2872 RADV_CMD_FLAG_INV_SCACHE |
2873 RADV_CMD_FLAG_INV_VCACHE |
2874 RADV_CMD_FLAG_INV_L2 |
2875 RADV_CMD_FLAG_START_PIPELINE_STATS, 0);
2876 } else if (i == 1) {
2877 si_cs_emit_cache_flush(cs,
2878 queue->device->physical_device->rad_info.chip_class,
2879 NULL, 0,
2880 queue->queue_family_index == RING_COMPUTE &&
2881 queue->device->physical_device->rad_info.chip_class >= GFX7,
2882 RADV_CMD_FLAG_INV_ICACHE |
2883 RADV_CMD_FLAG_INV_SCACHE |
2884 RADV_CMD_FLAG_INV_VCACHE |
2885 RADV_CMD_FLAG_INV_L2 |
2886 RADV_CMD_FLAG_START_PIPELINE_STATS, 0);
2887 }
2888
2889 if (!queue->device->ws->cs_finalize(cs))
2890 goto fail;
2891 }
2892
2893 if (queue->initial_full_flush_preamble_cs)
2894 queue->device->ws->cs_destroy(queue->initial_full_flush_preamble_cs);
2895
2896 if (queue->initial_preamble_cs)
2897 queue->device->ws->cs_destroy(queue->initial_preamble_cs);
2898
2899 if (queue->continue_preamble_cs)
2900 queue->device->ws->cs_destroy(queue->continue_preamble_cs);
2901
2902 queue->initial_full_flush_preamble_cs = dest_cs[0];
2903 queue->initial_preamble_cs = dest_cs[1];
2904 queue->continue_preamble_cs = dest_cs[2];
2905
2906 if (scratch_bo != queue->scratch_bo) {
2907 if (queue->scratch_bo)
2908 queue->device->ws->buffer_destroy(queue->scratch_bo);
2909 queue->scratch_bo = scratch_bo;
2910 queue->scratch_size = scratch_size;
2911 }
2912
2913 if (compute_scratch_bo != queue->compute_scratch_bo) {
2914 if (queue->compute_scratch_bo)
2915 queue->device->ws->buffer_destroy(queue->compute_scratch_bo);
2916 queue->compute_scratch_bo = compute_scratch_bo;
2917 queue->compute_scratch_size = compute_scratch_size;
2918 }
2919
2920 if (esgs_ring_bo != queue->esgs_ring_bo) {
2921 if (queue->esgs_ring_bo)
2922 queue->device->ws->buffer_destroy(queue->esgs_ring_bo);
2923 queue->esgs_ring_bo = esgs_ring_bo;
2924 queue->esgs_ring_size = esgs_ring_size;
2925 }
2926
2927 if (gsvs_ring_bo != queue->gsvs_ring_bo) {
2928 if (queue->gsvs_ring_bo)
2929 queue->device->ws->buffer_destroy(queue->gsvs_ring_bo);
2930 queue->gsvs_ring_bo = gsvs_ring_bo;
2931 queue->gsvs_ring_size = gsvs_ring_size;
2932 }
2933
2934 if (tess_rings_bo != queue->tess_rings_bo) {
2935 queue->tess_rings_bo = tess_rings_bo;
2936 queue->has_tess_rings = true;
2937 }
2938
2939 if (gds_bo != queue->gds_bo) {
2940 queue->gds_bo = gds_bo;
2941 queue->has_gds = true;
2942 }
2943
2944 if (gds_oa_bo != queue->gds_oa_bo)
2945 queue->gds_oa_bo = gds_oa_bo;
2946
2947 if (descriptor_bo != queue->descriptor_bo) {
2948 if (queue->descriptor_bo)
2949 queue->device->ws->buffer_destroy(queue->descriptor_bo);
2950
2951 queue->descriptor_bo = descriptor_bo;
2952 }
2953
2954 if (add_sample_positions)
2955 queue->has_sample_positions = true;
2956
2957 *initial_full_flush_preamble_cs = queue->initial_full_flush_preamble_cs;
2958 *initial_preamble_cs = queue->initial_preamble_cs;
2959 *continue_preamble_cs = queue->continue_preamble_cs;
2960 if (!scratch_size && !compute_scratch_size && !esgs_ring_size && !gsvs_ring_size)
2961 *continue_preamble_cs = NULL;
2962 return VK_SUCCESS;
2963 fail:
2964 for (int i = 0; i < ARRAY_SIZE(dest_cs); ++i)
2965 if (dest_cs[i])
2966 queue->device->ws->cs_destroy(dest_cs[i]);
2967 if (descriptor_bo && descriptor_bo != queue->descriptor_bo)
2968 queue->device->ws->buffer_destroy(descriptor_bo);
2969 if (scratch_bo && scratch_bo != queue->scratch_bo)
2970 queue->device->ws->buffer_destroy(scratch_bo);
2971 if (compute_scratch_bo && compute_scratch_bo != queue->compute_scratch_bo)
2972 queue->device->ws->buffer_destroy(compute_scratch_bo);
2973 if (esgs_ring_bo && esgs_ring_bo != queue->esgs_ring_bo)
2974 queue->device->ws->buffer_destroy(esgs_ring_bo);
2975 if (gsvs_ring_bo && gsvs_ring_bo != queue->gsvs_ring_bo)
2976 queue->device->ws->buffer_destroy(gsvs_ring_bo);
2977 if (tess_rings_bo && tess_rings_bo != queue->tess_rings_bo)
2978 queue->device->ws->buffer_destroy(tess_rings_bo);
2979 if (gds_bo && gds_bo != queue->gds_bo)
2980 queue->device->ws->buffer_destroy(gds_bo);
2981 if (gds_oa_bo && gds_oa_bo != queue->gds_oa_bo)
2982 queue->device->ws->buffer_destroy(gds_oa_bo);
2983
2984 return vk_error(queue->device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
2985 }
2986
2987 static VkResult radv_alloc_sem_counts(struct radv_instance *instance,
2988 struct radv_winsys_sem_counts *counts,
2989 int num_sems,
2990 const VkSemaphore *sems,
2991 VkFence _fence,
2992 bool reset_temp)
2993 {
2994 int syncobj_idx = 0, sem_idx = 0;
2995
2996 if (num_sems == 0 && _fence == VK_NULL_HANDLE)
2997 return VK_SUCCESS;
2998
2999 for (uint32_t i = 0; i < num_sems; i++) {
3000 RADV_FROM_HANDLE(radv_semaphore, sem, sems[i]);
3001
3002 if (sem->temp_syncobj || sem->syncobj)
3003 counts->syncobj_count++;
3004 else
3005 counts->sem_count++;
3006 }
3007
3008 if (_fence != VK_NULL_HANDLE) {
3009 RADV_FROM_HANDLE(radv_fence, fence, _fence);
3010 if (fence->temp_syncobj || fence->syncobj)
3011 counts->syncobj_count++;
3012 }
3013
3014 if (counts->syncobj_count) {
3015 counts->syncobj = (uint32_t *)malloc(sizeof(uint32_t) * counts->syncobj_count);
3016 if (!counts->syncobj)
3017 return vk_error(instance, VK_ERROR_OUT_OF_HOST_MEMORY);
3018 }
3019
3020 if (counts->sem_count) {
3021 counts->sem = (struct radeon_winsys_sem **)malloc(sizeof(struct radeon_winsys_sem *) * counts->sem_count);
3022 if (!counts->sem) {
3023 free(counts->syncobj);
3024 return vk_error(instance, VK_ERROR_OUT_OF_HOST_MEMORY);
3025 }
3026 }
3027
3028 for (uint32_t i = 0; i < num_sems; i++) {
3029 RADV_FROM_HANDLE(radv_semaphore, sem, sems[i]);
3030
3031 if (sem->temp_syncobj) {
3032 counts->syncobj[syncobj_idx++] = sem->temp_syncobj;
3033 }
3034 else if (sem->syncobj)
3035 counts->syncobj[syncobj_idx++] = sem->syncobj;
3036 else {
3037 assert(sem->sem);
3038 counts->sem[sem_idx++] = sem->sem;
3039 }
3040 }
3041
3042 if (_fence != VK_NULL_HANDLE) {
3043 RADV_FROM_HANDLE(radv_fence, fence, _fence);
3044 if (fence->temp_syncobj)
3045 counts->syncobj[syncobj_idx++] = fence->temp_syncobj;
3046 else if (fence->syncobj)
3047 counts->syncobj[syncobj_idx++] = fence->syncobj;
3048 }
3049
3050 return VK_SUCCESS;
3051 }
3052
3053 static void
3054 radv_free_sem_info(struct radv_winsys_sem_info *sem_info)
3055 {
3056 free(sem_info->wait.syncobj);
3057 free(sem_info->wait.sem);
3058 free(sem_info->signal.syncobj);
3059 free(sem_info->signal.sem);
3060 }
3061
3062
3063 static void radv_free_temp_syncobjs(struct radv_device *device,
3064 int num_sems,
3065 const VkSemaphore *sems)
3066 {
3067 for (uint32_t i = 0; i < num_sems; i++) {
3068 RADV_FROM_HANDLE(radv_semaphore, sem, sems[i]);
3069
3070 if (sem->temp_syncobj) {
3071 device->ws->destroy_syncobj(device->ws, sem->temp_syncobj);
3072 sem->temp_syncobj = 0;
3073 }
3074 }
3075 }
3076
3077 static VkResult
3078 radv_alloc_sem_info(struct radv_instance *instance,
3079 struct radv_winsys_sem_info *sem_info,
3080 int num_wait_sems,
3081 const VkSemaphore *wait_sems,
3082 int num_signal_sems,
3083 const VkSemaphore *signal_sems,
3084 VkFence fence)
3085 {
3086 VkResult ret;
3087 memset(sem_info, 0, sizeof(*sem_info));
3088
3089 ret = radv_alloc_sem_counts(instance, &sem_info->wait, num_wait_sems, wait_sems, VK_NULL_HANDLE, true);
3090 if (ret)
3091 return ret;
3092 ret = radv_alloc_sem_counts(instance, &sem_info->signal, num_signal_sems, signal_sems, fence, false);
3093 if (ret)
3094 radv_free_sem_info(sem_info);
3095
3096 /* caller can override these */
3097 sem_info->cs_emit_wait = true;
3098 sem_info->cs_emit_signal = true;
3099 return ret;
3100 }
3101
3102 /* Signals the fence as soon as all work currently submitted to the queue is done. */
3103 static VkResult radv_signal_fence(struct radv_queue *queue,
3104 struct radv_fence *fence)
3105 {
3106 int ret;
3107 VkResult result;
3108 struct radv_winsys_sem_info sem_info;
3109
3110 result = radv_alloc_sem_info(queue->device->instance, &sem_info, 0, NULL, 0, NULL,
3111 radv_fence_to_handle(fence));
3112 if (result != VK_SUCCESS)
3113 return result;
3114
3115 ret = queue->device->ws->cs_submit(queue->hw_ctx, queue->queue_idx,
3116 &queue->device->empty_cs[queue->queue_family_index],
3117 1, NULL, NULL, &sem_info, NULL,
3118 false, fence->fence);
3119 radv_free_sem_info(&sem_info);
3120
3121 if (ret)
3122 return vk_error(queue->device->instance, VK_ERROR_DEVICE_LOST);
3123
3124 return VK_SUCCESS;
3125 }
3126
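/* vkQueueSubmit: scratch and ring requirements are first computed across all
 * command buffers so the preamble CS can be (re)built before anything is
 * submitted, then each VkSubmitInfo is flushed to the winsys in chunks of at
 * most RADV_MAX_IBS_PER_SUBMIT command buffers. Semaphore waits are only
 * emitted with the first chunk and signals only with the last one.
 */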
3127 VkResult radv_QueueSubmit(
3128 VkQueue _queue,
3129 uint32_t submitCount,
3130 const VkSubmitInfo* pSubmits,
3131 VkFence _fence)
3132 {
3133 RADV_FROM_HANDLE(radv_queue, queue, _queue);
3134 RADV_FROM_HANDLE(radv_fence, fence, _fence);
3135 struct radeon_winsys_fence *base_fence = fence ? fence->fence : NULL;
3136 struct radeon_winsys_ctx *ctx = queue->hw_ctx;
3137 int ret;
3138 uint32_t max_cs_submission = queue->device->trace_bo ? 1 : RADV_MAX_IBS_PER_SUBMIT;
3139 uint32_t scratch_size = 0;
3140 uint32_t compute_scratch_size = 0;
3141 uint32_t esgs_ring_size = 0, gsvs_ring_size = 0;
3142 struct radeon_cmdbuf *initial_preamble_cs = NULL, *initial_flush_preamble_cs = NULL, *continue_preamble_cs = NULL;
3143 VkResult result;
3144 bool fence_emitted = false;
3145 bool tess_rings_needed = false;
3146 bool gds_needed = false;
3147 bool sample_positions_needed = false;
3148
3149 /* Do this first so failing to allocate scratch buffers can't result in
3150 * partially executed submissions. */
3151 for (uint32_t i = 0; i < submitCount; i++) {
3152 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
3153 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer,
3154 pSubmits[i].pCommandBuffers[j]);
3155
3156 scratch_size = MAX2(scratch_size, cmd_buffer->scratch_size_needed);
3157 compute_scratch_size = MAX2(compute_scratch_size,
3158 cmd_buffer->compute_scratch_size_needed);
3159 esgs_ring_size = MAX2(esgs_ring_size, cmd_buffer->esgs_ring_size_needed);
3160 gsvs_ring_size = MAX2(gsvs_ring_size, cmd_buffer->gsvs_ring_size_needed);
3161 tess_rings_needed |= cmd_buffer->tess_rings_needed;
3162 gds_needed |= cmd_buffer->gds_needed;
3163 sample_positions_needed |= cmd_buffer->sample_positions_needed;
3164 }
3165 }
3166
3167 result = radv_get_preamble_cs(queue, scratch_size, compute_scratch_size,
3168 esgs_ring_size, gsvs_ring_size, tess_rings_needed,
3169 gds_needed, sample_positions_needed,
3170 &initial_flush_preamble_cs,
3171 &initial_preamble_cs, &continue_preamble_cs);
3172 if (result != VK_SUCCESS)
3173 return result;
3174
3175 for (uint32_t i = 0; i < submitCount; i++) {
3176 struct radeon_cmdbuf **cs_array;
3177 bool do_flush = !i || pSubmits[i].pWaitDstStageMask;
3178 bool can_patch = true;
3179 uint32_t advance;
3180 struct radv_winsys_sem_info sem_info;
3181
3182 result = radv_alloc_sem_info(queue->device->instance,
3183 &sem_info,
3184 pSubmits[i].waitSemaphoreCount,
3185 pSubmits[i].pWaitSemaphores,
3186 pSubmits[i].signalSemaphoreCount,
3187 pSubmits[i].pSignalSemaphores,
3188 _fence);
3189 if (result != VK_SUCCESS)
3190 return result;
3191
3192 if (!pSubmits[i].commandBufferCount) {
3193 if (pSubmits[i].waitSemaphoreCount || pSubmits[i].signalSemaphoreCount) {
3194 ret = queue->device->ws->cs_submit(ctx, queue->queue_idx,
3195 &queue->device->empty_cs[queue->queue_family_index],
3196 1, NULL, NULL,
3197 &sem_info, NULL,
3198 false, base_fence);
3199 if (ret) {
3200 radv_loge("failed to submit CS %d\n", i);
3201 abort();
3202 }
3203 fence_emitted = true;
3204 }
3205 radv_free_sem_info(&sem_info);
3206 continue;
3207 }
3208
3209 cs_array = malloc(sizeof(struct radeon_cmdbuf *) *
3210 (pSubmits[i].commandBufferCount));
3211
3212 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
3213 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer,
3214 pSubmits[i].pCommandBuffers[j]);
3215 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
3216
3217 cs_array[j] = cmd_buffer->cs;
3218 if ((cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT))
3219 can_patch = false;
3220
3221 cmd_buffer->status = RADV_CMD_BUFFER_STATUS_PENDING;
3222 }
3223
3224 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j += advance) {
3225 struct radeon_cmdbuf *initial_preamble = (do_flush && !j) ? initial_flush_preamble_cs : initial_preamble_cs;
3226 const struct radv_winsys_bo_list *bo_list = NULL;
3227
3228 advance = MIN2(max_cs_submission,
3229 pSubmits[i].commandBufferCount - j);
3230
3231 if (queue->device->trace_bo)
3232 *queue->device->trace_id_ptr = 0;
3233
3234 sem_info.cs_emit_wait = j == 0;
3235 sem_info.cs_emit_signal = j + advance == pSubmits[i].commandBufferCount;
3236
3237 if (unlikely(queue->device->use_global_bo_list)) {
3238 pthread_mutex_lock(&queue->device->bo_list.mutex);
3239 bo_list = &queue->device->bo_list.list;
3240 }
3241
3242 ret = queue->device->ws->cs_submit(ctx, queue->queue_idx, cs_array + j,
3243 advance, initial_preamble, continue_preamble_cs,
3244 &sem_info, bo_list,
3245 can_patch, base_fence);
3246
3247 if (unlikely(queue->device->use_global_bo_list))
3248 pthread_mutex_unlock(&queue->device->bo_list.mutex);
3249
3250 if (ret) {
3251 radv_loge("failed to submit CS %d\n", i);
3252 abort();
3253 }
3254 fence_emitted = true;
3255 if (queue->device->trace_bo) {
3256 radv_check_gpu_hangs(queue, cs_array[j]);
3257 }
3258 }
3259
3260 radv_free_temp_syncobjs(queue->device,
3261 pSubmits[i].waitSemaphoreCount,
3262 pSubmits[i].pWaitSemaphores);
3263 radv_free_sem_info(&sem_info);
3264 free(cs_array);
3265 }
3266
3267 if (fence) {
3268 if (!fence_emitted) {
3269 result = radv_signal_fence(queue, fence);
3270 if (result != VK_SUCCESS)
3271 return result;
3272 }
3273 }
3274
3275 return VK_SUCCESS;
3276 }
3277
3278 VkResult radv_QueueWaitIdle(
3279 VkQueue _queue)
3280 {
3281 RADV_FROM_HANDLE(radv_queue, queue, _queue);
3282
3283 queue->device->ws->ctx_wait_idle(queue->hw_ctx,
3284 radv_queue_family_to_ring(queue->queue_family_index),
3285 queue->queue_idx);
3286 return VK_SUCCESS;
3287 }
3288
3289 VkResult radv_DeviceWaitIdle(
3290 VkDevice _device)
3291 {
3292 RADV_FROM_HANDLE(radv_device, device, _device);
3293
3294 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
3295 for (unsigned q = 0; q < device->queue_count[i]; q++) {
3296 radv_QueueWaitIdle(radv_queue_to_handle(&device->queues[i][q]));
3297 }
3298 }
3299 return VK_SUCCESS;
3300 }
3301
3302 VkResult radv_EnumerateInstanceExtensionProperties(
3303 const char* pLayerName,
3304 uint32_t* pPropertyCount,
3305 VkExtensionProperties* pProperties)
3306 {
3307 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
3308
3309 for (int i = 0; i < RADV_INSTANCE_EXTENSION_COUNT; i++) {
3310 if (radv_supported_instance_extensions.extensions[i]) {
3311 vk_outarray_append(&out, prop) {
3312 *prop = radv_instance_extensions[i];
3313 }
3314 }
3315 }
3316
3317 return vk_outarray_status(&out);
3318 }
3319
3320 VkResult radv_EnumerateDeviceExtensionProperties(
3321 VkPhysicalDevice physicalDevice,
3322 const char* pLayerName,
3323 uint32_t* pPropertyCount,
3324 VkExtensionProperties* pProperties)
3325 {
3326 RADV_FROM_HANDLE(radv_physical_device, device, physicalDevice);
3327 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
3328
3329 for (int i = 0; i < RADV_DEVICE_EXTENSION_COUNT; i++) {
3330 if (device->supported_extensions.extensions[i]) {
3331 vk_outarray_append(&out, prop) {
3332 *prop = radv_device_extensions[i];
3333 }
3334 }
3335 }
3336
3337 return vk_outarray_status(&out);
3338 }
3339
3340 PFN_vkVoidFunction radv_GetInstanceProcAddr(
3341 VkInstance _instance,
3342 const char* pName)
3343 {
3344 RADV_FROM_HANDLE(radv_instance, instance, _instance);
3345 bool unchecked = instance ? instance->debug_flags & RADV_DEBUG_ALL_ENTRYPOINTS : false;
3346
3347 if (unchecked) {
3348 return radv_lookup_entrypoint_unchecked(pName);
3349 } else {
3350 return radv_lookup_entrypoint_checked(pName,
3351 instance ? instance->apiVersion : 0,
3352 instance ? &instance->enabled_extensions : NULL,
3353 NULL);
3354 }
3355 }
3356
3357 /* The loader wants us to expose a second GetInstanceProcAddr function
3358 * to work around certain LD_PRELOAD issues seen in apps.
3359 */
3360 PUBLIC
3361 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
3362 VkInstance instance,
3363 const char* pName);
3364
3365 PUBLIC
3366 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
3367 VkInstance instance,
3368 const char* pName)
3369 {
3370 return radv_GetInstanceProcAddr(instance, pName);
3371 }
3372
3373 PUBLIC
3374 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetPhysicalDeviceProcAddr(
3375 VkInstance _instance,
3376 const char* pName);
3377
3378 PUBLIC
3379 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetPhysicalDeviceProcAddr(
3380 VkInstance _instance,
3381 const char* pName)
3382 {
3383 RADV_FROM_HANDLE(radv_instance, instance, _instance);
3384
3385 return radv_lookup_physical_device_entrypoint_checked(pName,
3386 instance ? instance->apiVersion : 0,
3387 instance ? &instance->enabled_extensions : NULL);
3388 }
3389
3390 PFN_vkVoidFunction radv_GetDeviceProcAddr(
3391 VkDevice _device,
3392 const char* pName)
3393 {
3394 RADV_FROM_HANDLE(radv_device, device, _device);
3395 bool unchecked = device ? device->instance->debug_flags & RADV_DEBUG_ALL_ENTRYPOINTS : false;
3396
3397 if (unchecked) {
3398 return radv_lookup_entrypoint_unchecked(pName);
3399 } else {
3400 return radv_lookup_entrypoint_checked(pName,
3401 device->instance->apiVersion,
3402 &device->instance->enabled_extensions,
3403 &device->enabled_extensions);
3404 }
3405 }
3406
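/* Export the BO backing a memory object as a file descriptor; for dedicated
 * image allocations the image tiling metadata is written to the BO first so
 * the importer sees the correct layout.
 */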
3407 bool radv_get_memory_fd(struct radv_device *device,
3408 struct radv_device_memory *memory,
3409 int *pFD)
3410 {
3411 struct radeon_bo_metadata metadata;
3412
3413 if (memory->image) {
3414 radv_init_metadata(device, memory->image, &metadata);
3415 device->ws->buffer_set_metadata(memory->bo, &metadata);
3416 }
3417
3418 return device->ws->buffer_get_fd(device->ws, memory->bo,
3419 pFD);
3420 }
3421
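/* Core of vkAllocateMemory: the allocation either wraps an imported
 * opaque/dma-buf fd, a host pointer (VK_EXT_external_memory_host), or a
 * freshly created BO whose domain and flags are derived from the requested
 * memory type.
 */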
3422 static VkResult radv_alloc_memory(struct radv_device *device,
3423 const VkMemoryAllocateInfo* pAllocateInfo,
3424 const VkAllocationCallbacks* pAllocator,
3425 VkDeviceMemory* pMem)
3426 {
3427 struct radv_device_memory *mem;
3428 VkResult result;
3429 enum radeon_bo_domain domain;
3430 uint32_t flags = 0;
3431 enum radv_mem_type mem_type_index = device->physical_device->mem_type_indices[pAllocateInfo->memoryTypeIndex];
3432
3433 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
3434
3435 if (pAllocateInfo->allocationSize == 0) {
3436 /* Apparently, this is allowed */
3437 *pMem = VK_NULL_HANDLE;
3438 return VK_SUCCESS;
3439 }
3440
3441 const VkImportMemoryFdInfoKHR *import_info =
3442 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
3443 const VkMemoryDedicatedAllocateInfo *dedicate_info =
3444 vk_find_struct_const(pAllocateInfo->pNext, MEMORY_DEDICATED_ALLOCATE_INFO);
3445 const VkExportMemoryAllocateInfo *export_info =
3446 vk_find_struct_const(pAllocateInfo->pNext, EXPORT_MEMORY_ALLOCATE_INFO);
3447 const VkImportMemoryHostPointerInfoEXT *host_ptr_info =
3448 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_HOST_POINTER_INFO_EXT);
3449
3450 const struct wsi_memory_allocate_info *wsi_info =
3451 vk_find_struct_const(pAllocateInfo->pNext, WSI_MEMORY_ALLOCATE_INFO_MESA);
3452
3453 mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
3454 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3455 if (mem == NULL)
3456 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
3457
3458 if (wsi_info && wsi_info->implicit_sync)
3459 flags |= RADEON_FLAG_IMPLICIT_SYNC;
3460
3461 if (dedicate_info) {
3462 mem->image = radv_image_from_handle(dedicate_info->image);
3463 mem->buffer = radv_buffer_from_handle(dedicate_info->buffer);
3464 } else {
3465 mem->image = NULL;
3466 mem->buffer = NULL;
3467 }
3468
3469 float priority_float = 0.5;
3470 const struct VkMemoryPriorityAllocateInfoEXT *priority_ext =
3471 vk_find_struct_const(pAllocateInfo->pNext,
3472 MEMORY_PRIORITY_ALLOCATE_INFO_EXT);
3473 if (priority_ext)
3474 priority_float = priority_ext->priority;
3475
3476 unsigned priority = MIN2(RADV_BO_PRIORITY_APPLICATION_MAX - 1,
3477 (int)(priority_float * RADV_BO_PRIORITY_APPLICATION_MAX));
3478
3479 mem->user_ptr = NULL;
3480
3481 if (import_info) {
3482 assert(import_info->handleType ==
3483 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
3484 import_info->handleType ==
3485 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
3486 mem->bo = device->ws->buffer_from_fd(device->ws, import_info->fd,
3487 priority, NULL, NULL);
3488 if (!mem->bo) {
3489 result = VK_ERROR_INVALID_EXTERNAL_HANDLE;
3490 goto fail;
3491 } else {
3492 close(import_info->fd);
3493 }
3494 } else if (host_ptr_info) {
3495 assert(host_ptr_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT);
3496 assert(mem_type_index == RADV_MEM_TYPE_GTT_CACHED);
3497 mem->bo = device->ws->buffer_from_ptr(device->ws, host_ptr_info->pHostPointer,
3498 pAllocateInfo->allocationSize,
3499 priority);
3500 if (!mem->bo) {
3501 result = VK_ERROR_INVALID_EXTERNAL_HANDLE;
3502 goto fail;
3503 } else {
3504 mem->user_ptr = host_ptr_info->pHostPointer;
3505 }
3506 } else {
3507 uint64_t alloc_size = align_u64(pAllocateInfo->allocationSize, 4096);
3508 if (mem_type_index == RADV_MEM_TYPE_GTT_WRITE_COMBINE ||
3509 mem_type_index == RADV_MEM_TYPE_GTT_CACHED)
3510 domain = RADEON_DOMAIN_GTT;
3511 else
3512 domain = RADEON_DOMAIN_VRAM;
3513
3514 if (mem_type_index == RADV_MEM_TYPE_VRAM)
3515 flags |= RADEON_FLAG_NO_CPU_ACCESS;
3516 else
3517 flags |= RADEON_FLAG_CPU_ACCESS;
3518
3519 if (mem_type_index == RADV_MEM_TYPE_GTT_WRITE_COMBINE)
3520 flags |= RADEON_FLAG_GTT_WC;
3521
3522 if (!dedicate_info && !import_info && (!export_info || !export_info->handleTypes)) {
3523 flags |= RADEON_FLAG_NO_INTERPROCESS_SHARING;
3524 if (device->use_global_bo_list) {
3525 flags |= RADEON_FLAG_PREFER_LOCAL_BO;
3526 }
3527 }
3528
3529 mem->bo = device->ws->buffer_create(device->ws, alloc_size, device->physical_device->rad_info.max_alignment,
3530 domain, flags, priority);
3531
3532 if (!mem->bo) {
3533 result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
3534 goto fail;
3535 }
3536 mem->type_index = mem_type_index;
3537 }
3538
3539 result = radv_bo_list_add(device, mem->bo);
3540 if (result != VK_SUCCESS)
3541 goto fail_bo;
3542
3543 *pMem = radv_device_memory_to_handle(mem);
3544
3545 return VK_SUCCESS;
3546
3547 fail_bo:
3548 device->ws->buffer_destroy(mem->bo);
3549 fail:
3550 vk_free2(&device->alloc, pAllocator, mem);
3551
3552 return result;
3553 }
3554
3555 VkResult radv_AllocateMemory(
3556 VkDevice _device,
3557 const VkMemoryAllocateInfo* pAllocateInfo,
3558 const VkAllocationCallbacks* pAllocator,
3559 VkDeviceMemory* pMem)
3560 {
3561 RADV_FROM_HANDLE(radv_device, device, _device);
3562 return radv_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
3563 }
3564
3565 void radv_FreeMemory(
3566 VkDevice _device,
3567 VkDeviceMemory _mem,
3568 const VkAllocationCallbacks* pAllocator)
3569 {
3570 RADV_FROM_HANDLE(radv_device, device, _device);
3571 RADV_FROM_HANDLE(radv_device_memory, mem, _mem);
3572
3573 if (mem == NULL)
3574 return;
3575
3576 radv_bo_list_remove(device, mem->bo);
3577 device->ws->buffer_destroy(mem->bo);
3578 mem->bo = NULL;
3579
3580 vk_free2(&device->alloc, pAllocator, mem);
3581 }
3582
3583 VkResult radv_MapMemory(
3584 VkDevice _device,
3585 VkDeviceMemory _memory,
3586 VkDeviceSize offset,
3587 VkDeviceSize size,
3588 VkMemoryMapFlags flags,
3589 void** ppData)
3590 {
3591 RADV_FROM_HANDLE(radv_device, device, _device);
3592 RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
3593
3594 if (mem == NULL) {
3595 *ppData = NULL;
3596 return VK_SUCCESS;
3597 }
3598
3599 if (mem->user_ptr)
3600 *ppData = mem->user_ptr;
3601 else
3602 *ppData = device->ws->buffer_map(mem->bo);
3603
3604 if (*ppData) {
3605 *ppData += offset;
3606 return VK_SUCCESS;
3607 }
3608
3609 return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
3610 }
3611
3612 void radv_UnmapMemory(
3613 VkDevice _device,
3614 VkDeviceMemory _memory)
3615 {
3616 RADV_FROM_HANDLE(radv_device, device, _device);
3617 RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
3618
3619 if (mem == NULL)
3620 return;
3621
3622 if (mem->user_ptr == NULL)
3623 device->ws->buffer_unmap(mem->bo);
3624 }
3625
3626 VkResult radv_FlushMappedMemoryRanges(
3627 VkDevice _device,
3628 uint32_t memoryRangeCount,
3629 const VkMappedMemoryRange* pMemoryRanges)
3630 {
3631 return VK_SUCCESS;
3632 }
3633
3634 VkResult radv_InvalidateMappedMemoryRanges(
3635 VkDevice _device,
3636 uint32_t memoryRangeCount,
3637 const VkMappedMemoryRange* pMemoryRanges)
3638 {
3639 return VK_SUCCESS;
3640 }
3641
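/* Buffer memory requirements: any memory type is acceptable; sparse buffers
 * need page (4 KiB) alignment while regular buffers only need 16 bytes.
 */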
3642 void radv_GetBufferMemoryRequirements(
3643 VkDevice _device,
3644 VkBuffer _buffer,
3645 VkMemoryRequirements* pMemoryRequirements)
3646 {
3647 RADV_FROM_HANDLE(radv_device, device, _device);
3648 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3649
3650 pMemoryRequirements->memoryTypeBits = (1u << device->physical_device->memory_properties.memoryTypeCount) - 1;
3651
3652 if (buffer->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)
3653 pMemoryRequirements->alignment = 4096;
3654 else
3655 pMemoryRequirements->alignment = 16;
3656
3657 pMemoryRequirements->size = align64(buffer->size, pMemoryRequirements->alignment);
3658 }
3659
3660 void radv_GetBufferMemoryRequirements2(
3661 VkDevice device,
3662 const VkBufferMemoryRequirementsInfo2 *pInfo,
3663 VkMemoryRequirements2 *pMemoryRequirements)
3664 {
3665 radv_GetBufferMemoryRequirements(device, pInfo->buffer,
3666 &pMemoryRequirements->memoryRequirements);
3667 RADV_FROM_HANDLE(radv_buffer, buffer, pInfo->buffer);
3668 vk_foreach_struct(ext, pMemoryRequirements->pNext) {
3669 switch (ext->sType) {
3670 case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS: {
3671 VkMemoryDedicatedRequirements *req =
3672 (VkMemoryDedicatedRequirements *) ext;
3673 req->requiresDedicatedAllocation = buffer->shareable;
3674 req->prefersDedicatedAllocation = req->requiresDedicatedAllocation;
3675 break;
3676 }
3677 default:
3678 break;
3679 }
3680 }
3681 }
3682
3683 void radv_GetImageMemoryRequirements(
3684 VkDevice _device,
3685 VkImage _image,
3686 VkMemoryRequirements* pMemoryRequirements)
3687 {
3688 RADV_FROM_HANDLE(radv_device, device, _device);
3689 RADV_FROM_HANDLE(radv_image, image, _image);
3690
3691 pMemoryRequirements->memoryTypeBits = (1u << device->physical_device->memory_properties.memoryTypeCount) - 1;
3692
3693 pMemoryRequirements->size = image->size;
3694 pMemoryRequirements->alignment = image->alignment;
3695 }
3696
3697 void radv_GetImageMemoryRequirements2(
3698 VkDevice device,
3699 const VkImageMemoryRequirementsInfo2 *pInfo,
3700 VkMemoryRequirements2 *pMemoryRequirements)
3701 {
3702 radv_GetImageMemoryRequirements(device, pInfo->image,
3703 &pMemoryRequirements->memoryRequirements);
3704
3705 RADV_FROM_HANDLE(radv_image, image, pInfo->image);
3706
3707 vk_foreach_struct(ext, pMemoryRequirements->pNext) {
3708 switch (ext->sType) {
3709 case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS: {
3710 VkMemoryDedicatedRequirements *req =
3711 (VkMemoryDedicatedRequirements *) ext;
3712 req->requiresDedicatedAllocation = image->shareable;
3713 req->prefersDedicatedAllocation = req->requiresDedicatedAllocation;
3714 break;
3715 }
3716 default:
3717 break;
3718 }
3719 }
3720 }
3721
3722 void radv_GetImageSparseMemoryRequirements(
3723 VkDevice device,
3724 VkImage image,
3725 uint32_t* pSparseMemoryRequirementCount,
3726 VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
3727 {
3728 stub();
3729 }
3730
3731 void radv_GetImageSparseMemoryRequirements2(
3732 VkDevice device,
3733 const VkImageSparseMemoryRequirementsInfo2 *pInfo,
3734 uint32_t* pSparseMemoryRequirementCount,
3735 VkSparseImageMemoryRequirements2 *pSparseMemoryRequirements)
3736 {
3737 stub();
3738 }
3739
3740 void radv_GetDeviceMemoryCommitment(
3741 VkDevice device,
3742 VkDeviceMemory memory,
3743 VkDeviceSize* pCommittedMemoryInBytes)
3744 {
3745 *pCommittedMemoryInBytes = 0;
3746 }
3747
3748 VkResult radv_BindBufferMemory2(VkDevice device,
3749 uint32_t bindInfoCount,
3750 const VkBindBufferMemoryInfo *pBindInfos)
3751 {
3752 for (uint32_t i = 0; i < bindInfoCount; ++i) {
3753 RADV_FROM_HANDLE(radv_device_memory, mem, pBindInfos[i].memory);
3754 RADV_FROM_HANDLE(radv_buffer, buffer, pBindInfos[i].buffer);
3755
3756 if (mem) {
3757 buffer->bo = mem->bo;
3758 buffer->offset = pBindInfos[i].memoryOffset;
3759 } else {
3760 buffer->bo = NULL;
3761 }
3762 }
3763 return VK_SUCCESS;
3764 }
3765
3766 VkResult radv_BindBufferMemory(
3767 VkDevice device,
3768 VkBuffer buffer,
3769 VkDeviceMemory memory,
3770 VkDeviceSize memoryOffset)
3771 {
3772 const VkBindBufferMemoryInfo info = {
3773 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
3774 .buffer = buffer,
3775 .memory = memory,
3776 .memoryOffset = memoryOffset
3777 };
3778
3779 return radv_BindBufferMemory2(device, 1, &info);
3780 }
3781
3782 VkResult radv_BindImageMemory2(VkDevice device,
3783 uint32_t bindInfoCount,
3784 const VkBindImageMemoryInfo *pBindInfos)
3785 {
3786 for (uint32_t i = 0; i < bindInfoCount; ++i) {
3787 RADV_FROM_HANDLE(radv_device_memory, mem, pBindInfos[i].memory);
3788 RADV_FROM_HANDLE(radv_image, image, pBindInfos[i].image);
3789
3790 if (mem) {
3791 image->bo = mem->bo;
3792 image->offset = pBindInfos[i].memoryOffset;
3793 } else {
3794 image->bo = NULL;
3795 image->offset = 0;
3796 }
3797 }
3798 return VK_SUCCESS;
3799 }
3800
3801
3802 VkResult radv_BindImageMemory(
3803 VkDevice device,
3804 VkImage image,
3805 VkDeviceMemory memory,
3806 VkDeviceSize memoryOffset)
3807 {
3808 const VkBindImageMemoryInfo info = {
3809 .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
3810 .image = image,
3811 .memory = memory,
3812 .memoryOffset = memoryOffset
3813 };
3814
3815 return radv_BindImageMemory2(device, 1, &info);
3816 }
3817
3818
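/* Sparse binding: each VkSparseMemoryBind range is mapped (or unmapped when
 * memory is VK_NULL_HANDLE) into the resource's virtual BO via
 * buffer_virtual_bind.
 */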
3819 static void
3820 radv_sparse_buffer_bind_memory(struct radv_device *device,
3821 const VkSparseBufferMemoryBindInfo *bind)
3822 {
3823 RADV_FROM_HANDLE(radv_buffer, buffer, bind->buffer);
3824
3825 for (uint32_t i = 0; i < bind->bindCount; ++i) {
3826 struct radv_device_memory *mem = NULL;
3827
3828 if (bind->pBinds[i].memory != VK_NULL_HANDLE)
3829 mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
3830
3831 device->ws->buffer_virtual_bind(buffer->bo,
3832 bind->pBinds[i].resourceOffset,
3833 bind->pBinds[i].size,
3834 mem ? mem->bo : NULL,
3835 bind->pBinds[i].memoryOffset);
3836 }
3837 }
3838
3839 static void
3840 radv_sparse_image_opaque_bind_memory(struct radv_device *device,
3841 const VkSparseImageOpaqueMemoryBindInfo *bind)
3842 {
3843 RADV_FROM_HANDLE(radv_image, image, bind->image);
3844
3845 for (uint32_t i = 0; i < bind->bindCount; ++i) {
3846 struct radv_device_memory *mem = NULL;
3847
3848 if (bind->pBinds[i].memory != VK_NULL_HANDLE)
3849 mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
3850
3851 device->ws->buffer_virtual_bind(image->bo,
3852 bind->pBinds[i].resourceOffset,
3853 bind->pBinds[i].size,
3854 mem ? mem->bo : NULL,
3855 bind->pBinds[i].memoryOffset);
3856 }
3857 }
3858
3859 VkResult radv_QueueBindSparse(
3860 VkQueue _queue,
3861 uint32_t bindInfoCount,
3862 const VkBindSparseInfo* pBindInfo,
3863 VkFence _fence)
3864 {
3865 RADV_FROM_HANDLE(radv_fence, fence, _fence);
3866 RADV_FROM_HANDLE(radv_queue, queue, _queue);
3867 struct radeon_winsys_fence *base_fence = fence ? fence->fence : NULL;
3868 bool fence_emitted = false;
3869 VkResult result;
3870 int ret;
3871
3872 for (uint32_t i = 0; i < bindInfoCount; ++i) {
3873 struct radv_winsys_sem_info sem_info;
3874 for (uint32_t j = 0; j < pBindInfo[i].bufferBindCount; ++j) {
3875 radv_sparse_buffer_bind_memory(queue->device,
3876 pBindInfo[i].pBufferBinds + j);
3877 }
3878
3879 for (uint32_t j = 0; j < pBindInfo[i].imageOpaqueBindCount; ++j) {
3880 radv_sparse_image_opaque_bind_memory(queue->device,
3881 pBindInfo[i].pImageOpaqueBinds + j);
3882 }
3883
3884 VkResult result;
3885 result = radv_alloc_sem_info(queue->device->instance,
3886 &sem_info,
3887 pBindInfo[i].waitSemaphoreCount,
3888 pBindInfo[i].pWaitSemaphores,
3889 pBindInfo[i].signalSemaphoreCount,
3890 pBindInfo[i].pSignalSemaphores,
3891 _fence);
3892 if (result != VK_SUCCESS)
3893 return result;
3894
3895 if (pBindInfo[i].waitSemaphoreCount || pBindInfo[i].signalSemaphoreCount) {
3896 ret = queue->device->ws->cs_submit(queue->hw_ctx, queue->queue_idx,
3897 &queue->device->empty_cs[queue->queue_family_index],
3898 1, NULL, NULL,
3899 &sem_info, NULL,
3900 false, base_fence);
3901 if (ret) {
3902 radv_loge("failed to submit CS %d\n", i);
3903 abort();
3904 }
3905
3906 fence_emitted = true;
3907 }
3908
3909 radv_free_sem_info(&sem_info);
3910
3911 }
3912
3913 if (fence) {
3914 if (!fence_emitted) {
3915 result = radv_signal_fence(queue, fence);
3916 if (result != VK_SUCCESS)
3917 return result;
3918 }
3919 }
3920
3921 return VK_SUCCESS;
3922 }
3923
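/* Fences are backed by a DRM syncobj when the device always uses syncobjs or
 * when the fence is exportable; otherwise a legacy winsys fence is used.
 */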
3924 VkResult radv_CreateFence(
3925 VkDevice _device,
3926 const VkFenceCreateInfo* pCreateInfo,
3927 const VkAllocationCallbacks* pAllocator,
3928 VkFence* pFence)
3929 {
3930 RADV_FROM_HANDLE(radv_device, device, _device);
3931 const VkExportFenceCreateInfo *export =
3932 vk_find_struct_const(pCreateInfo->pNext, EXPORT_FENCE_CREATE_INFO);
3933 VkExternalFenceHandleTypeFlags handleTypes =
3934 export ? export->handleTypes : 0;
3935
3936 struct radv_fence *fence = vk_alloc2(&device->alloc, pAllocator,
3937 sizeof(*fence), 8,
3938 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3939
3940 if (!fence)
3941 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
3942
3943 fence->fence_wsi = NULL;
3944 fence->temp_syncobj = 0;
3945 if (device->always_use_syncobj || handleTypes) {
3946 int ret = device->ws->create_syncobj(device->ws, &fence->syncobj);
3947 if (ret) {
3948 vk_free2(&device->alloc, pAllocator, fence);
3949 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
3950 }
3951 if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
3952 device->ws->signal_syncobj(device->ws, fence->syncobj);
3953 }
3954 fence->fence = NULL;
3955 } else {
3956 fence->fence = device->ws->create_fence();
3957 if (!fence->fence) {
3958 vk_free2(&device->alloc, pAllocator, fence);
3959 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
3960 }
3961 fence->syncobj = 0;
3962 if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT)
3963 device->ws->signal_fence(fence->fence);
3964 }
3965
3966 *pFence = radv_fence_to_handle(fence);
3967
3968 return VK_SUCCESS;
3969 }
3970
3971 void radv_DestroyFence(
3972 VkDevice _device,
3973 VkFence _fence,
3974 const VkAllocationCallbacks* pAllocator)
3975 {
3976 RADV_FROM_HANDLE(radv_device, device, _device);
3977 RADV_FROM_HANDLE(radv_fence, fence, _fence);
3978
3979 if (!fence)
3980 return;
3981
3982 if (fence->temp_syncobj)
3983 device->ws->destroy_syncobj(device->ws, fence->temp_syncobj);
3984 if (fence->syncobj)
3985 device->ws->destroy_syncobj(device->ws, fence->syncobj);
3986 if (fence->fence)
3987 device->ws->destroy_fence(fence->fence);
3988 if (fence->fence_wsi)
3989 fence->fence_wsi->destroy(fence->fence_wsi);
3990 vk_free2(&device->alloc, pAllocator, fence);
3991 }
3992
3993
3994 uint64_t radv_get_current_time(void)
3995 {
3996 struct timespec tv;
3997 clock_gettime(CLOCK_MONOTONIC, &tv);
3998 return tv.tv_nsec + tv.tv_sec*1000000000ull;
3999 }
4000
4001 static uint64_t radv_get_absolute_timeout(uint64_t timeout)
4002 {
4003 uint64_t current_time = radv_get_current_time();
4004
4005 timeout = MIN2(UINT64_MAX - current_time, timeout);
4006
4007 return current_time + timeout;
4008 }
4009
4010
4011 static bool radv_all_fences_plain_and_submitted(struct radv_device *device,
4012 uint32_t fenceCount, const VkFence *pFences)
4013 {
4014 for (uint32_t i = 0; i < fenceCount; ++i) {
4015 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
4016 if (fence->fence == NULL || fence->syncobj ||
4017 fence->temp_syncobj || fence->fence_wsi ||
4018 (!device->ws->is_fence_waitable(fence->fence)))
4019 return false;
4020 }
4021 return true;
4022 }
4023
4024 static bool radv_all_fences_syncobj(uint32_t fenceCount, const VkFence *pFences)
4025 {
4026 for (uint32_t i = 0; i < fenceCount; ++i) {
4027 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
4028 if (fence->syncobj == 0 && fence->temp_syncobj == 0)
4029 return false;
4030 }
4031 return true;
4032 }
4033
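/* vkWaitForFences has three paths: a single batched syncobj wait when every
 * fence is syncobj-backed, a "wait for any" fast path over plain,
 * already-submitted winsys fences (drm_minor >= 10), and a per-fence fallback
 * loop that also handles WSI fences.
 */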
4034 VkResult radv_WaitForFences(
4035 VkDevice _device,
4036 uint32_t fenceCount,
4037 const VkFence* pFences,
4038 VkBool32 waitAll,
4039 uint64_t timeout)
4040 {
4041 RADV_FROM_HANDLE(radv_device, device, _device);
4042 timeout = radv_get_absolute_timeout(timeout);
4043
4044 if (device->always_use_syncobj &&
4045 radv_all_fences_syncobj(fenceCount, pFences))
4046 {
4047 uint32_t *handles = malloc(sizeof(uint32_t) * fenceCount);
4048 if (!handles)
4049 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
4050
4051 for (uint32_t i = 0; i < fenceCount; ++i) {
4052 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
4053 handles[i] = fence->temp_syncobj ? fence->temp_syncobj : fence->syncobj;
4054 }
4055
4056 bool success = device->ws->wait_syncobj(device->ws, handles, fenceCount, waitAll, timeout);
4057
4058 free(handles);
4059 return success ? VK_SUCCESS : VK_TIMEOUT;
4060 }
4061
4062 if (!waitAll && fenceCount > 1) {
4063 /* Not doing this by default for waitAll, due to needing to allocate twice. */
4064 if (device->physical_device->rad_info.drm_minor >= 10 && radv_all_fences_plain_and_submitted(device, fenceCount, pFences)) {
4065 uint32_t wait_count = 0;
4066 struct radeon_winsys_fence **fences = malloc(sizeof(struct radeon_winsys_fence *) * fenceCount);
4067 if (!fences)
4068 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
4069
4070 for (uint32_t i = 0; i < fenceCount; ++i) {
4071 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
4072
4073 if (device->ws->fence_wait(device->ws, fence->fence, false, 0)) {
4074 free(fences);
4075 return VK_SUCCESS;
4076 }
4077
4078 fences[wait_count++] = fence->fence;
4079 }
4080
4081 bool success = device->ws->fences_wait(device->ws, fences, wait_count,
4082 waitAll, timeout - radv_get_current_time());
4083
4084 free(fences);
4085 return success ? VK_SUCCESS : VK_TIMEOUT;
4086 }
4087
4088 while(radv_get_current_time() <= timeout) {
4089 for (uint32_t i = 0; i < fenceCount; ++i) {
4090 if (radv_GetFenceStatus(_device, pFences[i]) == VK_SUCCESS)
4091 return VK_SUCCESS;
4092 }
4093 }
4094 return VK_TIMEOUT;
4095 }
4096
4097 for (uint32_t i = 0; i < fenceCount; ++i) {
4098 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
4099 bool expired = false;
4100
4101 if (fence->temp_syncobj) {
4102 if (!device->ws->wait_syncobj(device->ws, &fence->temp_syncobj, 1, true, timeout))
4103 return VK_TIMEOUT;
4104 continue;
4105 }
4106
4107 if (fence->syncobj) {
4108 if (!device->ws->wait_syncobj(device->ws, &fence->syncobj, 1, true, timeout))
4109 return VK_TIMEOUT;
4110 continue;
4111 }
4112
4113 if (fence->fence) {
4114 if (!device->ws->is_fence_waitable(fence->fence)) {
4115 while(!device->ws->is_fence_waitable(fence->fence) &&
4116 radv_get_current_time() <= timeout)
4117 /* Do nothing */;
4118 }
4119
4120 expired = device->ws->fence_wait(device->ws,
4121 fence->fence,
4122 true, timeout);
4123 if (!expired)
4124 return VK_TIMEOUT;
4125 }
4126
4127 if (fence->fence_wsi) {
4128 VkResult result = fence->fence_wsi->wait(fence->fence_wsi, timeout);
4129 if (result != VK_SUCCESS)
4130 return result;
4131 }
4132 }
4133
4134 return VK_SUCCESS;
4135 }
4136
4137 VkResult radv_ResetFences(VkDevice _device,
4138 uint32_t fenceCount,
4139 const VkFence *pFences)
4140 {
4141 RADV_FROM_HANDLE(radv_device, device, _device);
4142
4143 for (unsigned i = 0; i < fenceCount; ++i) {
4144 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
4145 if (fence->fence)
4146 device->ws->reset_fence(fence->fence);
4147
4148 /* Per spec, we first restore the permanent payload, and then reset, so
4149 * having a temp syncobj should not skip resetting the permanent syncobj. */
4150 if (fence->temp_syncobj) {
4151 device->ws->destroy_syncobj(device->ws, fence->temp_syncobj);
4152 fence->temp_syncobj = 0;
4153 }
4154
4155 if (fence->syncobj) {
4156 device->ws->reset_syncobj(device->ws, fence->syncobj);
4157 }
4158 }
4159
4160 return VK_SUCCESS;
4161 }
4162
4163 VkResult radv_GetFenceStatus(VkDevice _device, VkFence _fence)
4164 {
4165 RADV_FROM_HANDLE(radv_device, device, _device);
4166 RADV_FROM_HANDLE(radv_fence, fence, _fence);
4167
4168 if (fence->temp_syncobj) {
4169 bool success = device->ws->wait_syncobj(device->ws, &fence->temp_syncobj, 1, true, 0);
4170 return success ? VK_SUCCESS : VK_NOT_READY;
4171 }
4172
4173 if (fence->syncobj) {
4174 bool success = device->ws->wait_syncobj(device->ws, &fence->syncobj, 1, true, 0);
4175 return success ? VK_SUCCESS : VK_NOT_READY;
4176 }
4177
4178 if (fence->fence) {
4179 if (!device->ws->fence_wait(device->ws, fence->fence, false, 0))
4180 return VK_NOT_READY;
4181 }
4182 if (fence->fence_wsi) {
4183 VkResult result = fence->fence_wsi->wait(fence->fence_wsi, 0);
4184
4185 if (result != VK_SUCCESS) {
4186 if (result == VK_TIMEOUT)
4187 return VK_NOT_READY;
4188 return result;
4189 }
4190 }
4191 return VK_SUCCESS;
4192 }
4193
4194
4195 // Queue semaphore functions
4196
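/* Like fences, semaphores are backed by a DRM syncobj when they may be
 * exported (or when syncobjs are always used) and by a legacy winsys
 * semaphore otherwise.
 */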
4197 VkResult radv_CreateSemaphore(
4198 VkDevice _device,
4199 const VkSemaphoreCreateInfo* pCreateInfo,
4200 const VkAllocationCallbacks* pAllocator,
4201 VkSemaphore* pSemaphore)
4202 {
4203 RADV_FROM_HANDLE(radv_device, device, _device);
4204 const VkExportSemaphoreCreateInfo *export =
4205 vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO);
4206 VkExternalSemaphoreHandleTypeFlags handleTypes =
4207 export ? export->handleTypes : 0;
4208
4209 struct radv_semaphore *sem = vk_alloc2(&device->alloc, pAllocator,
4210 sizeof(*sem), 8,
4211 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
4212 if (!sem)
4213 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
4214
4215 sem->temp_syncobj = 0;
4216 /* create a syncobject if we are going to export this semaphore */
4217 if (device->always_use_syncobj || handleTypes) {
4218 assert (device->physical_device->rad_info.has_syncobj);
4219 int ret = device->ws->create_syncobj(device->ws, &sem->syncobj);
4220 if (ret) {
4221 vk_free2(&device->alloc, pAllocator, sem);
4222 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
4223 }
4224 sem->sem = NULL;
4225 } else {
4226 sem->sem = device->ws->create_sem(device->ws);
4227 if (!sem->sem) {
4228 vk_free2(&device->alloc, pAllocator, sem);
4229 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
4230 }
4231 sem->syncobj = 0;
4232 }
4233
4234 *pSemaphore = radv_semaphore_to_handle(sem);
4235 return VK_SUCCESS;
4236 }
4237
4238 void radv_DestroySemaphore(
4239 VkDevice _device,
4240 VkSemaphore _semaphore,
4241 const VkAllocationCallbacks* pAllocator)
4242 {
4243 RADV_FROM_HANDLE(radv_device, device, _device);
4244 RADV_FROM_HANDLE(radv_semaphore, sem, _semaphore);
4245 if (!_semaphore)
4246 return;
4247
4248 if (sem->syncobj)
4249 device->ws->destroy_syncobj(device->ws, sem->syncobj);
4250 else
4251 device->ws->destroy_sem(sem->sem);
4252 vk_free2(&device->alloc, pAllocator, sem);
4253 }
4254
4255 VkResult radv_CreateEvent(
4256 VkDevice _device,
4257 const VkEventCreateInfo* pCreateInfo,
4258 const VkAllocationCallbacks* pAllocator,
4259 VkEvent* pEvent)
4260 {
4261 RADV_FROM_HANDLE(radv_device, device, _device);
4262 struct radv_event *event = vk_alloc2(&device->alloc, pAllocator,
4263 sizeof(*event), 8,
4264 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
4265
4266 if (!event)
4267 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
4268
4269 event->bo = device->ws->buffer_create(device->ws, 8, 8,
4270 RADEON_DOMAIN_GTT,
4271 RADEON_FLAG_VA_UNCACHED | RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING,
4272 RADV_BO_PRIORITY_FENCE);
4273 if (!event->bo) {
4274 vk_free2(&device->alloc, pAllocator, event);
4275 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
4276 }
4277
4278 event->map = (uint64_t*)device->ws->buffer_map(event->bo);
4279
4280 *pEvent = radv_event_to_handle(event);
4281
4282 return VK_SUCCESS;
4283 }
4284
4285 void radv_DestroyEvent(
4286 VkDevice _device,
4287 VkEvent _event,
4288 const VkAllocationCallbacks* pAllocator)
4289 {
4290 RADV_FROM_HANDLE(radv_device, device, _device);
4291 RADV_FROM_HANDLE(radv_event, event, _event);
4292
4293 if (!event)
4294 return;
4295 device->ws->buffer_destroy(event->bo);
4296 vk_free2(&device->alloc, pAllocator, event);
4297 }
4298
4299 VkResult radv_GetEventStatus(
4300 VkDevice _device,
4301 VkEvent _event)
4302 {
4303 RADV_FROM_HANDLE(radv_event, event, _event);
4304
4305 if (*event->map == 1)
4306 return VK_EVENT_SET;
4307 return VK_EVENT_RESET;
4308 }
4309
4310 VkResult radv_SetEvent(
4311 VkDevice _device,
4312 VkEvent _event)
4313 {
4314 RADV_FROM_HANDLE(radv_event, event, _event);
4315 *event->map = 1;
4316
4317 return VK_SUCCESS;
4318 }
4319
4320 VkResult radv_ResetEvent(
4321 VkDevice _device,
4322 VkEvent _event)
4323 {
4324 RADV_FROM_HANDLE(radv_event, event, _event);
4325 *event->map = 0;
4326
4327 return VK_SUCCESS;
4328 }
4329
4330 VkResult radv_CreateBuffer(
4331 VkDevice _device,
4332 const VkBufferCreateInfo* pCreateInfo,
4333 const VkAllocationCallbacks* pAllocator,
4334 VkBuffer* pBuffer)
4335 {
4336 RADV_FROM_HANDLE(radv_device, device, _device);
4337 struct radv_buffer *buffer;
4338
4339 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
4340
4341 buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
4342 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
4343 if (buffer == NULL)
4344 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
4345
4346 buffer->size = pCreateInfo->size;
4347 buffer->usage = pCreateInfo->usage;
4348 buffer->bo = NULL;
4349 buffer->offset = 0;
4350 buffer->flags = pCreateInfo->flags;
4351
4352 buffer->shareable = vk_find_struct_const(pCreateInfo->pNext,
4353 EXTERNAL_MEMORY_BUFFER_CREATE_INFO) != NULL;
4354
4355 if (pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) {
4356 buffer->bo = device->ws->buffer_create(device->ws,
4357 align64(buffer->size, 4096),
4358 4096, 0, RADEON_FLAG_VIRTUAL,
4359 RADV_BO_PRIORITY_VIRTUAL);
4360 if (!buffer->bo) {
4361 vk_free2(&device->alloc, pAllocator, buffer);
4362 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
4363 }
4364 }
4365
4366 *pBuffer = radv_buffer_to_handle(buffer);
4367
4368 return VK_SUCCESS;
4369 }
4370
4371 void radv_DestroyBuffer(
4372 VkDevice _device,
4373 VkBuffer _buffer,
4374 const VkAllocationCallbacks* pAllocator)
4375 {
4376 RADV_FROM_HANDLE(radv_device, device, _device);
4377 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
4378
4379 if (!buffer)
4380 return;
4381
4382 if (buffer->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)
4383 device->ws->buffer_destroy(buffer->bo);
4384
4385 vk_free2(&device->alloc, pAllocator, buffer);
4386 }
4387
4388 VkDeviceAddress radv_GetBufferDeviceAddressEXT(
4389 VkDevice device,
4390 const VkBufferDeviceAddressInfoEXT* pInfo)
4391 {
4392 RADV_FROM_HANDLE(radv_buffer, buffer, pInfo->buffer);
4393 return radv_buffer_get_va(buffer->bo) + buffer->offset;
4394 }
4395
4396
4397 static inline unsigned
4398 si_tile_mode_index(const struct radv_image_plane *plane, unsigned level, bool stencil)
4399 {
4400 if (stencil)
4401 return plane->surface.u.legacy.stencil_tiling_index[level];
4402 else
4403 return plane->surface.u.legacy.tiling_index[level];
4404 }
4405
4406 static uint32_t radv_surface_max_layer_count(struct radv_image_view *iview)
4407 {
4408 return iview->type == VK_IMAGE_VIEW_TYPE_3D ? iview->extent.depth : (iview->base_layer + iview->layer_count);
4409 }
4410
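/* Compute the CB_DCC_CONTROL value for a color view: the min/max compressed
 * block sizes and the independent 64B/128B block bits depend on the GPU
 * generation, APU vs. dGPU, the sample count, and whether the image may be
 * read through the texture block while compressed.
 */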
4411 static uint32_t
4412 radv_init_dcc_control_reg(struct radv_device *device,
4413 struct radv_image_view *iview)
4414 {
4415 unsigned max_uncompressed_block_size = V_028C78_MAX_BLOCK_SIZE_256B;
4416 unsigned min_compressed_block_size = V_028C78_MIN_BLOCK_SIZE_32B;
4417 unsigned max_compressed_block_size;
4418 unsigned independent_128b_blocks;
4419 unsigned independent_64b_blocks;
4420
4421 if (!radv_dcc_enabled(iview->image, iview->base_mip))
4422 return 0;
4423
4424 if (!device->physical_device->rad_info.has_dedicated_vram) {
4425 /* amdvlk: [min-compressed-block-size] should be set to 32 for
4426 * dGPU and 64 for APU because all of our APUs to date use
4427 * DIMMs which have a request granularity size of 64B while all
4428 * other chips have a 32B request size.
4429 */
4430 min_compressed_block_size = V_028C78_MIN_BLOCK_SIZE_64B;
4431 }
4432
4433 if (device->physical_device->rad_info.chip_class >= GFX10) {
4434 max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_128B;
4435 independent_64b_blocks = 0;
4436 independent_128b_blocks = 1;
4437 } else {
4438 independent_128b_blocks = 0;
4439
4440 if (iview->image->info.samples > 1) {
4441 if (iview->image->planes[0].surface.bpe == 1)
4442 max_uncompressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
4443 else if (iview->image->planes[0].surface.bpe == 2)
4444 max_uncompressed_block_size = V_028C78_MAX_BLOCK_SIZE_128B;
4445 }
4446
4447 if (iview->image->usage & (VK_IMAGE_USAGE_SAMPLED_BIT |
4448 VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
4449 VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)) {
4450 /* If this DCC image is potentially going to be used in texture
4451 * fetches, we need some special settings.
4452 */
4453 independent_64b_blocks = 1;
4454 max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
4455 } else {
4456 /* MAX_UNCOMPRESSED_BLOCK_SIZE must be >=
4457 * MAX_COMPRESSED_BLOCK_SIZE. Set MAX_COMPRESSED_BLOCK_SIZE as
4458 * big as possible for better compression state.
4459 */
4460 independent_64b_blocks = 0;
4461 max_compressed_block_size = max_uncompressed_block_size;
4462 }
4463 }
4464
4465 return S_028C78_MAX_UNCOMPRESSED_BLOCK_SIZE(max_uncompressed_block_size) |
4466 S_028C78_MAX_COMPRESSED_BLOCK_SIZE(max_compressed_block_size) |
4467 S_028C78_MIN_COMPRESSED_BLOCK_SIZE(min_compressed_block_size) |
4468 S_028C78_INDEPENDENT_64B_BLOCKS(independent_64b_blocks) |
4469 S_028C78_INDEPENDENT_128B_BLOCKS(independent_128b_blocks);
4470 }
4471
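/* Fill the CB_COLOR* register values (base, pitch/slice, view, attribs, and
 * FMASK/CMASK/DCC addresses) for a color image view. The GFX9+ and legacy
 * (GFX6-8) layouts are handled separately.
 */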
4472 void
4473 radv_initialise_color_surface(struct radv_device *device,
4474 struct radv_color_buffer_info *cb,
4475 struct radv_image_view *iview)
4476 {
4477 const struct vk_format_description *desc;
4478 unsigned ntype, format, swap, endian;
4479 unsigned blend_clamp = 0, blend_bypass = 0;
4480 uint64_t va;
4481 const struct radv_image_plane *plane = &iview->image->planes[iview->plane_id];
4482 const struct radeon_surf *surf = &plane->surface;
4483
4484 desc = vk_format_description(iview->vk_format);
4485
4486 memset(cb, 0, sizeof(*cb));
4487
4488 /* Intensity is implemented as Red, so treat it that way. */
4489 cb->cb_color_attrib = S_028C74_FORCE_DST_ALPHA_1(desc->swizzle[3] == VK_SWIZZLE_1);
4490
4491 va = radv_buffer_get_va(iview->bo) + iview->image->offset + plane->offset;
4492
4493 cb->cb_color_base = va >> 8;
4494
4495 if (device->physical_device->rad_info.chip_class >= GFX9) {
4496 struct gfx9_surf_meta_flags meta;
4497 if (iview->image->dcc_offset)
4498 meta = surf->u.gfx9.dcc;
4499 else
4500 meta = surf->u.gfx9.cmask;
4501
4502 if (device->physical_device->rad_info.chip_class >= GFX10) {
4503 cb->cb_color_attrib3 |= S_028EE0_COLOR_SW_MODE(surf->u.gfx9.surf.swizzle_mode) |
4504 S_028EE0_FMASK_SW_MODE(surf->u.gfx9.fmask.swizzle_mode) |
4505 S_028EE0_CMASK_PIPE_ALIGNED(surf->u.gfx9.cmask.pipe_aligned) |
4506 S_028EE0_DCC_PIPE_ALIGNED(surf->u.gfx9.dcc.pipe_aligned);
4507 } else {
4508 cb->cb_color_attrib |= S_028C74_COLOR_SW_MODE(surf->u.gfx9.surf.swizzle_mode) |
4509 S_028C74_FMASK_SW_MODE(surf->u.gfx9.fmask.swizzle_mode) |
4510 S_028C74_RB_ALIGNED(meta.rb_aligned) |
4511 S_028C74_PIPE_ALIGNED(meta.pipe_aligned);
4512 cb->cb_mrt_epitch = S_0287A0_EPITCH(surf->u.gfx9.surf.epitch);
4513 }
4514
4515 cb->cb_color_base += surf->u.gfx9.surf_offset >> 8;
4516 cb->cb_color_base |= surf->tile_swizzle;
4517 } else {
4518 const struct legacy_surf_level *level_info = &surf->u.legacy.level[iview->base_mip];
4519 unsigned pitch_tile_max, slice_tile_max, tile_mode_index;
4520
4521 cb->cb_color_base += level_info->offset >> 8;
4522 if (level_info->mode == RADEON_SURF_MODE_2D)
4523 cb->cb_color_base |= surf->tile_swizzle;
4524
4525 pitch_tile_max = level_info->nblk_x / 8 - 1;
4526 slice_tile_max = (level_info->nblk_x * level_info->nblk_y) / 64 - 1;
4527 tile_mode_index = si_tile_mode_index(plane, iview->base_mip, false);
4528
4529 cb->cb_color_pitch = S_028C64_TILE_MAX(pitch_tile_max);
4530 cb->cb_color_slice = S_028C68_TILE_MAX(slice_tile_max);
4531 cb->cb_color_cmask_slice = surf->u.legacy.cmask_slice_tile_max;
4532
4533 cb->cb_color_attrib |= S_028C74_TILE_MODE_INDEX(tile_mode_index);
4534
4535 if (radv_image_has_fmask(iview->image)) {
4536 if (device->physical_device->rad_info.chip_class >= GFX7)
4537 cb->cb_color_pitch |= S_028C64_FMASK_TILE_MAX(surf->u.legacy.fmask.pitch_in_pixels / 8 - 1);
4538 cb->cb_color_attrib |= S_028C74_FMASK_TILE_MODE_INDEX(surf->u.legacy.fmask.tiling_index);
4539 cb->cb_color_fmask_slice = S_028C88_TILE_MAX(surf->u.legacy.fmask.slice_tile_max);
4540 } else {
4541 /* This must be set for fast clear to work without FMASK. */
4542 if (device->physical_device->rad_info.chip_class >= GFX7)
4543 cb->cb_color_pitch |= S_028C64_FMASK_TILE_MAX(pitch_tile_max);
4544 cb->cb_color_attrib |= S_028C74_FMASK_TILE_MODE_INDEX(tile_mode_index);
4545 cb->cb_color_fmask_slice = S_028C88_TILE_MAX(slice_tile_max);
4546 }
4547 }
4548
4549 /* CMASK variables */
4550 va = radv_buffer_get_va(iview->bo) + iview->image->offset;
4551 va += iview->image->cmask_offset;
4552 cb->cb_color_cmask = va >> 8;
4553
4554 va = radv_buffer_get_va(iview->bo) + iview->image->offset;
4555 va += iview->image->dcc_offset;
4556
4557 if (radv_dcc_enabled(iview->image, iview->base_mip) &&
4558 device->physical_device->rad_info.chip_class <= GFX8)
4559 va += plane->surface.u.legacy.level[iview->base_mip].dcc_offset;
4560
4561 unsigned dcc_tile_swizzle = surf->tile_swizzle;
4562 dcc_tile_swizzle &= (surf->dcc_alignment - 1) >> 8;
4563
4564 cb->cb_dcc_base = va >> 8;
4565 cb->cb_dcc_base |= dcc_tile_swizzle;
4566
4567 /* GFX10 field has the same base shift as the GFX6 field. */
4568 uint32_t max_slice = radv_surface_max_layer_count(iview) - 1;
4569 cb->cb_color_view = S_028C6C_SLICE_START(iview->base_layer) |
4570 S_028C6C_SLICE_MAX_GFX10(max_slice);
4571
4572 if (iview->image->info.samples > 1) {
4573 unsigned log_samples = util_logbase2(iview->image->info.samples);
4574
4575 cb->cb_color_attrib |= S_028C74_NUM_SAMPLES(log_samples) |
4576 S_028C74_NUM_FRAGMENTS(log_samples);
4577 }
4578
4579 if (radv_image_has_fmask(iview->image)) {
4580 va = radv_buffer_get_va(iview->bo) + iview->image->offset + iview->image->fmask_offset;
4581 cb->cb_color_fmask = va >> 8;
4582 cb->cb_color_fmask |= surf->fmask_tile_swizzle;
4583 } else {
4584 cb->cb_color_fmask = cb->cb_color_base;
4585 }
4586
4587 ntype = radv_translate_color_numformat(iview->vk_format,
4588 desc,
4589 vk_format_get_first_non_void_channel(iview->vk_format));
4590 format = radv_translate_colorformat(iview->vk_format);
4591 if (format == V_028C70_COLOR_INVALID || ntype == ~0u)
4592 radv_finishme("Illegal color\n");
4593 swap = radv_translate_colorswap(iview->vk_format, false);
4594 endian = radv_colorformat_endian_swap(format);
4595
4596 /* blend clamp should be set for all NORM/SRGB types */
4597 if (ntype == V_028C70_NUMBER_UNORM ||
4598 ntype == V_028C70_NUMBER_SNORM ||
4599 ntype == V_028C70_NUMBER_SRGB)
4600 blend_clamp = 1;
4601
4602 /* Set blend bypass according to the docs if the format is SINT/UINT or
4603 one of the 8/24 COLOR variants. */
4604 if (ntype == V_028C70_NUMBER_UINT || ntype == V_028C70_NUMBER_SINT ||
4605 format == V_028C70_COLOR_8_24 || format == V_028C70_COLOR_24_8 ||
4606 format == V_028C70_COLOR_X24_8_32_FLOAT) {
4607 blend_clamp = 0;
4608 blend_bypass = 1;
4609 }
4610 #if 0
4611 if ((ntype == V_028C70_NUMBER_UINT || ntype == V_028C70_NUMBER_SINT) &&
4612 (format == V_028C70_COLOR_8 ||
4613 format == V_028C70_COLOR_8_8 ||
4614 format == V_028C70_COLOR_8_8_8_8))
4615 ->color_is_int8 = true;
4616 #endif
4617 cb->cb_color_info = S_028C70_FORMAT(format) |
4618 S_028C70_COMP_SWAP(swap) |
4619 S_028C70_BLEND_CLAMP(blend_clamp) |
4620 S_028C70_BLEND_BYPASS(blend_bypass) |
4621 S_028C70_SIMPLE_FLOAT(1) |
4622 S_028C70_ROUND_MODE(ntype != V_028C70_NUMBER_UNORM &&
4623 ntype != V_028C70_NUMBER_SNORM &&
4624 ntype != V_028C70_NUMBER_SRGB &&
4625 format != V_028C70_COLOR_8_24 &&
4626 format != V_028C70_COLOR_24_8) |
4627 S_028C70_NUMBER_TYPE(ntype) |
4628 S_028C70_ENDIAN(endian);
4629 if (radv_image_has_fmask(iview->image)) {
4630 cb->cb_color_info |= S_028C70_COMPRESSION(1);
4631 if (device->physical_device->rad_info.chip_class == GFX6) {
4632 unsigned fmask_bankh = util_logbase2(surf->u.legacy.fmask.bankh);
4633 cb->cb_color_attrib |= S_028C74_FMASK_BANK_HEIGHT(fmask_bankh);
4634 }
4635
4636 if (radv_image_is_tc_compat_cmask(iview->image)) {
4637 /* Allow the texture block to read FMASK directly
4638 * without decompressing it. This bit must be cleared
4639 * when performing FMASK_DECOMPRESS or DCC_COMPRESS,
4640 * otherwise the operation doesn't happen.
4641 */
4642 cb->cb_color_info |= S_028C70_FMASK_COMPRESS_1FRAG_ONLY(1);
4643
4644 /* Set CMASK into a tiling format that allows the
4645 * texture block to read it.
4646 */
4647 cb->cb_color_info |= S_028C70_CMASK_ADDR_TYPE(2);
4648 }
4649 }
4650
4651 if (radv_image_has_cmask(iview->image) &&
4652 !(device->instance->debug_flags & RADV_DEBUG_NO_FAST_CLEARS))
4653 cb->cb_color_info |= S_028C70_FAST_CLEAR(1);
4654
4655 if (radv_dcc_enabled(iview->image, iview->base_mip))
4656 cb->cb_color_info |= S_028C70_DCC_ENABLE(1);
4657
4658 cb->cb_dcc_control = radv_init_dcc_control_reg(device, iview);
4659
4660 /* This must be set for fast clear to work without FMASK. */
4661 if (!radv_image_has_fmask(iview->image) &&
4662 device->physical_device->rad_info.chip_class == GFX6) {
4663 unsigned bankh = util_logbase2(surf->u.legacy.bankh);
4664 cb->cb_color_attrib |= S_028C74_FMASK_BANK_HEIGHT(bankh);
4665 }
4666
4667 if (device->physical_device->rad_info.chip_class >= GFX9) {
4668 const struct vk_format_description *format_desc = vk_format_description(iview->image->vk_format);
4669
4670 unsigned mip0_depth = iview->image->type == VK_IMAGE_TYPE_3D ?
4671 (iview->extent.depth - 1) : (iview->image->info.array_size - 1);
4672 unsigned width = iview->extent.width / (iview->plane_id ? format_desc->width_divisor : 1);
4673 unsigned height = iview->extent.height / (iview->plane_id ? format_desc->height_divisor : 1);
4674
4675 if (device->physical_device->rad_info.chip_class >= GFX10) {
4676 cb->cb_color_view |= S_028C6C_MIP_LEVEL_GFX10(iview->base_mip);
4677
4678 cb->cb_color_attrib3 |= S_028EE0_MIP0_DEPTH(mip0_depth) |
4679 S_028EE0_RESOURCE_TYPE(surf->u.gfx9.resource_type) |
4680 S_028EE0_RESOURCE_LEVEL(1);
4681 } else {
4682 cb->cb_color_view |= S_028C6C_MIP_LEVEL_GFX9(iview->base_mip);
4683 cb->cb_color_attrib |= S_028C74_MIP0_DEPTH(mip0_depth) |
4684 S_028C74_RESOURCE_TYPE(surf->u.gfx9.resource_type);
4685 }
4686
4687 cb->cb_color_attrib2 = S_028C68_MIP0_WIDTH(width - 1) |
4688 S_028C68_MIP0_HEIGHT(height - 1) |
4689 S_028C68_MAX_MIP(iview->image->info.levels - 1);
4690 }
4691 }
4692
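/* Number of Z planes that can be decompressed on the fly for TC-compatible
 * HTILE; the result is written into DB_Z_INFO.DECOMPRESS_ON_N_ZPLANES.
 */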
4693 static unsigned
4694 radv_calc_decompress_on_z_planes(struct radv_device *device,
4695 struct radv_image_view *iview)
4696 {
4697 unsigned max_zplanes = 0;
4698
4699 assert(radv_image_is_tc_compat_htile(iview->image));
4700
4701 if (device->physical_device->rad_info.chip_class >= GFX9) {
4702 /* Default value for 32-bit depth surfaces. */
4703 max_zplanes = 4;
4704
4705 if (iview->vk_format == VK_FORMAT_D16_UNORM &&
4706 iview->image->info.samples > 1)
4707 max_zplanes = 2;
4708
4709 max_zplanes = max_zplanes + 1;
4710 } else {
4711 if (iview->vk_format == VK_FORMAT_D16_UNORM) {
4712 /* Do not enable Z plane compression for 16-bit depth
4713 * surfaces because it isn't supported on GFX8. Only
4714 * 32-bit depth surfaces are supported by the hardware.
4715 * This allows us to maintain shader compatibility and to
4716 * reduce the number of depth decompressions.
4717 */
4718 max_zplanes = 1;
4719 } else {
4720 if (iview->image->info.samples <= 1)
4721 max_zplanes = 5;
4722 else if (iview->image->info.samples <= 4)
4723 max_zplanes = 3;
4724 else
4725 max_zplanes = 2;
4726 }
4727 }
4728
4729 return max_zplanes;
4730 }
4731
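/* Fill the DB_* register values for a depth/stencil image view, including the
 * HTILE base and the TC-compatible HTILE configuration when HTILE is enabled
 * for the selected level.
 */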
4732 void
4733 radv_initialise_ds_surface(struct radv_device *device,
4734 struct radv_ds_buffer_info *ds,
4735 struct radv_image_view *iview)
4736 {
4737 unsigned level = iview->base_mip;
4738 unsigned format, stencil_format;
4739 uint64_t va, s_offs, z_offs;
4740 bool stencil_only = false;
4741 const struct radv_image_plane *plane = &iview->image->planes[0];
4742 const struct radeon_surf *surf = &plane->surface;
4743
4744 assert(vk_format_get_plane_count(iview->image->vk_format) == 1);
4745
4746 memset(ds, 0, sizeof(*ds));
4747 switch (iview->image->vk_format) {
4748 case VK_FORMAT_D24_UNORM_S8_UINT:
4749 case VK_FORMAT_X8_D24_UNORM_PACK32:
4750 ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-24);
4751 ds->offset_scale = 2.0f;
4752 break;
4753 case VK_FORMAT_D16_UNORM:
4754 case VK_FORMAT_D16_UNORM_S8_UINT:
4755 ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-16);
4756 ds->offset_scale = 4.0f;
4757 break;
4758 case VK_FORMAT_D32_SFLOAT:
4759 case VK_FORMAT_D32_SFLOAT_S8_UINT:
4760 ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-23) |
4761 S_028B78_POLY_OFFSET_DB_IS_FLOAT_FMT(1);
4762 ds->offset_scale = 1.0f;
4763 break;
4764 case VK_FORMAT_S8_UINT:
4765 stencil_only = true;
4766 break;
4767 default:
4768 break;
4769 }
4770
4771 format = radv_translate_dbformat(iview->image->vk_format);
4772 stencil_format = surf->has_stencil ?
4773 V_028044_STENCIL_8 : V_028044_STENCIL_INVALID;
4774
4775 uint32_t max_slice = radv_surface_max_layer_count(iview) - 1;
4776 ds->db_depth_view = S_028008_SLICE_START(iview->base_layer) |
4777 S_028008_SLICE_MAX(max_slice);
4778 if (device->physical_device->rad_info.chip_class >= GFX10) {
4779 ds->db_depth_view |= S_028008_SLICE_START_HI(iview->base_layer >> 11) |
4780 S_028008_SLICE_MAX_HI(max_slice >> 11);
4781 }
4782
4783 ds->db_htile_data_base = 0;
4784 ds->db_htile_surface = 0;
4785
4786 va = radv_buffer_get_va(iview->bo) + iview->image->offset;
4787 s_offs = z_offs = va;
4788
4789 if (device->physical_device->rad_info.chip_class >= GFX9) {
4790 assert(surf->u.gfx9.surf_offset == 0);
4791 s_offs += surf->u.gfx9.stencil_offset;
4792
4793 ds->db_z_info = S_028038_FORMAT(format) |
4794 S_028038_NUM_SAMPLES(util_logbase2(iview->image->info.samples)) |
4795 S_028038_SW_MODE(surf->u.gfx9.surf.swizzle_mode) |
4796 S_028038_MAXMIP(iview->image->info.levels - 1) |
4797 S_028038_ZRANGE_PRECISION(1);
4798 ds->db_stencil_info = S_02803C_FORMAT(stencil_format) |
4799 S_02803C_SW_MODE(surf->u.gfx9.stencil.swizzle_mode);
4800
4801 if (device->physical_device->rad_info.chip_class == GFX9) {
4802 ds->db_z_info2 = S_028068_EPITCH(surf->u.gfx9.surf.epitch);
4803 ds->db_stencil_info2 = S_02806C_EPITCH(surf->u.gfx9.stencil.epitch);
4804 }
4805
4806 ds->db_depth_view |= S_028008_MIPID(level);
4807 ds->db_depth_size = S_02801C_X_MAX(iview->image->info.width - 1) |
4808 S_02801C_Y_MAX(iview->image->info.height - 1);
4809
4810 if (radv_htile_enabled(iview->image, level)) {
4811 ds->db_z_info |= S_028038_TILE_SURFACE_ENABLE(1);
4812
4813 if (radv_image_is_tc_compat_htile(iview->image)) {
4814 unsigned max_zplanes =
4815 radv_calc_decompress_on_z_planes(device, iview);
4816
4817 ds->db_z_info |= S_028038_DECOMPRESS_ON_N_ZPLANES(max_zplanes);
4818
4819 if (device->physical_device->rad_info.chip_class >= GFX10) {
4820 ds->db_z_info |= S_028040_ITERATE_FLUSH(1);
4821 ds->db_stencil_info |= S_028044_ITERATE_FLUSH(1);
4822 } else {
4823 ds->db_z_info |= S_028038_ITERATE_FLUSH(1);
4824 ds->db_stencil_info |= S_02803C_ITERATE_FLUSH(1);
4825 }
4826 }
4827
4828 if (!surf->has_stencil)
4829 /* Use all of the htile_buffer for depth if there's no stencil. */
4830 ds->db_stencil_info |= S_02803C_TILE_STENCIL_DISABLE(1);
4831 va = radv_buffer_get_va(iview->bo) + iview->image->offset +
4832 iview->image->htile_offset;
4833 ds->db_htile_data_base = va >> 8;
4834 ds->db_htile_surface = S_028ABC_FULL_CACHE(1) |
4835 S_028ABC_PIPE_ALIGNED(surf->u.gfx9.htile.pipe_aligned);
4836
4837 if (device->physical_device->rad_info.chip_class == GFX9) {
4838 ds->db_htile_surface |= S_028ABC_RB_ALIGNED(surf->u.gfx9.htile.rb_aligned);
4839 }
4840 }
4841 } else {
4842 const struct legacy_surf_level *level_info = &surf->u.legacy.level[level];
4843
4844 if (stencil_only)
4845 level_info = &surf->u.legacy.stencil_level[level];
4846
4847 z_offs += surf->u.legacy.level[level].offset;
4848 s_offs += surf->u.legacy.stencil_level[level].offset;
4849
4850 ds->db_depth_info = S_02803C_ADDR5_SWIZZLE_MASK(!radv_image_is_tc_compat_htile(iview->image));
4851 ds->db_z_info = S_028040_FORMAT(format) | S_028040_ZRANGE_PRECISION(1);
4852 ds->db_stencil_info = S_028044_FORMAT(stencil_format);
4853
4854 if (iview->image->info.samples > 1)
4855 ds->db_z_info |= S_028040_NUM_SAMPLES(util_logbase2(iview->image->info.samples));
4856
4857 if (device->physical_device->rad_info.chip_class >= GFX7) {
4858 struct radeon_info *info = &device->physical_device->rad_info;
4859 unsigned tiling_index = surf->u.legacy.tiling_index[level];
4860 unsigned stencil_index = surf->u.legacy.stencil_tiling_index[level];
4861 unsigned macro_index = surf->u.legacy.macro_tile_index;
4862 unsigned tile_mode = info->si_tile_mode_array[tiling_index];
4863 unsigned stencil_tile_mode = info->si_tile_mode_array[stencil_index];
4864 unsigned macro_mode = info->cik_macrotile_mode_array[macro_index];
4865
4866 if (stencil_only)
4867 tile_mode = stencil_tile_mode;
4868
4869 ds->db_depth_info |=
4870 S_02803C_ARRAY_MODE(G_009910_ARRAY_MODE(tile_mode)) |
4871 S_02803C_PIPE_CONFIG(G_009910_PIPE_CONFIG(tile_mode)) |
4872 S_02803C_BANK_WIDTH(G_009990_BANK_WIDTH(macro_mode)) |
4873 S_02803C_BANK_HEIGHT(G_009990_BANK_HEIGHT(macro_mode)) |
4874 S_02803C_MACRO_TILE_ASPECT(G_009990_MACRO_TILE_ASPECT(macro_mode)) |
4875 S_02803C_NUM_BANKS(G_009990_NUM_BANKS(macro_mode));
4876 ds->db_z_info |= S_028040_TILE_SPLIT(G_009910_TILE_SPLIT(tile_mode));
4877 ds->db_stencil_info |= S_028044_TILE_SPLIT(G_009910_TILE_SPLIT(stencil_tile_mode));
4878 } else {
4879 unsigned tile_mode_index = si_tile_mode_index(&iview->image->planes[0], level, false);
4880 ds->db_z_info |= S_028040_TILE_MODE_INDEX(tile_mode_index);
4881 tile_mode_index = si_tile_mode_index(&iview->image->planes[0], level, true);
4882 ds->db_stencil_info |= S_028044_TILE_MODE_INDEX(tile_mode_index);
4883 if (stencil_only)
4884 ds->db_z_info |= S_028040_TILE_MODE_INDEX(tile_mode_index);
4885 }
4886
4887 ds->db_depth_size = S_028058_PITCH_TILE_MAX((level_info->nblk_x / 8) - 1) |
4888 S_028058_HEIGHT_TILE_MAX((level_info->nblk_y / 8) - 1);
4889 ds->db_depth_slice = S_02805C_SLICE_TILE_MAX((level_info->nblk_x * level_info->nblk_y) / 64 - 1);
4890
4891 if (radv_htile_enabled(iview->image, level)) {
4892 ds->db_z_info |= S_028040_TILE_SURFACE_ENABLE(1);
4893
4894 if (!surf->has_stencil &&
4895 !radv_image_is_tc_compat_htile(iview->image))
4896 /* Use all of the htile_buffer for depth if there's no stencil. */
4897 ds->db_stencil_info |= S_028044_TILE_STENCIL_DISABLE(1);
4898
4899 va = radv_buffer_get_va(iview->bo) + iview->image->offset +
4900 iview->image->htile_offset;
4901 ds->db_htile_data_base = va >> 8;
4902 ds->db_htile_surface = S_028ABC_FULL_CACHE(1);
4903
4904 if (radv_image_is_tc_compat_htile(iview->image)) {
4905 unsigned max_zplanes =
4906 radv_calc_decompress_on_z_planes(device, iview);
4907
4908 ds->db_htile_surface |= S_028ABC_TC_COMPATIBLE(1);
4909 ds->db_z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(max_zplanes);
4910 }
4911 }
4912 }
4913
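/* DB base addresses are programmed in 256-byte units, hence the >> 8. */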
4914 ds->db_z_read_base = ds->db_z_write_base = z_offs >> 8;
4915 ds->db_stencil_read_base = ds->db_stencil_write_base = s_offs >> 8;
4916 }
4917
4918 VkResult radv_CreateFramebuffer(
4919 VkDevice _device,
4920 const VkFramebufferCreateInfo* pCreateInfo,
4921 const VkAllocationCallbacks* pAllocator,
4922 VkFramebuffer* pFramebuffer)
4923 {
4924 RADV_FROM_HANDLE(radv_device, device, _device);
4925 struct radv_framebuffer *framebuffer;
4926 const VkFramebufferAttachmentsCreateInfoKHR *imageless_create_info =
4927 vk_find_struct_const(pCreateInfo->pNext,
4928 FRAMEBUFFER_ATTACHMENTS_CREATE_INFO_KHR);
4929
4930 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
4931
4932 size_t size = sizeof(*framebuffer);
4933 if (!imageless_create_info)
4934 size += sizeof(struct radv_image_view*) * pCreateInfo->attachmentCount;
4935 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
4936 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
4937 if (framebuffer == NULL)
4938 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
4939
4940 framebuffer->attachment_count = pCreateInfo->attachmentCount;
4941 framebuffer->width = pCreateInfo->width;
4942 framebuffer->height = pCreateInfo->height;
4943 framebuffer->layers = pCreateInfo->layers;
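/* With VK_KHR_imageless_framebuffer no image views are provided, so clamp the dimensions against the attachment image infos instead. */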
4944 if (imageless_create_info) {
4945 for (unsigned i = 0; i < imageless_create_info->attachmentImageInfoCount; ++i) {
4946 const VkFramebufferAttachmentImageInfoKHR *attachment =
4947 imageless_create_info->pAttachmentImageInfos + i;
4948 framebuffer->width = MIN2(framebuffer->width, attachment->width);
4949 framebuffer->height = MIN2(framebuffer->height, attachment->height);
4950 framebuffer->layers = MIN2(framebuffer->layers, attachment->layerCount);
4951 }
4952 } else {
4953 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
4954 VkImageView _iview = pCreateInfo->pAttachments[i];
4955 struct radv_image_view *iview = radv_image_view_from_handle(_iview);
4956 framebuffer->attachments[i] = iview;
4957 framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
4958 framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
4959 framebuffer->layers = MIN2(framebuffer->layers, radv_surface_max_layer_count(iview));
4960 }
4961 }
4962
4963 *pFramebuffer = radv_framebuffer_to_handle(framebuffer);
4964 return VK_SUCCESS;
4965 }
4966
4967 void radv_DestroyFramebuffer(
4968 VkDevice _device,
4969 VkFramebuffer _fb,
4970 const VkAllocationCallbacks* pAllocator)
4971 {
4972 RADV_FROM_HANDLE(radv_device, device, _device);
4973 RADV_FROM_HANDLE(radv_framebuffer, fb, _fb);
4974
4975 if (!fb)
4976 return;
4977 vk_free2(&device->alloc, pAllocator, fb);
4978 }
4979
4980 static unsigned radv_tex_wrap(VkSamplerAddressMode address_mode)
4981 {
4982 switch (address_mode) {
4983 case VK_SAMPLER_ADDRESS_MODE_REPEAT:
4984 return V_008F30_SQ_TEX_WRAP;
4985 case VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT:
4986 return V_008F30_SQ_TEX_MIRROR;
4987 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE:
4988 return V_008F30_SQ_TEX_CLAMP_LAST_TEXEL;
4989 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER:
4990 return V_008F30_SQ_TEX_CLAMP_BORDER;
4991 case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE:
4992 return V_008F30_SQ_TEX_MIRROR_ONCE_LAST_TEXEL;
4993 default:
4994 unreachable("illegal tex wrap mode");
4995 break;
4996 }
4997 }
4998
4999 static unsigned
5000 radv_tex_compare(VkCompareOp op)
5001 {
5002 switch (op) {
5003 case VK_COMPARE_OP_NEVER:
5004 return V_008F30_SQ_TEX_DEPTH_COMPARE_NEVER;
5005 case VK_COMPARE_OP_LESS:
5006 return V_008F30_SQ_TEX_DEPTH_COMPARE_LESS;
5007 case VK_COMPARE_OP_EQUAL:
5008 return V_008F30_SQ_TEX_DEPTH_COMPARE_EQUAL;
5009 case VK_COMPARE_OP_LESS_OR_EQUAL:
5010 return V_008F30_SQ_TEX_DEPTH_COMPARE_LESSEQUAL;
5011 case VK_COMPARE_OP_GREATER:
5012 return V_008F30_SQ_TEX_DEPTH_COMPARE_GREATER;
5013 case VK_COMPARE_OP_NOT_EQUAL:
5014 return V_008F30_SQ_TEX_DEPTH_COMPARE_NOTEQUAL;
5015 case VK_COMPARE_OP_GREATER_OR_EQUAL:
5016 return V_008F30_SQ_TEX_DEPTH_COMPARE_GREATEREQUAL;
5017 case VK_COMPARE_OP_ALWAYS:
5018 return V_008F30_SQ_TEX_DEPTH_COMPARE_ALWAYS;
5019 default:
5020 unreachable("illegal compare mode");
5021 break;
5022 }
5023 }
5024
5025 static unsigned
5026 radv_tex_filter(VkFilter filter, unsigned max_aniso)
5027 {
5028 switch (filter) {
5029 case VK_FILTER_NEAREST:
5030 return (max_aniso > 1 ? V_008F38_SQ_TEX_XY_FILTER_ANISO_POINT :
5031 V_008F38_SQ_TEX_XY_FILTER_POINT);
5032 case VK_FILTER_LINEAR:
5033 return (max_aniso > 1 ? V_008F38_SQ_TEX_XY_FILTER_ANISO_BILINEAR :
5034 V_008F38_SQ_TEX_XY_FILTER_BILINEAR);
5035 case VK_FILTER_CUBIC_IMG:
5036 default:
5037 fprintf(stderr, "illegal texture filter\n");
5038 return 0;
5039 }
5040 }
5041
5042 static unsigned
5043 radv_tex_mipfilter(VkSamplerMipmapMode mode)
5044 {
5045 switch (mode) {
5046 case VK_SAMPLER_MIPMAP_MODE_NEAREST:
5047 return V_008F38_SQ_TEX_Z_FILTER_POINT;
5048 case VK_SAMPLER_MIPMAP_MODE_LINEAR:
5049 return V_008F38_SQ_TEX_Z_FILTER_LINEAR;
5050 default:
5051 return V_008F38_SQ_TEX_Z_FILTER_NONE;
5052 }
5053 }
5054
5055 static unsigned
5056 radv_tex_bordercolor(VkBorderColor bcolor)
5057 {
5058 switch (bcolor) {
5059 case VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK:
5060 case VK_BORDER_COLOR_INT_TRANSPARENT_BLACK:
5061 return V_008F3C_SQ_TEX_BORDER_COLOR_TRANS_BLACK;
5062 case VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK:
5063 case VK_BORDER_COLOR_INT_OPAQUE_BLACK:
5064 return V_008F3C_SQ_TEX_BORDER_COLOR_OPAQUE_BLACK;
5065 case VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE:
5066 case VK_BORDER_COLOR_INT_OPAQUE_WHITE:
5067 return V_008F3C_SQ_TEX_BORDER_COLOR_OPAQUE_WHITE;
5068 default:
5069 break;
5070 }
5071 return 0;
5072 }
5073
5074 static unsigned
5075 radv_tex_aniso_filter(unsigned filter)
5076 {
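/* Map the [1, 16] max anisotropy value to the hardware's log2 encoding: 0 = 1x, 1 = 2x, 2 = 4x, 3 = 8x, 4 = 16x. */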
5077 if (filter < 2)
5078 return 0;
5079 if (filter < 4)
5080 return 1;
5081 if (filter < 8)
5082 return 2;
5083 if (filter < 16)
5084 return 3;
5085 return 4;
5086 }
5087
5088 static unsigned
5089 radv_tex_filter_mode(VkSamplerReductionModeEXT mode)
5090 {
5091 switch (mode) {
5092 case VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT:
5093 return V_008F30_SQ_IMG_FILTER_MODE_BLEND;
5094 case VK_SAMPLER_REDUCTION_MODE_MIN_EXT:
5095 return V_008F30_SQ_IMG_FILTER_MODE_MIN;
5096 case VK_SAMPLER_REDUCTION_MODE_MAX_EXT:
5097 return V_008F30_SQ_IMG_FILTER_MODE_MAX;
5098 default:
5099 break;
5100 }
5101 return 0;
5102 }
5103
5104 static uint32_t
5105 radv_get_max_anisotropy(struct radv_device *device,
5106 const VkSamplerCreateInfo *pCreateInfo)
5107 {
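/* force_aniso is a per-device override; a negative value means no override. */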
5108 if (device->force_aniso >= 0)
5109 return device->force_aniso;
5110
5111 if (pCreateInfo->anisotropyEnable &&
5112 pCreateInfo->maxAnisotropy > 1.0f)
5113 return (uint32_t)pCreateInfo->maxAnisotropy;
5114
5115 return 0;
5116 }
5117
5118 static void
5119 radv_init_sampler(struct radv_device *device,
5120 struct radv_sampler *sampler,
5121 const VkSamplerCreateInfo *pCreateInfo)
5122 {
5123 uint32_t max_aniso = radv_get_max_anisotropy(device, pCreateInfo);
5124 uint32_t max_aniso_ratio = radv_tex_aniso_filter(max_aniso);
5125 bool compat_mode = device->physical_device->rad_info.chip_class == GFX8 ||
5126 device->physical_device->rad_info.chip_class == GFX9;
5127 unsigned filter_mode = V_008F30_SQ_IMG_FILTER_MODE_BLEND;
5128
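/* Default to weighted-average filtering unless VK_EXT_sampler_filter_minmax requests a MIN/MAX reduction mode. */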
5129 const struct VkSamplerReductionModeCreateInfoEXT *sampler_reduction =
5130 vk_find_struct_const(pCreateInfo->pNext,
5131 SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT);
5132 if (sampler_reduction)
5133 filter_mode = radv_tex_filter_mode(sampler_reduction->reductionMode);
5134
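/* Build the four-dword hardware sampler descriptor. */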
5135 sampler->state[0] = (S_008F30_CLAMP_X(radv_tex_wrap(pCreateInfo->addressModeU)) |
5136 S_008F30_CLAMP_Y(radv_tex_wrap(pCreateInfo->addressModeV)) |
5137 S_008F30_CLAMP_Z(radv_tex_wrap(pCreateInfo->addressModeW)) |
5138 S_008F30_MAX_ANISO_RATIO(max_aniso_ratio) |
5139 S_008F30_DEPTH_COMPARE_FUNC(radv_tex_compare(pCreateInfo->compareOp)) |
5140 S_008F30_FORCE_UNNORMALIZED(pCreateInfo->unnormalizedCoordinates ? 1 : 0) |
5141 S_008F30_ANISO_THRESHOLD(max_aniso_ratio >> 1) |
5142 S_008F30_ANISO_BIAS(max_aniso_ratio) |
5143 S_008F30_DISABLE_CUBE_WRAP(0) |
5144 S_008F30_COMPAT_MODE(compat_mode) |
5145 S_008F30_FILTER_MODE(filter_mode));
5146 sampler->state[1] = (S_008F34_MIN_LOD(S_FIXED(CLAMP(pCreateInfo->minLod, 0, 15), 8)) |
5147 S_008F34_MAX_LOD(S_FIXED(CLAMP(pCreateInfo->maxLod, 0, 15), 8)) |
5148 S_008F34_PERF_MIP(max_aniso_ratio ? max_aniso_ratio + 6 : 0));
5149 sampler->state[2] = (S_008F38_LOD_BIAS(S_FIXED(CLAMP(pCreateInfo->mipLodBias, -16, 16), 8)) |
5150 S_008F38_XY_MAG_FILTER(radv_tex_filter(pCreateInfo->magFilter, max_aniso)) |
5151 S_008F38_XY_MIN_FILTER(radv_tex_filter(pCreateInfo->minFilter, max_aniso)) |
5152 S_008F38_MIP_FILTER(radv_tex_mipfilter(pCreateInfo->mipmapMode)) |
5153 S_008F38_MIP_POINT_PRECLAMP(0));
5154 sampler->state[3] = (S_008F3C_BORDER_COLOR_PTR(0) |
5155 S_008F3C_BORDER_COLOR_TYPE(radv_tex_bordercolor(pCreateInfo->borderColor)));
5156
5157 if (device->physical_device->rad_info.chip_class >= GFX10) {
5158 sampler->state[2] |= S_008F38_ANISO_OVERRIDE_GFX10(1);
5159 } else {
5160 sampler->state[2] |=
5161 S_008F38_DISABLE_LSB_CEIL(device->physical_device->rad_info.chip_class <= GFX8) |
5162 S_008F38_FILTER_PREC_FIX(1) |
5163 S_008F38_ANISO_OVERRIDE_GFX6(device->physical_device->rad_info.chip_class >= GFX8);
5164 }
5165 }
5166
5167 VkResult radv_CreateSampler(
5168 VkDevice _device,
5169 const VkSamplerCreateInfo* pCreateInfo,
5170 const VkAllocationCallbacks* pAllocator,
5171 VkSampler* pSampler)
5172 {
5173 RADV_FROM_HANDLE(radv_device, device, _device);
5174 struct radv_sampler *sampler;
5175
5176 const struct VkSamplerYcbcrConversionInfo *ycbcr_conversion =
5177 vk_find_struct_const(pCreateInfo->pNext,
5178 SAMPLER_YCBCR_CONVERSION_INFO);
5179
5180 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
5181
5182 sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
5183 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
5184 if (!sampler)
5185 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
5186
5187 radv_init_sampler(device, sampler, pCreateInfo);
5188
5189 sampler->ycbcr_sampler = ycbcr_conversion ? radv_sampler_ycbcr_conversion_from_handle(ycbcr_conversion->conversion) : NULL;
5190 *pSampler = radv_sampler_to_handle(sampler);
5191
5192 return VK_SUCCESS;
5193 }
5194
5195 void radv_DestroySampler(
5196 VkDevice _device,
5197 VkSampler _sampler,
5198 const VkAllocationCallbacks* pAllocator)
5199 {
5200 RADV_FROM_HANDLE(radv_device, device, _device);
5201 RADV_FROM_HANDLE(radv_sampler, sampler, _sampler);
5202
5203 if (!sampler)
5204 return;
5205 vk_free2(&device->alloc, pAllocator, sampler);
5206 }
5207
5208 /* vk_icd.h does not declare this function, so we declare it here to
5209 * suppress Wmissing-prototypes.
5210 */
5211 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
5212 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
5213
5214 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
5215 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
5216 {
5217 /* For the full details on loader interface versioning, see
5218 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
5219 * What follows is a condensed summary, to help you navigate the large and
5220 * confusing official doc.
5221 *
5222 * - Loader interface v0 is incompatible with later versions. We don't
5223 * support it.
5224 *
5225 * - In loader interface v1:
5226 * - The first ICD entrypoint called by the loader is
5227 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
5228 * entrypoint.
5229 * - The ICD must statically expose no other Vulkan symbol unless it is
5230 * linked with -Bsymbolic.
5231 * - Each dispatchable Vulkan handle created by the ICD must be
5232 * a pointer to a struct whose first member is VK_LOADER_DATA. The
5233 * ICD must initialize VK_LOADER_DATA.loadMagic to ICD_LOADER_MAGIC.
5234 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
5235 * vkDestroySurfaceKHR(). The ICD must be capable of working with
5236 * such loader-managed surfaces.
5237 *
5238 * - Loader interface v2 differs from v1 in:
5239 * - The first ICD entrypoint called by the loader is
5240 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
5241 * statically expose this entrypoint.
5242 *
5243 * - Loader interface v3 differs from v2 in:
5244 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
5245 * vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
5246 * because the loader no longer does so.
5247 *
* - Loader interface v4 differs from v3 in:
*     - The ICD must implement vk_icdGetPhysicalDeviceProcAddr().
*/
5248 *pSupportedVersion = MIN2(*pSupportedVersion, 4u);
5249 return VK_SUCCESS;
5250 }
5251
5252 VkResult radv_GetMemoryFdKHR(VkDevice _device,
5253 const VkMemoryGetFdInfoKHR *pGetFdInfo,
5254 int *pFD)
5255 {
5256 RADV_FROM_HANDLE(radv_device, device, _device);
5257 RADV_FROM_HANDLE(radv_device_memory, memory, pGetFdInfo->memory);
5258
5259 assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
5260
5261 /* At the moment, we support only the below handle types. */
5262 assert(pGetFdInfo->handleType ==
5263 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
5264 pGetFdInfo->handleType ==
5265 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
5266
5267 bool ret = radv_get_memory_fd(device, memory, pFD);
5268 if (ret == false)
5269 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
5270 return VK_SUCCESS;
5271 }
5272
5273 VkResult radv_GetMemoryFdPropertiesKHR(VkDevice _device,
5274 VkExternalMemoryHandleTypeFlagBits handleType,
5275 int fd,
5276 VkMemoryFdPropertiesKHR *pMemoryFdProperties)
5277 {
5278 RADV_FROM_HANDLE(radv_device, device, _device);
5279
5280 switch (handleType) {
5281 case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
5282 pMemoryFdProperties->memoryTypeBits = (1 << RADV_MEM_TYPE_COUNT) - 1;
5283 return VK_SUCCESS;
5284
5285 default:
5286 /* The valid usage section for this function says:
5287 *
5288 * "handleType must not be one of the handle types defined as
5289 * opaque."
5290 *
5291 * So opaque handle types fall into the default "unsupported" case.
5292 */
5293 return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
5294 }
5295 }
5296
5297 static VkResult radv_import_opaque_fd(struct radv_device *device,
5298 int fd,
5299 uint32_t *syncobj)
5300 {
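/* Importing an opaque FD consumes it: convert it to a syncobj handle, replace any previously imported payload and close the FD. */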
5301 uint32_t syncobj_handle = 0;
5302 int ret = device->ws->import_syncobj(device->ws, fd, &syncobj_handle);
5303 if (ret != 0)
5304 return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
5305
5306 if (*syncobj)
5307 device->ws->destroy_syncobj(device->ws, *syncobj);
5308
5309 *syncobj = syncobj_handle;
5310 close(fd);
5311
5312 return VK_SUCCESS;
5313 }
5314
5315 static VkResult radv_import_sync_fd(struct radv_device *device,
5316 int fd,
5317 uint32_t *syncobj)
5318 {
5319 /* If we need to create a syncobj, create it locally so that on error we don't
5320 * leave the semaphore/fence with a syncobj in an undetermined state. */
5321 uint32_t syncobj_handle = *syncobj;
5322 if (!syncobj_handle) {
5323 int ret = device->ws->create_syncobj(device->ws, &syncobj_handle);
5324 if (ret) {
5325 return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
5326 }
5327 }
5328
5329 if (fd == -1) {
5330 device->ws->signal_syncobj(device->ws, syncobj_handle);
5331 } else {
5332 int ret = device->ws->import_syncobj_from_sync_file(device->ws, syncobj_handle, fd);
5333 if (ret != 0)
5334 return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
5335 }
5336
5337 *syncobj = syncobj_handle;
5338 if (fd != -1)
5339 close(fd);
5340
5341 return VK_SUCCESS;
5342 }
5343
5344 VkResult radv_ImportSemaphoreFdKHR(VkDevice _device,
5345 const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
5346 {
5347 RADV_FROM_HANDLE(radv_device, device, _device);
5348 RADV_FROM_HANDLE(radv_semaphore, sem, pImportSemaphoreFdInfo->semaphore);
5349 uint32_t *syncobj_dst = NULL;
5350
5351 if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
5352 syncobj_dst = &sem->temp_syncobj;
5353 } else {
5354 syncobj_dst = &sem->syncobj;
5355 }
5356
5357 switch(pImportSemaphoreFdInfo->handleType) {
5358 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
5359 return radv_import_opaque_fd(device, pImportSemaphoreFdInfo->fd, syncobj_dst);
5360 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
5361 return radv_import_sync_fd(device, pImportSemaphoreFdInfo->fd, syncobj_dst);
5362 default:
5363 unreachable("Unhandled semaphore handle type");
5364 }
5365 }
5366
5367 VkResult radv_GetSemaphoreFdKHR(VkDevice _device,
5368 const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
5369 int *pFd)
5370 {
5371 RADV_FROM_HANDLE(radv_device, device, _device);
5372 RADV_FROM_HANDLE(radv_semaphore, sem, pGetFdInfo->semaphore);
5373 int ret;
5374 uint32_t syncobj_handle;
5375
5376 if (sem->temp_syncobj)
5377 syncobj_handle = sem->temp_syncobj;
5378 else
5379 syncobj_handle = sem->syncobj;
5380
5381 switch(pGetFdInfo->handleType) {
5382 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
5383 ret = device->ws->export_syncobj(device->ws, syncobj_handle, pFd);
5384 break;
5385 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
5386 ret = device->ws->export_syncobj_to_sync_file(device->ws, syncobj_handle, pFd);
5387 if (!ret) {
5388 if (sem->temp_syncobj) {
5389 device->ws->destroy_syncobj(device->ws, sem->temp_syncobj);
5390 sem->temp_syncobj = 0;
5391 } else {
5392 device->ws->reset_syncobj(device->ws, syncobj_handle);
5393 }
5394 }
5395 break;
5396 default:
5397 unreachable("Unhandled semaphore handle type");
5398 }
5399
5400 if (ret)
5401 return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
5402 return VK_SUCCESS;
5403 }
5404
5405 void radv_GetPhysicalDeviceExternalSemaphoreProperties(
5406 VkPhysicalDevice physicalDevice,
5407 const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo,
5408 VkExternalSemaphoreProperties *pExternalSemaphoreProperties)
5409 {
5410 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
5411
5412 /* Require has_syncobj_wait_for_submit, since the syncobj signal ioctl was introduced at virtually the same time. */
5413 if (pdevice->rad_info.has_syncobj_wait_for_submit &&
5414 (pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT ||
5415 pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT)) {
5416 pExternalSemaphoreProperties->exportFromImportedHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
5417 pExternalSemaphoreProperties->compatibleHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
5418 pExternalSemaphoreProperties->externalSemaphoreFeatures = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
5419 VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
5420 } else if (pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) {
5421 pExternalSemaphoreProperties->exportFromImportedHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
5422 pExternalSemaphoreProperties->compatibleHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
5423 pExternalSemaphoreProperties->externalSemaphoreFeatures = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
5424 VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
5425 } else {
5426 pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
5427 pExternalSemaphoreProperties->compatibleHandleTypes = 0;
5428 pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
5429 }
5430 }
5431
5432 VkResult radv_ImportFenceFdKHR(VkDevice _device,
5433 const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
5434 {
5435 RADV_FROM_HANDLE(radv_device, device, _device);
5436 RADV_FROM_HANDLE(radv_fence, fence, pImportFenceFdInfo->fence);
5437 uint32_t *syncobj_dst = NULL;
5438
5439
5440 if (pImportFenceFdInfo->flags & VK_FENCE_IMPORT_TEMPORARY_BIT) {
5441 syncobj_dst = &fence->temp_syncobj;
5442 } else {
5443 syncobj_dst = &fence->syncobj;
5444 }
5445
5446 switch(pImportFenceFdInfo->handleType) {
5447 case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
5448 return radv_import_opaque_fd(device, pImportFenceFdInfo->fd, syncobj_dst);
5449 case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
5450 return radv_import_sync_fd(device, pImportFenceFdInfo->fd, syncobj_dst);
5451 default:
5452 unreachable("Unhandled fence handle type");
5453 }
5454 }
5455
5456 VkResult radv_GetFenceFdKHR(VkDevice _device,
5457 const VkFenceGetFdInfoKHR *pGetFdInfo,
5458 int *pFd)
5459 {
5460 RADV_FROM_HANDLE(radv_device, device, _device);
5461 RADV_FROM_HANDLE(radv_fence, fence, pGetFdInfo->fence);
5462 int ret;
5463 uint32_t syncobj_handle;
5464
5465 if (fence->temp_syncobj)
5466 syncobj_handle = fence->temp_syncobj;
5467 else
5468 syncobj_handle = fence->syncobj;
5469
5470 switch(pGetFdInfo->handleType) {
5471 case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
5472 ret = device->ws->export_syncobj(device->ws, syncobj_handle, pFd);
5473 break;
5474 case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
5475 ret = device->ws->export_syncobj_to_sync_file(device->ws, syncobj_handle, pFd);
5476 if (!ret) {
5477 if (fence->temp_syncobj) {
5478 device->ws->destroy_syncobj(device->ws, fence->temp_syncobj);
5479 fence->temp_syncobj = 0;
5480 } else {
5481 device->ws->reset_syncobj(device->ws, syncobj_handle);
5482 }
5483 }
5484 break;
5485 default:
5486 unreachable("Unhandled fence handle type");
5487 }
5488
5489 if (ret)
5490 return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
5491 return VK_SUCCESS;
5492 }
5493
5494 void radv_GetPhysicalDeviceExternalFenceProperties(
5495 VkPhysicalDevice physicalDevice,
5496 const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo,
5497 VkExternalFenceProperties *pExternalFenceProperties)
5498 {
5499 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
5500
5501 if (pdevice->rad_info.has_syncobj_wait_for_submit &&
5502 (pExternalFenceInfo->handleType == VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT ||
5503 pExternalFenceInfo->handleType == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT)) {
5504 pExternalFenceProperties->exportFromImportedHandleTypes = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT | VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
5505 pExternalFenceProperties->compatibleHandleTypes = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT | VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
5506 pExternalFenceProperties->externalFenceFeatures = VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT |
5507 VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT;
5508 } else {
5509 pExternalFenceProperties->exportFromImportedHandleTypes = 0;
5510 pExternalFenceProperties->compatibleHandleTypes = 0;
5511 pExternalFenceProperties->externalFenceFeatures = 0;
5512 }
5513 }
5514
5515 VkResult
5516 radv_CreateDebugReportCallbackEXT(VkInstance _instance,
5517 const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
5518 const VkAllocationCallbacks* pAllocator,
5519 VkDebugReportCallbackEXT* pCallback)
5520 {
5521 RADV_FROM_HANDLE(radv_instance, instance, _instance);
5522 return vk_create_debug_report_callback(&instance->debug_report_callbacks,
5523 pCreateInfo, pAllocator, &instance->alloc,
5524 pCallback);
5525 }
5526
5527 void
5528 radv_DestroyDebugReportCallbackEXT(VkInstance _instance,
5529 VkDebugReportCallbackEXT _callback,
5530 const VkAllocationCallbacks* pAllocator)
5531 {
5532 RADV_FROM_HANDLE(radv_instance, instance, _instance);
5533 vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
5534 _callback, pAllocator, &instance->alloc);
5535 }
5536
5537 void
5538 radv_DebugReportMessageEXT(VkInstance _instance,
5539 VkDebugReportFlagsEXT flags,
5540 VkDebugReportObjectTypeEXT objectType,
5541 uint64_t object,
5542 size_t location,
5543 int32_t messageCode,
5544 const char* pLayerPrefix,
5545 const char* pMessage)
5546 {
5547 RADV_FROM_HANDLE(radv_instance, instance, _instance);
5548 vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
5549 object, location, messageCode, pLayerPrefix, pMessage);
5550 }
5551
5552 void
5553 radv_GetDeviceGroupPeerMemoryFeatures(
5554 VkDevice device,
5555 uint32_t heapIndex,
5556 uint32_t localDeviceIndex,
5557 uint32_t remoteDeviceIndex,
5558 VkPeerMemoryFeatureFlags* pPeerMemoryFeatures)
5559 {
5560 assert(localDeviceIndex == remoteDeviceIndex);
5561
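/* RADV only exposes single-device groups, so peer memory accesses are just local accesses and support all features. */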
5562 *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
5563 VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
5564 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
5565 VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
5566 }
5567
5568 static const VkTimeDomainEXT radv_time_domains[] = {
5569 VK_TIME_DOMAIN_DEVICE_EXT,
5570 VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT,
5571 VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT,
5572 };
5573
5574 VkResult radv_GetPhysicalDeviceCalibrateableTimeDomainsEXT(
5575 VkPhysicalDevice physicalDevice,
5576 uint32_t *pTimeDomainCount,
5577 VkTimeDomainEXT *pTimeDomains)
5578 {
5579 int d;
5580 VK_OUTARRAY_MAKE(out, pTimeDomains, pTimeDomainCount);
5581
5582 for (d = 0; d < ARRAY_SIZE(radv_time_domains); d++) {
5583 vk_outarray_append(&out, i) {
5584 *i = radv_time_domains[d];
5585 }
5586 }
5587
5588 return vk_outarray_status(&out);
5589 }
5590
5591 static uint64_t
5592 radv_clock_gettime(clockid_t clock_id)
5593 {
5594 struct timespec current;
5595 int ret;
5596
5597 ret = clock_gettime(clock_id, &current);
5598 if (ret < 0 && clock_id == CLOCK_MONOTONIC_RAW)
5599 ret = clock_gettime(CLOCK_MONOTONIC, &current);
5600 if (ret < 0)
5601 return 0;
5602
5603 return (uint64_t) current.tv_sec * 1000000000ULL + current.tv_nsec;
5604 }
5605
5606 VkResult radv_GetCalibratedTimestampsEXT(
5607 VkDevice _device,
5608 uint32_t timestampCount,
5609 const VkCalibratedTimestampInfoEXT *pTimestampInfos,
5610 uint64_t *pTimestamps,
5611 uint64_t *pMaxDeviation)
5612 {
5613 RADV_FROM_HANDLE(radv_device, device, _device);
5614 uint32_t clock_crystal_freq = device->physical_device->rad_info.clock_crystal_freq;
5615 int d;
5616 uint64_t begin, end;
5617 uint64_t max_clock_period = 0;
5618
5619 begin = radv_clock_gettime(CLOCK_MONOTONIC_RAW);
5620
5621 for (d = 0; d < timestampCount; d++) {
5622 switch (pTimestampInfos[d].timeDomain) {
5623 case VK_TIME_DOMAIN_DEVICE_EXT:
5624 pTimestamps[d] = device->ws->query_value(device->ws,
5625 RADEON_TIMESTAMP);
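/* clock_crystal_freq is in kHz, so the tick period in ns is 10^6 / freq. */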
5626 uint64_t device_period = DIV_ROUND_UP(1000000, clock_crystal_freq);
5627 max_clock_period = MAX2(max_clock_period, device_period);
5628 break;
5629 case VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT:
5630 pTimestamps[d] = radv_clock_gettime(CLOCK_MONOTONIC);
5631 max_clock_period = MAX2(max_clock_period, 1);
5632 break;
5633
5634 case VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT:
5635 pTimestamps[d] = begin;
5636 break;
5637 default:
5638 pTimestamps[d] = 0;
5639 break;
5640 }
5641 }
5642
5643 end = radv_clock_gettime(CLOCK_MONOTONIC_RAW);
5644
5645 /*
5646 * The maximum deviation is the sum of the interval over which we
5647 * perform the sampling and the maximum period of any sampled
5648 * clock. That's because the maximum skew between any two sampled
5649 * clock edges is when the sampled clock with the largest period is
5650 * sampled at the end of that period but right at the beginning of the
5651 * sampling interval and some other clock is sampled right at the
5652 * beginning of its sampling period and right at the end of the
5653 * sampling interval. Let's assume the GPU has the longest clock
5654 * period and that the application is sampling GPU and monotonic:
5655 *
5656 * s e
5657 * w x y z 0 1 2 3 4 5 6 7 8 9 a b c d e f
5658 * Raw -_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-
5659 *
5660 * g
5661 * 0 1 2 3
5662 * GPU -----_____-----_____-----_____-----_____
5663 *
5664 * m
5665 * x y z 0 1 2 3 4 5 6 7 8 9 a b c
5666 * Monotonic -_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-
5667 *
5668 * Interval <----------------->
5669 * Deviation <-------------------------->
5670 *
5671 * s = read(raw) 2
5672 * g = read(GPU) 1
5673 * m = read(monotonic) 2
5674 * e = read(raw) b
5675 *
5676 * We round the sample interval up by one tick to cover sampling error
5677 * in the interval clock
5678 */
5679
5680 uint64_t sample_interval = end - begin + 1;
5681
5682 *pMaxDeviation = sample_interval + max_clock_period;
5683
5684 return VK_SUCCESS;
5685 }
5686
5687 void radv_GetPhysicalDeviceMultisamplePropertiesEXT(
5688 VkPhysicalDevice physicalDevice,
5689 VkSampleCountFlagBits samples,
5690 VkMultisamplePropertiesEXT* pMultisampleProperties)
5691 {
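/* Programmable sample locations are limited to a 2x2 pixel grid and to 2x/4x/8x MSAA. */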
5692 if (samples & (VK_SAMPLE_COUNT_2_BIT |
5693 VK_SAMPLE_COUNT_4_BIT |
5694 VK_SAMPLE_COUNT_8_BIT)) {
5695 pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 2, 2 };
5696 } else {
5697 pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 0, 0 };
5698 }
5699 }