1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 */
27
28 #include <stdbool.h>
29 #include <string.h>
30 #include <unistd.h>
31 #include <fcntl.h>
32 #include "radv_debug.h"
33 #include "radv_private.h"
34 #include "radv_shader.h"
35 #include "radv_cs.h"
36 #include "util/disk_cache.h"
37 #include "util/strtod.h"
38 #include "vk_util.h"
39 #include <xf86drm.h>
40 #include <amdgpu.h>
41 #include <amdgpu_drm.h>
42 #include "winsys/amdgpu/radv_amdgpu_winsys_public.h"
43 #include "ac_llvm_util.h"
44 #include "vk_format.h"
45 #include "sid.h"
46 #include "gfx9d.h"
47 #include "util/debug.h"
48
49 static int
50 radv_device_get_cache_uuid(enum radeon_family family, void *uuid)
51 {
52 uint32_t mesa_timestamp, llvm_timestamp;
53 uint16_t f = family;
54 memset(uuid, 0, VK_UUID_SIZE);
55 if (!disk_cache_get_function_timestamp(radv_device_get_cache_uuid, &mesa_timestamp) ||
56 !disk_cache_get_function_timestamp(LLVMInitializeAMDGPUTargetInfo, &llvm_timestamp))
57 return -1;
58
59 memcpy(uuid, &mesa_timestamp, 4);
60 memcpy((char*)uuid + 4, &llvm_timestamp, 4);
61 memcpy((char*)uuid + 8, &f, 2);
62 snprintf((char*)uuid + 10, VK_UUID_SIZE - 10, "radv");
63 return 0;
64 }
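/*
 * Illustrative sketch (editor's note, not driver code): assuming the usual
 * 16-byte VK_UUID_SIZE, the cache UUID built above is laid out as:
 *
 *   bytes  0..3   mesa_timestamp  (build timestamp of the radv DSO)
 *   bytes  4..7   llvm_timestamp  (build timestamp of the LLVM target DSO)
 *   bytes  8..9   family          (enum radeon_family, truncated to 16 bits)
 *   bytes 10..15  "radv"          (NUL-padded tag from the snprintf)
 *
 * Two builds share on-disk shader cache entries only when all four
 * components match.
 */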
65
66 static void
67 radv_get_driver_uuid(void *uuid)
68 {
69 ac_compute_driver_uuid(uuid, VK_UUID_SIZE);
70 }
71
72 static void
73 radv_get_device_uuid(struct radeon_info *info, void *uuid)
74 {
75 ac_compute_device_uuid(info, uuid, VK_UUID_SIZE);
76 }
77
78 static void
79 radv_get_device_name(enum radeon_family family, char *name, size_t name_len)
80 {
81 const char *chip_string;
82 char llvm_string[32] = {};
83
84 switch (family) {
85 case CHIP_TAHITI: chip_string = "AMD RADV TAHITI"; break;
86 case CHIP_PITCAIRN: chip_string = "AMD RADV PITCAIRN"; break;
87 case CHIP_VERDE: chip_string = "AMD RADV CAPE VERDE"; break;
88 case CHIP_OLAND: chip_string = "AMD RADV OLAND"; break;
89 case CHIP_HAINAN: chip_string = "AMD RADV HAINAN"; break;
90 case CHIP_BONAIRE: chip_string = "AMD RADV BONAIRE"; break;
91 case CHIP_KAVERI: chip_string = "AMD RADV KAVERI"; break;
92 case CHIP_KABINI: chip_string = "AMD RADV KABINI"; break;
93 case CHIP_HAWAII: chip_string = "AMD RADV HAWAII"; break;
94 case CHIP_MULLINS: chip_string = "AMD RADV MULLINS"; break;
95 case CHIP_TONGA: chip_string = "AMD RADV TONGA"; break;
96 case CHIP_ICELAND: chip_string = "AMD RADV ICELAND"; break;
97 case CHIP_CARRIZO: chip_string = "AMD RADV CARRIZO"; break;
98 case CHIP_FIJI: chip_string = "AMD RADV FIJI"; break;
99 case CHIP_POLARIS10: chip_string = "AMD RADV POLARIS10"; break;
100 case CHIP_POLARIS11: chip_string = "AMD RADV POLARIS11"; break;
101 case CHIP_POLARIS12: chip_string = "AMD RADV POLARIS12"; break;
102 case CHIP_STONEY: chip_string = "AMD RADV STONEY"; break;
103 case CHIP_VEGA10: chip_string = "AMD RADV VEGA"; break;
104 case CHIP_RAVEN: chip_string = "AMD RADV RAVEN"; break;
105 default: chip_string = "AMD RADV unknown"; break;
106 }
107
108 if (HAVE_LLVM > 0) {
109 snprintf(llvm_string, sizeof(llvm_string),
110 " (LLVM %i.%i.%i)", (HAVE_LLVM >> 8) & 0xff,
111 HAVE_LLVM & 0xff, MESA_LLVM_VERSION_PATCH);
112 }
113
114 snprintf(name, name_len, "%s%s", chip_string, llvm_string);
115 }
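/*
 * Example output (illustrative): on a Polaris10 part with LLVM 6.0.0 this
 * produces "AMD RADV POLARIS10 (LLVM 6.0.0)", assuming HAVE_LLVM encodes
 * the version as 0x0600 and MESA_LLVM_VERSION_PATCH is 0.
 */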
116
117 static void
118 radv_physical_device_init_mem_types(struct radv_physical_device *device)
119 {
120 STATIC_ASSERT(RADV_MEM_HEAP_COUNT <= VK_MAX_MEMORY_HEAPS);
121 uint64_t visible_vram_size = MIN2(device->rad_info.vram_size,
122 device->rad_info.vram_vis_size);
123
124 int vram_index = -1, visible_vram_index = -1, gart_index = -1;
125 device->memory_properties.memoryHeapCount = 0;
126 if (device->rad_info.vram_size - visible_vram_size > 0) {
127 vram_index = device->memory_properties.memoryHeapCount++;
128 device->memory_properties.memoryHeaps[vram_index] = (VkMemoryHeap) {
129 .size = device->rad_info.vram_size - visible_vram_size,
130 .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
131 };
132 }
133 if (visible_vram_size) {
134 visible_vram_index = device->memory_properties.memoryHeapCount++;
135 device->memory_properties.memoryHeaps[visible_vram_index] = (VkMemoryHeap) {
136 .size = visible_vram_size,
137 .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
138 };
139 }
140 if (device->rad_info.gart_size > 0) {
141 gart_index = device->memory_properties.memoryHeapCount++;
142 device->memory_properties.memoryHeaps[gart_index] = (VkMemoryHeap) {
143 .size = device->rad_info.gart_size,
144 .flags = 0,
145 };
146 }
147
148 STATIC_ASSERT(RADV_MEM_TYPE_COUNT <= VK_MAX_MEMORY_TYPES);
149 unsigned type_count = 0;
150 if (vram_index >= 0) {
151 device->mem_type_indices[type_count] = RADV_MEM_TYPE_VRAM;
152 device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
153 .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
154 .heapIndex = vram_index,
155 };
156 }
157 if (gart_index >= 0) {
158 device->mem_type_indices[type_count] = RADV_MEM_TYPE_GTT_WRITE_COMBINE;
159 device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
160 .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
161 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
162 .heapIndex = gart_index,
163 };
164 }
165 if (visible_vram_index >= 0) {
166 device->mem_type_indices[type_count] = RADV_MEM_TYPE_VRAM_CPU_ACCESS;
167 device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
168 .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
169 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
170 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
171 .heapIndex = visible_vram_index,
172 };
173 }
174 if (gart_index >= 0) {
175 device->mem_type_indices[type_count] = RADV_MEM_TYPE_GTT_CACHED;
176 device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
177 .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
178 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
179 VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
180 .heapIndex = gart_index,
181 };
182 }
183 device->memory_properties.memoryTypeCount = type_count;
184 }
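/*
 * Illustrative example (not normative): a discrete GPU with 8 GiB VRAM,
 * a 256 MiB CPU-visible BAR and 8 GiB GTT would advertise three heaps
 * (7.75 GiB DEVICE_LOCAL, 256 MiB DEVICE_LOCAL + CPU-visible, 8 GiB GTT)
 * and four memory types in this order: VRAM, GTT write-combined, visible
 * VRAM, GTT cached. An APU whose VRAM is entirely CPU-visible would skip
 * the first heap and the plain VRAM type.
 */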
185
186 static void
187 radv_handle_env_var_force_family(struct radv_physical_device *device)
188 {
189 const char *family = getenv("RADV_FORCE_FAMILY");
190 unsigned i;
191
192 if (!family)
193 return;
194
195 for (i = CHIP_TAHITI; i < CHIP_LAST; i++) {
196 if (!strcmp(family, ac_get_llvm_processor_name(i))) {
197 /* Override family and chip_class. */
198 device->rad_info.family = i;
199
200 if (i >= CHIP_VEGA10)
201 device->rad_info.chip_class = GFX9;
202 else if (i >= CHIP_TONGA)
203 device->rad_info.chip_class = VI;
204 else if (i >= CHIP_BONAIRE)
205 device->rad_info.chip_class = CIK;
206 else
207 device->rad_info.chip_class = SI;
208
209 return;
210 }
211 }
212
213 fprintf(stderr, "radv: Unknown family: %s\n", family);
214 exit(1);
215 }
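/*
 * Usage sketch (illustrative): the override is driven purely by the
 * environment, e.g.
 *
 *   RADV_FORCE_FAMILY=polaris10 ./app
 *
 * where the accepted names are whatever ac_get_llvm_processor_name()
 * returns for each family, so the exact spelling follows the LLVM
 * processor names.
 */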
216
217 static VkResult
218 radv_physical_device_init(struct radv_physical_device *device,
219 struct radv_instance *instance,
220 drmDevicePtr drm_device)
221 {
222 const char *path = drm_device->nodes[DRM_NODE_RENDER];
223 VkResult result;
224 drmVersionPtr version;
225 int fd;
226
227 fd = open(path, O_RDWR | O_CLOEXEC);
228 if (fd < 0)
229 return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
230
231 version = drmGetVersion(fd);
232 if (!version) {
233 close(fd);
234 return vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
235 "failed to get version %s: %m", path);
236 }
237
238 if (strcmp(version->name, "amdgpu")) {
239 drmFreeVersion(version);
240 close(fd);
241 return VK_ERROR_INCOMPATIBLE_DRIVER;
242 }
243 drmFreeVersion(version);
244
245 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
246 device->instance = instance;
247 assert(strlen(path) < ARRAY_SIZE(device->path));
248 strncpy(device->path, path, ARRAY_SIZE(device->path));
249
250 device->ws = radv_amdgpu_winsys_create(fd, instance->debug_flags,
251 instance->perftest_flags);
252 if (!device->ws) {
253 result = VK_ERROR_INCOMPATIBLE_DRIVER;
254 goto fail;
255 }
256
257 device->local_fd = fd;
258 device->ws->query_info(device->ws, &device->rad_info);
259
260 radv_handle_env_var_force_family(device);
261
262 radv_get_device_name(device->rad_info.family, device->name, sizeof(device->name));
263
264 if (radv_device_get_cache_uuid(device->rad_info.family, device->cache_uuid)) {
265 device->ws->destroy(device->ws);
266 result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
267 "cannot generate UUID");
268 goto fail;
269 }
270
271 /* These flags affect shader compilation. */
272 uint64_t shader_env_flags =
273 (device->instance->perftest_flags & RADV_PERFTEST_SISCHED ? 0x1 : 0) |
274 (device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH ? 0x2 : 0);
275
276 /* The GPU id is already embedded in the uuid so we just pass "radv"
277 * when creating the cache.
278 */
279 char buf[VK_UUID_SIZE * 2 + 1];
280 disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
281 device->disk_cache = disk_cache_create(device->name, buf, shader_env_flags);
282
283 fprintf(stderr, "WARNING: radv is not a conformant vulkan implementation, testing use only.\n");
284
285 radv_get_driver_uuid(&device->driver_uuid);
286 radv_get_device_uuid(&device->rad_info, &device->device_uuid);
287
288 if (device->rad_info.family == CHIP_STONEY ||
289 device->rad_info.chip_class >= GFX9) {
290 device->has_rbplus = true;
291 device->rbplus_allowed = device->rad_info.family == CHIP_STONEY;
292 }
293
294 /* The mere presence of CLEAR_STATE in the IB causes random GPU hangs
295 * on SI.
296 */
297 device->has_clear_state = device->rad_info.chip_class >= CIK;
298
299 device->cpdma_prefetch_writes_memory = device->rad_info.chip_class <= VI;
300
301 /* Vega10/Raven need a special workaround for a hardware bug. */
302 device->has_scissor_bug = device->rad_info.family == CHIP_VEGA10 ||
303 device->rad_info.family == CHIP_RAVEN;
304
305 radv_physical_device_init_mem_types(device);
306 radv_fill_device_extension_table(device, &device->supported_extensions);
307
308 result = radv_init_wsi(device);
309 if (result != VK_SUCCESS) {
310 device->ws->destroy(device->ws);
311 goto fail;
312 }
313
314 return VK_SUCCESS;
315
316 fail:
317 close(fd);
318 return result;
319 }
320
321 static void
322 radv_physical_device_finish(struct radv_physical_device *device)
323 {
324 radv_finish_wsi(device);
325 device->ws->destroy(device->ws);
326 disk_cache_destroy(device->disk_cache);
327 close(device->local_fd);
328 }
329
330 static void *
331 default_alloc_func(void *pUserData, size_t size, size_t align,
332 VkSystemAllocationScope allocationScope)
333 {
334 return malloc(size);
335 }
336
337 static void *
338 default_realloc_func(void *pUserData, void *pOriginal, size_t size,
339 size_t align, VkSystemAllocationScope allocationScope)
340 {
341 return realloc(pOriginal, size);
342 }
343
344 static void
345 default_free_func(void *pUserData, void *pMemory)
346 {
347 free(pMemory);
348 }
349
350 static const VkAllocationCallbacks default_alloc = {
351 .pUserData = NULL,
352 .pfnAllocation = default_alloc_func,
353 .pfnReallocation = default_realloc_func,
354 .pfnFree = default_free_func,
355 };
356
357 static const struct debug_control radv_debug_options[] = {
358 {"nofastclears", RADV_DEBUG_NO_FAST_CLEARS},
359 {"nodcc", RADV_DEBUG_NO_DCC},
360 {"shaders", RADV_DEBUG_DUMP_SHADERS},
361 {"nocache", RADV_DEBUG_NO_CACHE},
362 {"shaderstats", RADV_DEBUG_DUMP_SHADER_STATS},
363 {"nohiz", RADV_DEBUG_NO_HIZ},
364 {"nocompute", RADV_DEBUG_NO_COMPUTE_QUEUE},
365 {"unsafemath", RADV_DEBUG_UNSAFE_MATH},
366 {"allbos", RADV_DEBUG_ALL_BOS},
367 {"noibs", RADV_DEBUG_NO_IBS},
368 {"spirv", RADV_DEBUG_DUMP_SPIRV},
369 {"vmfaults", RADV_DEBUG_VM_FAULTS},
370 {"zerovram", RADV_DEBUG_ZERO_VRAM},
371 {"syncshaders", RADV_DEBUG_SYNC_SHADERS},
372 {"nosisched", RADV_DEBUG_NO_SISCHED},
373 {"preoptir", RADV_DEBUG_PREOPTIR},
374 {NULL, 0}
375 };
376
377 const char *
378 radv_get_debug_option_name(int id)
379 {
380 assert(id < ARRAY_SIZE(radv_debug_options) - 1);
381 return radv_debug_options[id].string;
382 }
383
384 static const struct debug_control radv_perftest_options[] = {
385 {"nobatchchain", RADV_PERFTEST_NO_BATCHCHAIN},
386 {"sisched", RADV_PERFTEST_SISCHED},
387 {"localbos", RADV_PERFTEST_LOCAL_BOS},
388 {"binning", RADV_PERFTEST_BINNING},
389 {NULL, 0}
390 };
391
392 const char *
393 radv_get_perftest_option_name(int id)
394 {
395 assert(id < ARRAY_SIZE(radv_perftest_options) - 1);
396 return radv_perftest_options[id].string;
397 }
398
399 static void
400 radv_handle_per_app_options(struct radv_instance *instance,
401 const VkApplicationInfo *info)
402 {
403 const char *name = info ? info->pApplicationName : NULL;
404
405 if (!name)
406 return;
407
408 if (!strcmp(name, "Talos - Linux - 32bit") ||
409 !strcmp(name, "Talos - Linux - 64bit")) {
410 /* Force enable LLVM sisched for Talos because it looks safe
411 * and it gives a few more FPS.
412 */
413 instance->perftest_flags |= RADV_PERFTEST_SISCHED;
414 }
415 }
416
417 static int radv_get_instance_extension_index(const char *name)
418 {
419 for (unsigned i = 0; i < RADV_INSTANCE_EXTENSION_COUNT; ++i) {
420 if (strcmp(name, radv_instance_extensions[i].extensionName) == 0)
421 return i;
422 }
423 return -1;
424 }
425
426
427 VkResult radv_CreateInstance(
428 const VkInstanceCreateInfo* pCreateInfo,
429 const VkAllocationCallbacks* pAllocator,
430 VkInstance* pInstance)
431 {
432 struct radv_instance *instance;
433 VkResult result;
434
435 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
436
437 uint32_t client_version;
438 if (pCreateInfo->pApplicationInfo &&
439 pCreateInfo->pApplicationInfo->apiVersion != 0) {
440 client_version = pCreateInfo->pApplicationInfo->apiVersion;
441 } else {
442 client_version = VK_MAKE_VERSION(1, 0, 0);
443 }
444
445 if (VK_MAKE_VERSION(1, 0, 0) > client_version ||
446 client_version > VK_MAKE_VERSION(1, 1, 0xfff)) {
447 return vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
448 "Client requested version %d.%d.%d",
449 VK_VERSION_MAJOR(client_version),
450 VK_VERSION_MINOR(client_version),
451 VK_VERSION_PATCH(client_version));
452 }
453
454 instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
455 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
456 if (!instance)
457 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
458
459 instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
460
461 if (pAllocator)
462 instance->alloc = *pAllocator;
463 else
464 instance->alloc = default_alloc;
465
466 instance->apiVersion = client_version;
467 instance->physicalDeviceCount = -1;
468
469 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
470 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
471 int index = radv_get_instance_extension_index(ext_name);
472
473 if (index < 0 || !radv_supported_instance_extensions.extensions[index]) {
474 vk_free2(&default_alloc, pAllocator, instance);
475 return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
476 }
477
478 instance->enabled_extensions.extensions[index] = true;
479 }
480
481 result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
482 if (result != VK_SUCCESS) {
483 vk_free2(&default_alloc, pAllocator, instance);
484 return vk_error(result);
485 }
486
487 _mesa_locale_init();
488
489 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
490
491 instance->debug_flags = parse_debug_string(getenv("RADV_DEBUG"),
492 radv_debug_options);
493
494 instance->perftest_flags = parse_debug_string(getenv("RADV_PERFTEST"),
495 radv_perftest_options);
496
497 radv_handle_per_app_options(instance, pCreateInfo->pApplicationInfo);
498
499 if (instance->debug_flags & RADV_DEBUG_NO_SISCHED) {
500 /* Disable sisched when the user requests it; this is mostly
501 * useful when the driver force-enables sisched for the given
502 * application.
503 */
504 instance->perftest_flags &= ~RADV_PERFTEST_SISCHED;
505 }
506
507 *pInstance = radv_instance_to_handle(instance);
508
509 return VK_SUCCESS;
510 }
511
512 void radv_DestroyInstance(
513 VkInstance _instance,
514 const VkAllocationCallbacks* pAllocator)
515 {
516 RADV_FROM_HANDLE(radv_instance, instance, _instance);
517
518 if (!instance)
519 return;
520
521 for (int i = 0; i < instance->physicalDeviceCount; ++i) {
522 radv_physical_device_finish(instance->physicalDevices + i);
523 }
524
525 VG(VALGRIND_DESTROY_MEMPOOL(instance));
526
527 _mesa_locale_fini();
528
529 vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
530
531 vk_free(&instance->alloc, instance);
532 }
533
534 static VkResult
535 radv_enumerate_devices(struct radv_instance *instance)
536 {
537 /* TODO: Check for more devices? */
538 drmDevicePtr devices[8];
539 VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
540 int max_devices;
541
542 instance->physicalDeviceCount = 0;
543
544 max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
545 if (max_devices < 1)
546 return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
547
548 for (unsigned i = 0; i < (unsigned)max_devices; i++) {
549 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
550 devices[i]->bustype == DRM_BUS_PCI &&
551 devices[i]->deviceinfo.pci->vendor_id == ATI_VENDOR_ID) {
552
553 result = radv_physical_device_init(instance->physicalDevices +
554 instance->physicalDeviceCount,
555 instance,
556 devices[i]);
557 if (result == VK_SUCCESS)
558 ++instance->physicalDeviceCount;
559 else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
560 break;
561 }
562 }
563 drmFreeDevices(devices, max_devices);
564
565 return result;
566 }
567
568 VkResult radv_EnumeratePhysicalDevices(
569 VkInstance _instance,
570 uint32_t* pPhysicalDeviceCount,
571 VkPhysicalDevice* pPhysicalDevices)
572 {
573 RADV_FROM_HANDLE(radv_instance, instance, _instance);
574 VkResult result;
575
576 if (instance->physicalDeviceCount < 0) {
577 result = radv_enumerate_devices(instance);
578 if (result != VK_SUCCESS &&
579 result != VK_ERROR_INCOMPATIBLE_DRIVER)
580 return result;
581 }
582
583 if (!pPhysicalDevices) {
584 *pPhysicalDeviceCount = instance->physicalDeviceCount;
585 } else {
586 *pPhysicalDeviceCount = MIN2(*pPhysicalDeviceCount, instance->physicalDeviceCount);
587 for (unsigned i = 0; i < *pPhysicalDeviceCount; ++i)
588 pPhysicalDevices[i] = radv_physical_device_to_handle(instance->physicalDevices + i);
589 }
590
591 return *pPhysicalDeviceCount < instance->physicalDeviceCount ? VK_INCOMPLETE
592 : VK_SUCCESS;
593 }
594
595 void radv_GetPhysicalDeviceFeatures(
596 VkPhysicalDevice physicalDevice,
597 VkPhysicalDeviceFeatures* pFeatures)
598 {
599 memset(pFeatures, 0, sizeof(*pFeatures));
600
601 *pFeatures = (VkPhysicalDeviceFeatures) {
602 .robustBufferAccess = true,
603 .fullDrawIndexUint32 = true,
604 .imageCubeArray = true,
605 .independentBlend = true,
606 .geometryShader = true,
607 .tessellationShader = true,
608 .sampleRateShading = true,
609 .dualSrcBlend = true,
610 .logicOp = true,
611 .multiDrawIndirect = true,
612 .drawIndirectFirstInstance = true,
613 .depthClamp = true,
614 .depthBiasClamp = true,
615 .fillModeNonSolid = true,
616 .depthBounds = true,
617 .wideLines = true,
618 .largePoints = true,
619 .alphaToOne = true,
620 .multiViewport = true,
621 .samplerAnisotropy = true,
622 .textureCompressionETC2 = false,
623 .textureCompressionASTC_LDR = false,
624 .textureCompressionBC = true,
625 .occlusionQueryPrecise = true,
626 .pipelineStatisticsQuery = true,
627 .vertexPipelineStoresAndAtomics = true,
628 .fragmentStoresAndAtomics = true,
629 .shaderTessellationAndGeometryPointSize = true,
630 .shaderImageGatherExtended = true,
631 .shaderStorageImageExtendedFormats = true,
632 .shaderStorageImageMultisample = false,
633 .shaderUniformBufferArrayDynamicIndexing = true,
634 .shaderSampledImageArrayDynamicIndexing = true,
635 .shaderStorageBufferArrayDynamicIndexing = true,
636 .shaderStorageImageArrayDynamicIndexing = true,
637 .shaderStorageImageReadWithoutFormat = true,
638 .shaderStorageImageWriteWithoutFormat = true,
639 .shaderClipDistance = true,
640 .shaderCullDistance = true,
641 .shaderFloat64 = true,
642 .shaderInt64 = true,
643 .shaderInt16 = false,
644 .sparseBinding = true,
645 .variableMultisampleRate = true,
646 .inheritedQueries = true,
647 };
648 }
649
650 void radv_GetPhysicalDeviceFeatures2(
651 VkPhysicalDevice physicalDevice,
652 VkPhysicalDeviceFeatures2KHR *pFeatures)
653 {
654 vk_foreach_struct(ext, pFeatures->pNext) {
655 switch (ext->sType) {
656 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
657 VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *)ext;
658 features->variablePointersStorageBuffer = true;
659 features->variablePointers = false;
660 break;
661 }
662 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: {
663 VkPhysicalDeviceMultiviewFeaturesKHR *features = (VkPhysicalDeviceMultiviewFeaturesKHR*)ext;
664 features->multiview = true;
665 features->multiviewGeometryShader = true;
666 features->multiviewTessellationShader = true;
667 break;
668 }
669 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: {
670 VkPhysicalDeviceShaderDrawParameterFeatures *features =
671 (VkPhysicalDeviceShaderDrawParameterFeatures*)ext;
672 features->shaderDrawParameters = true;
673 break;
674 }
675 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
676 VkPhysicalDeviceProtectedMemoryFeatures *features =
677 (VkPhysicalDeviceProtectedMemoryFeatures*)ext;
678 features->protectedMemory = false;
679 break;
680 }
681 default:
682 break;
683 }
684 }
685 radv_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
686 }
687
688 void radv_GetPhysicalDeviceProperties(
689 VkPhysicalDevice physicalDevice,
690 VkPhysicalDeviceProperties* pProperties)
691 {
692 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
693 VkSampleCountFlags sample_counts = 0xf;
694
695 /* Make sure that the entire descriptor set is addressable with a signed
696 * 32-bit int, i.e. the sum of all limits scaled by descriptor size has to
697 * be at most 2 GiB. A combined image & sampler object counts as one of
698 * both. This limit is for the pipeline layout, not for the set layout, but
699 * there is no set limit, so we just set a pipeline limit. No app is likely
700 * to hit this soon. */
701 size_t max_descriptor_set_size = ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
702 (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
703 32 /* storage buffer, 32 due to potential space wasted on alignment */ +
704 32 /* sampler, largest when combined with image */ +
705 64 /* sampled image */ +
706 64 /* storage image */);
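/*
 * Worked example (illustrative): the per-descriptor costs above sum to
 * 32 + 32 + 32 + 64 + 64 = 224 bytes, so ignoring the MAX_DYNAMIC_BUFFERS
 * term this limit comes out to roughly 2^31 / 224, i.e. about 9.5 million
 * descriptors per stage.
 */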
707
708 VkPhysicalDeviceLimits limits = {
709 .maxImageDimension1D = (1 << 14),
710 .maxImageDimension2D = (1 << 14),
711 .maxImageDimension3D = (1 << 11),
712 .maxImageDimensionCube = (1 << 14),
713 .maxImageArrayLayers = (1 << 11),
714 .maxTexelBufferElements = 128 * 1024 * 1024,
715 .maxUniformBufferRange = UINT32_MAX,
716 .maxStorageBufferRange = UINT32_MAX,
717 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
718 .maxMemoryAllocationCount = UINT32_MAX,
719 .maxSamplerAllocationCount = 64 * 1024,
720 .bufferImageGranularity = 64, /* A cache line */
721 .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
722 .maxBoundDescriptorSets = MAX_SETS,
723 .maxPerStageDescriptorSamplers = max_descriptor_set_size,
724 .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
725 .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
726 .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
727 .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
728 .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
729 .maxPerStageResources = max_descriptor_set_size,
730 .maxDescriptorSetSamplers = max_descriptor_set_size,
731 .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
732 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2,
733 .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
734 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2,
735 .maxDescriptorSetSampledImages = max_descriptor_set_size,
736 .maxDescriptorSetStorageImages = max_descriptor_set_size,
737 .maxDescriptorSetInputAttachments = max_descriptor_set_size,
738 .maxVertexInputAttributes = 32,
739 .maxVertexInputBindings = 32,
740 .maxVertexInputAttributeOffset = 2047,
741 .maxVertexInputBindingStride = 2048,
742 .maxVertexOutputComponents = 128,
743 .maxTessellationGenerationLevel = 64,
744 .maxTessellationPatchSize = 32,
745 .maxTessellationControlPerVertexInputComponents = 128,
746 .maxTessellationControlPerVertexOutputComponents = 128,
747 .maxTessellationControlPerPatchOutputComponents = 120,
748 .maxTessellationControlTotalOutputComponents = 4096,
749 .maxTessellationEvaluationInputComponents = 128,
750 .maxTessellationEvaluationOutputComponents = 128,
751 .maxGeometryShaderInvocations = 127,
752 .maxGeometryInputComponents = 64,
753 .maxGeometryOutputComponents = 128,
754 .maxGeometryOutputVertices = 256,
755 .maxGeometryTotalOutputComponents = 1024,
756 .maxFragmentInputComponents = 128,
757 .maxFragmentOutputAttachments = 8,
758 .maxFragmentDualSrcAttachments = 1,
759 .maxFragmentCombinedOutputResources = 8,
760 .maxComputeSharedMemorySize = 32768,
761 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
762 .maxComputeWorkGroupInvocations = 2048,
763 .maxComputeWorkGroupSize = {
764 2048,
765 2048,
766 2048
767 },
768 .subPixelPrecisionBits = 4 /* FIXME */,
769 .subTexelPrecisionBits = 4 /* FIXME */,
770 .mipmapPrecisionBits = 4 /* FIXME */,
771 .maxDrawIndexedIndexValue = UINT32_MAX,
772 .maxDrawIndirectCount = UINT32_MAX,
773 .maxSamplerLodBias = 16,
774 .maxSamplerAnisotropy = 16,
775 .maxViewports = MAX_VIEWPORTS,
776 .maxViewportDimensions = { (1 << 14), (1 << 14) },
777 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
778 .viewportSubPixelBits = 13, /* We take a float? */
779 .minMemoryMapAlignment = 4096, /* A page */
780 .minTexelBufferOffsetAlignment = 1,
781 .minUniformBufferOffsetAlignment = 4,
782 .minStorageBufferOffsetAlignment = 4,
783 .minTexelOffset = -32,
784 .maxTexelOffset = 31,
785 .minTexelGatherOffset = -32,
786 .maxTexelGatherOffset = 31,
787 .minInterpolationOffset = -2,
788 .maxInterpolationOffset = 2,
789 .subPixelInterpolationOffsetBits = 8,
790 .maxFramebufferWidth = (1 << 14),
791 .maxFramebufferHeight = (1 << 14),
792 .maxFramebufferLayers = (1 << 10),
793 .framebufferColorSampleCounts = sample_counts,
794 .framebufferDepthSampleCounts = sample_counts,
795 .framebufferStencilSampleCounts = sample_counts,
796 .framebufferNoAttachmentsSampleCounts = sample_counts,
797 .maxColorAttachments = MAX_RTS,
798 .sampledImageColorSampleCounts = sample_counts,
799 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
800 .sampledImageDepthSampleCounts = sample_counts,
801 .sampledImageStencilSampleCounts = sample_counts,
802 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
803 .maxSampleMaskWords = 1,
804 .timestampComputeAndGraphics = true,
805 .timestampPeriod = 1000000.0 / pdevice->rad_info.clock_crystal_freq,
806 .maxClipDistances = 8,
807 .maxCullDistances = 8,
808 .maxCombinedClipAndCullDistances = 8,
809 .discreteQueuePriorities = 1,
810 .pointSizeRange = { 0.125, 255.875 },
811 .lineWidthRange = { 0.0, 7.9921875 },
812 .pointSizeGranularity = (1.0 / 8.0),
813 .lineWidthGranularity = (1.0 / 128.0),
814 .strictLines = false, /* FINISHME */
815 .standardSampleLocations = true,
816 .optimalBufferCopyOffsetAlignment = 128,
817 .optimalBufferCopyRowPitchAlignment = 128,
818 .nonCoherentAtomSize = 64,
819 };
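/*
 * Worked example (illustrative; the value depends on the ASIC): the
 * formula above implies clock_crystal_freq is in kHz, so a 100 MHz
 * reference clock (100000 kHz) would yield timestampPeriod =
 * 1000000.0 / 100000 = 10.0 ns per timestamp tick.
 */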
820
821 *pProperties = (VkPhysicalDeviceProperties) {
822 .apiVersion = radv_physical_device_api_version(pdevice),
823 .driverVersion = vk_get_driver_version(),
824 .vendorID = ATI_VENDOR_ID,
825 .deviceID = pdevice->rad_info.pci_id,
826 .deviceType = pdevice->rad_info.has_dedicated_vram ? VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU : VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
827 .limits = limits,
828 .sparseProperties = {0},
829 };
830
831 strcpy(pProperties->deviceName, pdevice->name);
832 memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
833 }
834
835 void radv_GetPhysicalDeviceProperties2(
836 VkPhysicalDevice physicalDevice,
837 VkPhysicalDeviceProperties2KHR *pProperties)
838 {
839 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
840 radv_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
841
842 vk_foreach_struct(ext, pProperties->pNext) {
843 switch (ext->sType) {
844 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
845 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
846 (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
847 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
848 break;
849 }
850 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
851 VkPhysicalDeviceIDPropertiesKHR *properties = (VkPhysicalDeviceIDPropertiesKHR*)ext;
852 memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
853 memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
854 properties->deviceLUIDValid = false;
855 break;
856 }
857 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: {
858 VkPhysicalDeviceMultiviewPropertiesKHR *properties = (VkPhysicalDeviceMultiviewPropertiesKHR*)ext;
859 properties->maxMultiviewViewCount = MAX_VIEWS;
860 properties->maxMultiviewInstanceIndex = INT_MAX;
861 break;
862 }
863 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
864 VkPhysicalDevicePointClippingPropertiesKHR *properties =
865 (VkPhysicalDevicePointClippingPropertiesKHR*)ext;
866 properties->pointClippingBehavior = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
867 break;
868 }
869 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT: {
870 VkPhysicalDeviceDiscardRectanglePropertiesEXT *properties =
871 (VkPhysicalDeviceDiscardRectanglePropertiesEXT*)ext;
872 properties->maxDiscardRectangles = MAX_DISCARD_RECTANGLES;
873 break;
874 }
875 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT: {
876 VkPhysicalDeviceExternalMemoryHostPropertiesEXT *properties =
877 (VkPhysicalDeviceExternalMemoryHostPropertiesEXT *) ext;
878 properties->minImportedHostPointerAlignment = 4096;
879 break;
880 }
881 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES: {
882 VkPhysicalDeviceSubgroupProperties *properties =
883 (VkPhysicalDeviceSubgroupProperties*)ext;
884 properties->subgroupSize = 64;
885 properties->supportedStages = VK_SHADER_STAGE_ALL;
886 properties->supportedOperations = VK_SUBGROUP_FEATURE_BASIC_BIT;
887 properties->quadOperationsInAllStages = false;
888 break;
889 }
890 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
891 VkPhysicalDeviceMaintenance3Properties *properties =
892 (VkPhysicalDeviceMaintenance3Properties*)ext;
893 /* Make sure everything is addressable by a signed 32-bit int, and
894 * our largest descriptors are 96 bytes. */
895 properties->maxPerSetDescriptors = (1ull << 31) / 96;
896 /* Our buffer size fields allow only this much */
897 properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
898 break;
899 }
900 default:
901 break;
902 }
903 }
904 }
905
906 static void radv_get_physical_device_queue_family_properties(
907 struct radv_physical_device* pdevice,
908 uint32_t* pCount,
909 VkQueueFamilyProperties** pQueueFamilyProperties)
910 {
911 int num_queue_families = 1;
912 int idx;
913 if (pdevice->rad_info.num_compute_rings > 0 &&
914 !(pdevice->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE))
915 num_queue_families++;
916
917 if (pQueueFamilyProperties == NULL) {
918 *pCount = num_queue_families;
919 return;
920 }
921
922 if (!*pCount)
923 return;
924
925 idx = 0;
926 if (*pCount >= 1) {
927 *pQueueFamilyProperties[idx] = (VkQueueFamilyProperties) {
928 .queueFlags = VK_QUEUE_GRAPHICS_BIT |
929 VK_QUEUE_COMPUTE_BIT |
930 VK_QUEUE_TRANSFER_BIT |
931 VK_QUEUE_SPARSE_BINDING_BIT,
932 .queueCount = 1,
933 .timestampValidBits = 64,
934 .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
935 };
936 idx++;
937 }
938
939 if (pdevice->rad_info.num_compute_rings > 0 &&
940 !(pdevice->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE)) {
941 if (*pCount > idx) {
942 *pQueueFamilyProperties[idx] = (VkQueueFamilyProperties) {
943 .queueFlags = VK_QUEUE_COMPUTE_BIT |
944 VK_QUEUE_TRANSFER_BIT |
945 VK_QUEUE_SPARSE_BINDING_BIT,
946 .queueCount = pdevice->rad_info.num_compute_rings,
947 .timestampValidBits = 64,
948 .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
949 };
950 idx++;
951 }
952 }
953 *pCount = idx;
954 }
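/*
 * Application-side usage sketch (illustrative, the standard Vulkan
 * two-call idiom): query the family count first, then fetch properties.
 *
 *   uint32_t count = 0;
 *   vkGetPhysicalDeviceQueueFamilyProperties(pdev, &count, NULL);
 *   VkQueueFamilyProperties props[2];
 *   count = MIN2(count, 2);
 *   vkGetPhysicalDeviceQueueFamilyProperties(pdev, &count, props);
 *
 * radv reports at most two families: GENERAL (graphics + compute +
 * transfer) and, when async compute rings are usable, COMPUTE.
 */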
955
956 void radv_GetPhysicalDeviceQueueFamilyProperties(
957 VkPhysicalDevice physicalDevice,
958 uint32_t* pCount,
959 VkQueueFamilyProperties* pQueueFamilyProperties)
960 {
961 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
962 if (!pQueueFamilyProperties) {
963 radv_get_physical_device_queue_family_properties(pdevice, pCount, NULL);
964 return;
965 }
966 VkQueueFamilyProperties *properties[] = {
967 pQueueFamilyProperties + 0,
968 pQueueFamilyProperties + 1,
969 pQueueFamilyProperties + 2,
970 };
971 radv_get_physical_device_queue_family_properties(pdevice, pCount, properties);
972 assert(*pCount <= 3);
973 }
974
975 void radv_GetPhysicalDeviceQueueFamilyProperties2(
976 VkPhysicalDevice physicalDevice,
977 uint32_t* pCount,
978 VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
979 {
980 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
981 if (!pQueueFamilyProperties) {
982 radv_get_physical_device_queue_family_properties(pdevice, pCount, NULL);
983 return;
984 }
985 VkQueueFamilyProperties *properties[] = {
986 &pQueueFamilyProperties[0].queueFamilyProperties,
987 &pQueueFamilyProperties[1].queueFamilyProperties,
988 &pQueueFamilyProperties[2].queueFamilyProperties,
989 };
990 radv_get_physical_device_queue_family_properties(pdevice, pCount, properties);
991 assert(*pCount <= 3);
992 }
993
994 void radv_GetPhysicalDeviceMemoryProperties(
995 VkPhysicalDevice physicalDevice,
996 VkPhysicalDeviceMemoryProperties *pMemoryProperties)
997 {
998 RADV_FROM_HANDLE(radv_physical_device, physical_device, physicalDevice);
999
1000 *pMemoryProperties = physical_device->memory_properties;
1001 }
1002
1003 void radv_GetPhysicalDeviceMemoryProperties2(
1004 VkPhysicalDevice physicalDevice,
1005 VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
1006 {
1007 radv_GetPhysicalDeviceMemoryProperties(physicalDevice,
1008 &pMemoryProperties->memoryProperties);
1009 }
1010
1011 VkResult radv_GetMemoryHostPointerPropertiesEXT(
1012 VkDevice _device,
1013 VkExternalMemoryHandleTypeFlagBitsKHR handleType,
1014 const void *pHostPointer,
1015 VkMemoryHostPointerPropertiesEXT *pMemoryHostPointerProperties)
1016 {
1017 RADV_FROM_HANDLE(radv_device, device, _device);
1018
1019 switch (handleType)
1020 {
1021 case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT: {
1022 const struct radv_physical_device *physical_device = device->physical_device;
1023 uint32_t memoryTypeBits = 0;
1024 for (int i = 0; i < physical_device->memory_properties.memoryTypeCount; i++) {
1025 if (physical_device->mem_type_indices[i] == RADV_MEM_TYPE_GTT_CACHED) {
1026 memoryTypeBits = (1 << i);
1027 break;
1028 }
1029 }
1030 pMemoryHostPointerProperties->memoryTypeBits = memoryTypeBits;
1031 return VK_SUCCESS;
1032 }
1033 default:
1034 return VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR;
1035 }
1036 }
1037
1038 static enum radeon_ctx_priority
1039 radv_get_queue_global_priority(const VkDeviceQueueGlobalPriorityCreateInfoEXT *pObj)
1040 {
1041 /* Default to MEDIUM when a specific global priority isn't requested */
1042 if (!pObj)
1043 return RADEON_CTX_PRIORITY_MEDIUM;
1044
1045 switch(pObj->globalPriority) {
1046 case VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT:
1047 return RADEON_CTX_PRIORITY_REALTIME;
1048 case VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT:
1049 return RADEON_CTX_PRIORITY_HIGH;
1050 case VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT:
1051 return RADEON_CTX_PRIORITY_MEDIUM;
1052 case VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT:
1053 return RADEON_CTX_PRIORITY_LOW;
1054 default:
1055 unreachable("Illegal global priority value");
1056 return RADEON_CTX_PRIORITY_INVALID;
1057 }
1058 }
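/*
 * Usage sketch (illustrative only; "family" and "one" are placeholder
 * variables): an application requests a global priority by chaining the
 * EXT struct into its queue create info:
 *
 *   VkDeviceQueueGlobalPriorityCreateInfoEXT prio = {
 *       .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT,
 *       .globalPriority = VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT,
 *   };
 *   VkDeviceQueueCreateInfo qinfo = {
 *       .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
 *       .pNext = &prio,
 *       .queueFamilyIndex = family,
 *       .queueCount = 1,
 *       .pQueuePriorities = &one,
 *   };
 *
 * Without the chained struct, contexts are created at MEDIUM priority.
 */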
1059
1060 static int
1061 radv_queue_init(struct radv_device *device, struct radv_queue *queue,
1062 uint32_t queue_family_index, int idx,
1063 const VkDeviceQueueGlobalPriorityCreateInfoEXT *global_priority)
1064 {
1065 queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1066 queue->device = device;
1067 queue->queue_family_index = queue_family_index;
1068 queue->queue_idx = idx;
1069 queue->priority = radv_get_queue_global_priority(global_priority);
1070
1071 queue->hw_ctx = device->ws->ctx_create(device->ws, queue->priority);
1072 if (!queue->hw_ctx)
1073 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1074
1075 return VK_SUCCESS;
1076 }
1077
1078 static void
1079 radv_queue_finish(struct radv_queue *queue)
1080 {
1081 if (queue->hw_ctx)
1082 queue->device->ws->ctx_destroy(queue->hw_ctx);
1083
1084 if (queue->initial_full_flush_preamble_cs)
1085 queue->device->ws->cs_destroy(queue->initial_full_flush_preamble_cs);
1086 if (queue->initial_preamble_cs)
1087 queue->device->ws->cs_destroy(queue->initial_preamble_cs);
1088 if (queue->continue_preamble_cs)
1089 queue->device->ws->cs_destroy(queue->continue_preamble_cs);
1090 if (queue->descriptor_bo)
1091 queue->device->ws->buffer_destroy(queue->descriptor_bo);
1092 if (queue->scratch_bo)
1093 queue->device->ws->buffer_destroy(queue->scratch_bo);
1094 if (queue->esgs_ring_bo)
1095 queue->device->ws->buffer_destroy(queue->esgs_ring_bo);
1096 if (queue->gsvs_ring_bo)
1097 queue->device->ws->buffer_destroy(queue->gsvs_ring_bo);
1098 if (queue->tess_rings_bo)
1099 queue->device->ws->buffer_destroy(queue->tess_rings_bo);
1100 if (queue->compute_scratch_bo)
1101 queue->device->ws->buffer_destroy(queue->compute_scratch_bo);
1102 }
1103
1104 static void
1105 radv_device_init_gs_info(struct radv_device *device)
1106 {
1107 switch (device->physical_device->rad_info.family) {
1108 case CHIP_OLAND:
1109 case CHIP_HAINAN:
1110 case CHIP_KAVERI:
1111 case CHIP_KABINI:
1112 case CHIP_MULLINS:
1113 case CHIP_ICELAND:
1114 case CHIP_CARRIZO:
1115 case CHIP_STONEY:
1116 device->gs_table_depth = 16;
1117 return;
1118 case CHIP_TAHITI:
1119 case CHIP_PITCAIRN:
1120 case CHIP_VERDE:
1121 case CHIP_BONAIRE:
1122 case CHIP_HAWAII:
1123 case CHIP_TONGA:
1124 case CHIP_FIJI:
1125 case CHIP_POLARIS10:
1126 case CHIP_POLARIS11:
1127 case CHIP_POLARIS12:
1128 case CHIP_VEGA10:
1129 case CHIP_RAVEN:
1130 device->gs_table_depth = 32;
1131 return;
1132 default:
1133 unreachable("unknown GPU");
1134 }
1135 }
1136
1137 static int radv_get_device_extension_index(const char *name)
1138 {
1139 for (unsigned i = 0; i < RADV_DEVICE_EXTENSION_COUNT; ++i) {
1140 if (strcmp(name, radv_device_extensions[i].extensionName) == 0)
1141 return i;
1142 }
1143 return -1;
1144 }
1145
1146 VkResult radv_CreateDevice(
1147 VkPhysicalDevice physicalDevice,
1148 const VkDeviceCreateInfo* pCreateInfo,
1149 const VkAllocationCallbacks* pAllocator,
1150 VkDevice* pDevice)
1151 {
1152 RADV_FROM_HANDLE(radv_physical_device, physical_device, physicalDevice);
1153 VkResult result;
1154 struct radv_device *device;
1155
1156 bool keep_shader_info = false;
1157
1158 /* Check enabled features */
1159 if (pCreateInfo->pEnabledFeatures) {
1160 VkPhysicalDeviceFeatures supported_features;
1161 radv_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
1162 VkBool32 *supported_feature = (VkBool32 *)&supported_features;
1163 VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
1164 unsigned num_features = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
1165 for (uint32_t i = 0; i < num_features; i++) {
1166 if (enabled_feature[i] && !supported_feature[i])
1167 return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
1168 }
1169 }
1170
1171 device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
1172 sizeof(*device), 8,
1173 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1174 if (!device)
1175 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1176
1177 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1178 device->instance = physical_device->instance;
1179 device->physical_device = physical_device;
1180
1181 device->ws = physical_device->ws;
1182 if (pAllocator)
1183 device->alloc = *pAllocator;
1184 else
1185 device->alloc = physical_device->instance->alloc;
1186
1187 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
1188 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
1189 int index = radv_get_device_extension_index(ext_name);
1190 if (index < 0 || !physical_device->supported_extensions.extensions[index]) {
1191 vk_free(&device->alloc, device);
1192 return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
1193 }
1194
1195 device->enabled_extensions.extensions[index] = true;
1196 }
1197
1198 keep_shader_info = device->enabled_extensions.AMD_shader_info;
1199
1200 mtx_init(&device->shader_slab_mutex, mtx_plain);
1201 list_inithead(&device->shader_slabs);
1202
1203 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
1204 const VkDeviceQueueCreateInfo *queue_create = &pCreateInfo->pQueueCreateInfos[i];
1205 uint32_t qfi = queue_create->queueFamilyIndex;
1206 const VkDeviceQueueGlobalPriorityCreateInfoEXT *global_priority =
1207 vk_find_struct_const(queue_create->pNext, DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT);
1208
1209 assert(!global_priority || device->physical_device->rad_info.has_ctx_priority);
1210
1211 device->queues[qfi] = vk_alloc(&device->alloc,
1212 queue_create->queueCount * sizeof(struct radv_queue), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1213 if (!device->queues[qfi]) {
1214 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1215 goto fail;
1216 }
1217
1218 memset(device->queues[qfi], 0, queue_create->queueCount * sizeof(struct radv_queue));
1219
1220 device->queue_count[qfi] = queue_create->queueCount;
1221
1222 for (unsigned q = 0; q < queue_create->queueCount; q++) {
1223 result = radv_queue_init(device, &device->queues[qfi][q], qfi, q, global_priority);
1224 if (result != VK_SUCCESS)
1225 goto fail;
1226 }
1227 }
1228
1229 device->pbb_allowed = device->physical_device->rad_info.chip_class >= GFX9 &&
1230 (device->instance->perftest_flags & RADV_PERFTEST_BINNING);
1231
1232 /* Disabled and not implemented for now. */
1233 device->dfsm_allowed = device->pbb_allowed && false;
1234
1235 #ifdef ANDROID
1236 device->always_use_syncobj = device->physical_device->rad_info.has_syncobj_wait_for_submit;
1237 #endif
1238
1239 device->llvm_supports_spill = true;
1240
1241 /* The maximum number of scratch waves. Scratch space isn't divided
1242 * evenly between CUs. The number is only a function of the number of CUs.
1243 * We can decrease the constant to decrease the scratch buffer size.
1244 *
1245 * sctx->scratch_waves must be >= the maximum possible size of
1246 * 1 threadgroup, so that the hw doesn't hang from being unable
1247 * to start any.
1248 *
1249 * The recommended value is 4 per CU at most. Higher numbers don't
1250 * bring much benefit, but they still occupy chip resources (think
1251 * async compute). I've seen ~2% performance difference between 4 and 32.
1252 */
1253 uint32_t max_threads_per_block = 2048;
1254 device->scratch_waves = MAX2(32 * physical_device->rad_info.num_good_compute_units,
1255 max_threads_per_block / 64);
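/*
 * Worked example (illustrative): with 36 compute units this gives
 * MAX2(32 * 36, 2048 / 64) = MAX2(1152, 32) = 1152 scratch waves; the
 * max_threads_per_block term only dominates on very small parts.
 */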
1256
1257 device->dispatch_initiator = S_00B800_COMPUTE_SHADER_EN(1) |
1258 S_00B800_FORCE_START_AT_000(1);
1259
1260 if (device->physical_device->rad_info.chip_class >= CIK) {
1261 /* If the KMD allows it (there is a KMD hw register for it),
1262 * allow launching waves out-of-order.
1263 */
1264 device->dispatch_initiator |= S_00B800_ORDER_MODE(1);
1265 }
1266
1267 radv_device_init_gs_info(device);
1268
1269 device->tess_offchip_block_dw_size =
1270 device->physical_device->rad_info.family == CHIP_HAWAII ? 4096 : 8192;
1271 device->has_distributed_tess =
1272 device->physical_device->rad_info.chip_class >= VI &&
1273 device->physical_device->rad_info.max_se >= 2;
1274
1275 if (getenv("RADV_TRACE_FILE")) {
1276 keep_shader_info = true;
1277
1278 if (!radv_init_trace(device))
1279 goto fail;
1280 }
1281
1282 device->keep_shader_info = keep_shader_info;
1283
1284 result = radv_device_init_meta(device);
1285 if (result != VK_SUCCESS)
1286 goto fail;
1287
1288 radv_device_init_msaa(device);
1289
1290 for (int family = 0; family < RADV_MAX_QUEUE_FAMILIES; ++family) {
1291 device->empty_cs[family] = device->ws->cs_create(device->ws, family);
1292 switch (family) {
1293 case RADV_QUEUE_GENERAL:
1294 radeon_emit(device->empty_cs[family], PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
1295 radeon_emit(device->empty_cs[family], CONTEXT_CONTROL_LOAD_ENABLE(1));
1296 radeon_emit(device->empty_cs[family], CONTEXT_CONTROL_SHADOW_ENABLE(1));
1297 break;
1298 case RADV_QUEUE_COMPUTE:
1299 radeon_emit(device->empty_cs[family], PKT3(PKT3_NOP, 0, 0));
1300 radeon_emit(device->empty_cs[family], 0);
1301 break;
1302 }
1303 device->ws->cs_finalize(device->empty_cs[family]);
1304 }
1305
1306 if (device->physical_device->rad_info.chip_class >= CIK)
1307 cik_create_gfx_config(device);
1308
1309 VkPipelineCacheCreateInfo ci;
1310 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
1311 ci.pNext = NULL;
1312 ci.flags = 0;
1313 ci.pInitialData = NULL;
1314 ci.initialDataSize = 0;
1315 VkPipelineCache pc;
1316 result = radv_CreatePipelineCache(radv_device_to_handle(device),
1317 &ci, NULL, &pc);
1318 if (result != VK_SUCCESS)
1319 goto fail_meta;
1320
1321 device->mem_cache = radv_pipeline_cache_from_handle(pc);
1322
1323 *pDevice = radv_device_to_handle(device);
1324 return VK_SUCCESS;
1325
1326 fail_meta:
1327 radv_device_finish_meta(device);
1328 fail:
1329 if (device->trace_bo)
1330 device->ws->buffer_destroy(device->trace_bo);
1331
1332 if (device->gfx_init)
1333 device->ws->buffer_destroy(device->gfx_init);
1334
1335 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
1336 for (unsigned q = 0; q < device->queue_count[i]; q++)
1337 radv_queue_finish(&device->queues[i][q]);
1338 if (device->queue_count[i])
1339 vk_free(&device->alloc, device->queues[i]);
1340 }
1341
1342 vk_free(&device->alloc, device);
1343 return result;
1344 }
1345
1346 void radv_DestroyDevice(
1347 VkDevice _device,
1348 const VkAllocationCallbacks* pAllocator)
1349 {
1350 RADV_FROM_HANDLE(radv_device, device, _device);
1351
1352 if (!device)
1353 return;
1354
1355 if (device->trace_bo)
1356 device->ws->buffer_destroy(device->trace_bo);
1357
1358 if (device->gfx_init)
1359 device->ws->buffer_destroy(device->gfx_init);
1360
1361 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
1362 for (unsigned q = 0; q < device->queue_count[i]; q++)
1363 radv_queue_finish(&device->queues[i][q]);
1364 if (device->queue_count[i])
1365 vk_free(&device->alloc, device->queues[i]);
1366 if (device->empty_cs[i])
1367 device->ws->cs_destroy(device->empty_cs[i]);
1368 }
1369 radv_device_finish_meta(device);
1370
1371 VkPipelineCache pc = radv_pipeline_cache_to_handle(device->mem_cache);
1372 radv_DestroyPipelineCache(radv_device_to_handle(device), pc, NULL);
1373
1374 radv_destroy_shader_slabs(device);
1375
1376 vk_free(&device->alloc, device);
1377 }
1378
1379 VkResult radv_EnumerateInstanceLayerProperties(
1380 uint32_t* pPropertyCount,
1381 VkLayerProperties* pProperties)
1382 {
1383 if (pProperties == NULL) {
1384 *pPropertyCount = 0;
1385 return VK_SUCCESS;
1386 }
1387
1388 /* None supported at this time */
1389 return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
1390 }
1391
1392 VkResult radv_EnumerateDeviceLayerProperties(
1393 VkPhysicalDevice physicalDevice,
1394 uint32_t* pPropertyCount,
1395 VkLayerProperties* pProperties)
1396 {
1397 if (pProperties == NULL) {
1398 *pPropertyCount = 0;
1399 return VK_SUCCESS;
1400 }
1401
1402 /* None supported at this time */
1403 return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
1404 }
1405
1406 void radv_GetDeviceQueue(
1407 VkDevice _device,
1408 uint32_t queueFamilyIndex,
1409 uint32_t queueIndex,
1410 VkQueue* pQueue)
1411 {
1412 RADV_FROM_HANDLE(radv_device, device, _device);
1413
1414 *pQueue = radv_queue_to_handle(&device->queues[queueFamilyIndex][queueIndex]);
1415 }
1416
1417 static void
1418 fill_geom_tess_rings(struct radv_queue *queue,
1419 uint32_t *map,
1420 bool add_sample_positions,
1421 uint32_t esgs_ring_size,
1422 struct radeon_winsys_bo *esgs_ring_bo,
1423 uint32_t gsvs_ring_size,
1424 struct radeon_winsys_bo *gsvs_ring_bo,
1425 uint32_t tess_factor_ring_size,
1426 uint32_t tess_offchip_ring_offset,
1427 uint32_t tess_offchip_ring_size,
1428 struct radeon_winsys_bo *tess_rings_bo)
1429 {
1430 uint64_t esgs_va = 0, gsvs_va = 0;
1431 uint64_t tess_va = 0, tess_offchip_va = 0;
1432 uint32_t *desc = &map[4];
1433
1434 if (esgs_ring_bo)
1435 esgs_va = radv_buffer_get_va(esgs_ring_bo);
1436 if (gsvs_ring_bo)
1437 gsvs_va = radv_buffer_get_va(gsvs_ring_bo);
1438 if (tess_rings_bo) {
1439 tess_va = radv_buffer_get_va(tess_rings_bo);
1440 tess_offchip_va = tess_va + tess_offchip_ring_offset;
1441 }
1442
1443 /* stride 0, num records - size, add tid, swizzle, elsize4,
1444 index stride 64 */
1445 desc[0] = esgs_va;
1446 desc[1] = S_008F04_BASE_ADDRESS_HI(esgs_va >> 32) |
1447 S_008F04_STRIDE(0) |
1448 S_008F04_SWIZZLE_ENABLE(true);
1449 desc[2] = esgs_ring_size;
1450 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1451 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1452 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1453 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1454 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1455 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1456 S_008F0C_ELEMENT_SIZE(1) |
1457 S_008F0C_INDEX_STRIDE(3) |
1458 S_008F0C_ADD_TID_ENABLE(true);
1459
1460 desc += 4;
1461 /* GS entry for ES->GS ring */
1462 /* stride 0, num records - size, elsize0,
1463 index stride 0 */
1464 desc[0] = esgs_va;
1465 desc[1] = S_008F04_BASE_ADDRESS_HI(esgs_va >> 32)|
1466 S_008F04_STRIDE(0) |
1467 S_008F04_SWIZZLE_ENABLE(false);
1468 desc[2] = esgs_ring_size;
1469 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1470 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1471 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1472 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1473 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1474 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1475 S_008F0C_ELEMENT_SIZE(0) |
1476 S_008F0C_INDEX_STRIDE(0) |
1477 S_008F0C_ADD_TID_ENABLE(false);
1478
1479 desc += 4;
1480 /* VS entry for GS->VS ring */
1481 /* stride 0, num records - size, elsize0,
1482 index stride 0 */
1483 desc[0] = gsvs_va;
1484 desc[1] = S_008F04_BASE_ADDRESS_HI(gsvs_va >> 32)|
1485 S_008F04_STRIDE(0) |
1486 S_008F04_SWIZZLE_ENABLE(false);
1487 desc[2] = gsvs_ring_size;
1488 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1489 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1490 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1491 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1492 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1493 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1494 S_008F0C_ELEMENT_SIZE(0) |
1495 S_008F0C_INDEX_STRIDE(0) |
1496 S_008F0C_ADD_TID_ENABLE(false);
1497 desc += 4;
1498
1499 /* stride gsvs_itemsize, num records 64
1500 elsize 4, index stride 16 */
1501 /* shader will patch stride and desc[2] */
1502 desc[0] = gsvs_va;
1503 desc[1] = S_008F04_BASE_ADDRESS_HI(gsvs_va >> 32)|
1504 S_008F04_STRIDE(0) |
1505 S_008F04_SWIZZLE_ENABLE(true);
1506 desc[2] = 0;
1507 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1508 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1509 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1510 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1511 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1512 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1513 S_008F0C_ELEMENT_SIZE(1) |
1514 S_008F0C_INDEX_STRIDE(1) |
1515 S_008F0C_ADD_TID_ENABLE(true);
1516 desc += 4;
1517
1518 desc[0] = tess_va;
1519 desc[1] = S_008F04_BASE_ADDRESS_HI(tess_va >> 32) |
1520 S_008F04_STRIDE(0) |
1521 S_008F04_SWIZZLE_ENABLE(false);
1522 desc[2] = tess_factor_ring_size;
1523 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1524 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1525 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1526 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1527 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1528 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1529 S_008F0C_ELEMENT_SIZE(0) |
1530 S_008F0C_INDEX_STRIDE(0) |
1531 S_008F0C_ADD_TID_ENABLE(false);
1532 desc += 4;
1533
1534 desc[0] = tess_offchip_va;
1535 desc[1] = S_008F04_BASE_ADDRESS_HI(tess_offchip_va >> 32) |
1536 S_008F04_STRIDE(0) |
1537 S_008F04_SWIZZLE_ENABLE(false);
1538 desc[2] = tess_offchip_ring_size;
1539 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1540 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1541 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1542 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1543 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1544 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1545 S_008F0C_ELEMENT_SIZE(0) |
1546 S_008F0C_INDEX_STRIDE(0) |
1547 S_008F0C_ADD_TID_ENABLE(false);
1548 desc += 4;
1549
1550 /* add sample positions after all rings */
1551 memcpy(desc, queue->device->sample_locations_1x, 8);
1552 desc += 2;
1553 memcpy(desc, queue->device->sample_locations_2x, 16);
1554 desc += 4;
1555 memcpy(desc, queue->device->sample_locations_4x, 32);
1556 desc += 8;
1557 memcpy(desc, queue->device->sample_locations_8x, 64);
1558 desc += 16;
1559 memcpy(desc, queue->device->sample_locations_16x, 128);
1560 }
1561
1562 static unsigned
1563 radv_get_hs_offchip_param(struct radv_device *device, uint32_t *max_offchip_buffers_p)
1564 {
1565 bool double_offchip_buffers = device->physical_device->rad_info.chip_class >= CIK &&
1566 device->physical_device->rad_info.family != CHIP_CARRIZO &&
1567 device->physical_device->rad_info.family != CHIP_STONEY;
1568 unsigned max_offchip_buffers_per_se = double_offchip_buffers ? 128 : 64;
1569 unsigned max_offchip_buffers = max_offchip_buffers_per_se *
1570 device->physical_device->rad_info.max_se;
1571 unsigned offchip_granularity;
1572 unsigned hs_offchip_param;
1573 switch (device->tess_offchip_block_dw_size) {
1574 default:
1575 assert(0);
1576 /* fall through */
1577 case 8192:
1578 offchip_granularity = V_03093C_X_8K_DWORDS;
1579 break;
1580 case 4096:
1581 offchip_granularity = V_03093C_X_4K_DWORDS;
1582 break;
1583 }
1584
1585 switch (device->physical_device->rad_info.chip_class) {
1586 case SI:
1587 max_offchip_buffers = MIN2(max_offchip_buffers, 126);
1588 break;
1589 case CIK:
1590 case VI:
1591 case GFX9:
1592 default:
1593 max_offchip_buffers = MIN2(max_offchip_buffers, 508);
1594 break;
1595 }
1596
1597 *max_offchip_buffers_p = max_offchip_buffers;
1598 if (device->physical_device->rad_info.chip_class >= CIK) {
1599 if (device->physical_device->rad_info.chip_class >= VI)
1600 --max_offchip_buffers;
1601 hs_offchip_param =
1602 S_03093C_OFFCHIP_BUFFERING(max_offchip_buffers) |
1603 S_03093C_OFFCHIP_GRANULARITY(offchip_granularity);
1604 } else {
1605 hs_offchip_param =
1606 S_0089B0_OFFCHIP_BUFFERING(max_offchip_buffers);
1607 }
1608 return hs_offchip_param;
1609 }
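/*
 * Worked example (illustrative): a VI part with max_se = 4 supports
 * doubled offchip buffers, so 128 * 4 = 512 is clamped to 508 and
 * reported via *max_offchip_buffers_p; the register then gets
 * OFFCHIP_BUFFERING(508 - 1 = 507) with X_8K_DWORDS granularity for the
 * 8192-dword offchip block size.
 */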
1610
1611 static VkResult
1612 radv_get_preamble_cs(struct radv_queue *queue,
1613 uint32_t scratch_size,
1614 uint32_t compute_scratch_size,
1615 uint32_t esgs_ring_size,
1616 uint32_t gsvs_ring_size,
1617 bool needs_tess_rings,
1618 bool needs_sample_positions,
1619 struct radeon_winsys_cs **initial_full_flush_preamble_cs,
1620 struct radeon_winsys_cs **initial_preamble_cs,
1621 struct radeon_winsys_cs **continue_preamble_cs)
1622 {
1623 struct radeon_winsys_bo *scratch_bo = NULL;
1624 struct radeon_winsys_bo *descriptor_bo = NULL;
1625 struct radeon_winsys_bo *compute_scratch_bo = NULL;
1626 struct radeon_winsys_bo *esgs_ring_bo = NULL;
1627 struct radeon_winsys_bo *gsvs_ring_bo = NULL;
1628 struct radeon_winsys_bo *tess_rings_bo = NULL;
1629 struct radeon_winsys_cs *dest_cs[3] = {0};
1630 bool add_tess_rings = false, add_sample_positions = false;
1631 unsigned tess_factor_ring_size = 0, tess_offchip_ring_size = 0;
1632 unsigned max_offchip_buffers;
1633 unsigned hs_offchip_param = 0;
1634 unsigned tess_offchip_ring_offset;
1635 uint32_t ring_bo_flags = RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING;
1636 if (!queue->has_tess_rings) {
1637 if (needs_tess_rings)
1638 add_tess_rings = true;
1639 }
1640 if (!queue->has_sample_positions) {
1641 if (needs_sample_positions)
1642 add_sample_positions = true;
1643 }
1644 tess_factor_ring_size = 32768 * queue->device->physical_device->rad_info.max_se;
1645 hs_offchip_param = radv_get_hs_offchip_param(queue->device,
1646 &max_offchip_buffers);
1647 tess_offchip_ring_offset = align(tess_factor_ring_size, 64 * 1024);
1648 tess_offchip_ring_size = max_offchip_buffers *
1649 queue->device->tess_offchip_block_dw_size * 4;
1650
1651 if (scratch_size <= queue->scratch_size &&
1652 compute_scratch_size <= queue->compute_scratch_size &&
1653 esgs_ring_size <= queue->esgs_ring_size &&
1654 gsvs_ring_size <= queue->gsvs_ring_size &&
1655 !add_tess_rings && !add_sample_positions &&
1656 queue->initial_preamble_cs) {
1657 *initial_full_flush_preamble_cs = queue->initial_full_flush_preamble_cs;
1658 *initial_preamble_cs = queue->initial_preamble_cs;
1659 *continue_preamble_cs = queue->continue_preamble_cs;
1660 if (!scratch_size && !compute_scratch_size && !esgs_ring_size && !gsvs_ring_size)
1661 *continue_preamble_cs = NULL;
1662 return VK_SUCCESS;
1663 }
1664
1665 if (scratch_size > queue->scratch_size) {
1666 scratch_bo = queue->device->ws->buffer_create(queue->device->ws,
1667 scratch_size,
1668 4096,
1669 RADEON_DOMAIN_VRAM,
1670 ring_bo_flags);
1671 if (!scratch_bo)
1672 goto fail;
1673 } else
1674 scratch_bo = queue->scratch_bo;
1675
1676 if (compute_scratch_size > queue->compute_scratch_size) {
1677 compute_scratch_bo = queue->device->ws->buffer_create(queue->device->ws,
1678 compute_scratch_size,
1679 4096,
1680 RADEON_DOMAIN_VRAM,
1681 ring_bo_flags);
1682 if (!compute_scratch_bo)
1683 goto fail;
1684
1685 } else
1686 compute_scratch_bo = queue->compute_scratch_bo;
1687
1688 if (esgs_ring_size > queue->esgs_ring_size) {
1689 esgs_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
1690 esgs_ring_size,
1691 4096,
1692 RADEON_DOMAIN_VRAM,
1693 ring_bo_flags);
1694 if (!esgs_ring_bo)
1695 goto fail;
1696 } else {
1697 esgs_ring_bo = queue->esgs_ring_bo;
1698 esgs_ring_size = queue->esgs_ring_size;
1699 }
1700
1701 if (gsvs_ring_size > queue->gsvs_ring_size) {
1702 gsvs_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
1703 gsvs_ring_size,
1704 4096,
1705 RADEON_DOMAIN_VRAM,
1706 ring_bo_flags);
1707 if (!gsvs_ring_bo)
1708 goto fail;
1709 } else {
1710 gsvs_ring_bo = queue->gsvs_ring_bo;
1711 gsvs_ring_size = queue->gsvs_ring_size;
1712 }
1713
1714 if (add_tess_rings) {
1715 tess_rings_bo = queue->device->ws->buffer_create(queue->device->ws,
1716 tess_offchip_ring_offset + tess_offchip_ring_size,
1717 256,
1718 RADEON_DOMAIN_VRAM,
1719 ring_bo_flags);
1720 if (!tess_rings_bo)
1721 goto fail;
1722 } else {
1723 tess_rings_bo = queue->tess_rings_bo;
1724 }
1725
1726 if (scratch_bo != queue->scratch_bo ||
1727 esgs_ring_bo != queue->esgs_ring_bo ||
1728 gsvs_ring_bo != queue->gsvs_ring_bo ||
1729 tess_rings_bo != queue->tess_rings_bo ||
1730 add_sample_positions) {
1731 uint32_t size = 0;
1732 if (gsvs_ring_bo || esgs_ring_bo ||
1733 tess_rings_bo || add_sample_positions) {
1734 			size = 112; /* 2 dwords scratch + 2 dwords padding + 6 descriptors * 4 dwords = 28 dwords = 112 bytes */
1735 if (add_sample_positions)
1736 				size += 256; /* (1+2+4+8+16) samples * 4 * 2 = 248 bytes, padded to 256. */
1737 }
1738 else if (scratch_bo)
1739 size = 8; /* 2 dword */
1740
1741 descriptor_bo = queue->device->ws->buffer_create(queue->device->ws,
1742 size,
1743 4096,
1744 RADEON_DOMAIN_VRAM,
1745 RADEON_FLAG_CPU_ACCESS |
1746 RADEON_FLAG_NO_INTERPROCESS_SHARING |
1747 RADEON_FLAG_READ_ONLY);
1748 if (!descriptor_bo)
1749 goto fail;
1750 } else
1751 descriptor_bo = queue->descriptor_bo;
1752
1753 for(int i = 0; i < 3; ++i) {
1754 struct radeon_winsys_cs *cs = NULL;
1755 cs = queue->device->ws->cs_create(queue->device->ws,
1756 queue->queue_family_index ? RING_COMPUTE : RING_GFX);
1757 if (!cs)
1758 goto fail;
1759
1760 dest_cs[i] = cs;
1761
1762 if (scratch_bo)
1763 radv_cs_add_buffer(queue->device->ws, cs, scratch_bo, 8);
1764
1765 if (esgs_ring_bo)
1766 radv_cs_add_buffer(queue->device->ws, cs, esgs_ring_bo, 8);
1767
1768 if (gsvs_ring_bo)
1769 radv_cs_add_buffer(queue->device->ws, cs, gsvs_ring_bo, 8);
1770
1771 if (tess_rings_bo)
1772 radv_cs_add_buffer(queue->device->ws, cs, tess_rings_bo, 8);
1773
1774 if (descriptor_bo)
1775 radv_cs_add_buffer(queue->device->ws, cs, descriptor_bo, 8);
1776
1777 if (descriptor_bo != queue->descriptor_bo) {
1778 uint32_t *map = (uint32_t*)queue->device->ws->buffer_map(descriptor_bo);
1779
1780 if (scratch_bo) {
1781 uint64_t scratch_va = radv_buffer_get_va(scratch_bo);
1782 uint32_t rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
1783 S_008F04_SWIZZLE_ENABLE(1);
1784 map[0] = scratch_va;
1785 map[1] = rsrc1;
1786 }
1787
1788 if (esgs_ring_bo || gsvs_ring_bo || tess_rings_bo ||
1789 add_sample_positions)
1790 fill_geom_tess_rings(queue, map, add_sample_positions,
1791 esgs_ring_size, esgs_ring_bo,
1792 gsvs_ring_size, gsvs_ring_bo,
1793 tess_factor_ring_size,
1794 tess_offchip_ring_offset,
1795 tess_offchip_ring_size,
1796 tess_rings_bo);
1797
1798 queue->device->ws->buffer_unmap(descriptor_bo);
1799 }
1800
1801 if (esgs_ring_bo || gsvs_ring_bo || tess_rings_bo) {
1802 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1803 radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1804 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1805 radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
1806 }
1807
1808 if (esgs_ring_bo || gsvs_ring_bo) {
1809 if (queue->device->physical_device->rad_info.chip_class >= CIK) {
1810 radeon_set_uconfig_reg_seq(cs, R_030900_VGT_ESGS_RING_SIZE, 2);
1811 radeon_emit(cs, esgs_ring_size >> 8);
1812 radeon_emit(cs, gsvs_ring_size >> 8);
1813 } else {
1814 radeon_set_config_reg_seq(cs, R_0088C8_VGT_ESGS_RING_SIZE, 2);
1815 radeon_emit(cs, esgs_ring_size >> 8);
1816 radeon_emit(cs, gsvs_ring_size >> 8);
1817 }
1818 }
1819
1820 if (tess_rings_bo) {
1821 uint64_t tf_va = radv_buffer_get_va(tess_rings_bo);
1822 if (queue->device->physical_device->rad_info.chip_class >= CIK) {
1823 radeon_set_uconfig_reg(cs, R_030938_VGT_TF_RING_SIZE,
1824 S_030938_SIZE(tess_factor_ring_size / 4));
1825 radeon_set_uconfig_reg(cs, R_030940_VGT_TF_MEMORY_BASE,
1826 tf_va >> 8);
1827 if (queue->device->physical_device->rad_info.chip_class >= GFX9) {
1828 radeon_set_uconfig_reg(cs, R_030944_VGT_TF_MEMORY_BASE_HI,
1829 tf_va >> 40);
1830 }
1831 radeon_set_uconfig_reg(cs, R_03093C_VGT_HS_OFFCHIP_PARAM, hs_offchip_param);
1832 } else {
1833 radeon_set_config_reg(cs, R_008988_VGT_TF_RING_SIZE,
1834 S_008988_SIZE(tess_factor_ring_size / 4));
1835 radeon_set_config_reg(cs, R_0089B8_VGT_TF_MEMORY_BASE,
1836 tf_va >> 8);
1837 radeon_set_config_reg(cs, R_0089B0_VGT_HS_OFFCHIP_PARAM,
1838 hs_offchip_param);
1839 }
1840 }
1841
1842 if (descriptor_bo) {
1843 uint64_t va = radv_buffer_get_va(descriptor_bo);
1844 if (queue->device->physical_device->rad_info.chip_class >= GFX9) {
1845 uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0,
1846 R_00B130_SPI_SHADER_USER_DATA_VS_0,
1847 R_00B208_SPI_SHADER_USER_DATA_ADDR_LO_GS,
1848 R_00B408_SPI_SHADER_USER_DATA_ADDR_LO_HS};
1849
1850 for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
1851 radeon_set_sh_reg_seq(cs, regs[i], 2);
1852 radeon_emit(cs, va);
1853 radeon_emit(cs, va >> 32);
1854 }
1855 } else {
1856 uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0,
1857 R_00B130_SPI_SHADER_USER_DATA_VS_0,
1858 R_00B230_SPI_SHADER_USER_DATA_GS_0,
1859 R_00B330_SPI_SHADER_USER_DATA_ES_0,
1860 R_00B430_SPI_SHADER_USER_DATA_HS_0,
1861 R_00B530_SPI_SHADER_USER_DATA_LS_0};
1862
1863 for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
1864 radeon_set_sh_reg_seq(cs, regs[i], 2);
1865 radeon_emit(cs, va);
1866 radeon_emit(cs, va >> 32);
1867 }
1868 }
1869 }
1870
1871 if (compute_scratch_bo) {
1872 uint64_t scratch_va = radv_buffer_get_va(compute_scratch_bo);
1873 uint32_t rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
1874 S_008F04_SWIZZLE_ENABLE(1);
1875
1876 radv_cs_add_buffer(queue->device->ws, cs, compute_scratch_bo, 8);
1877
1878 radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
1879 radeon_emit(cs, scratch_va);
1880 radeon_emit(cs, rsrc1);
1881 }
1882
1883 if (i == 0) {
1884 si_cs_emit_cache_flush(cs,
1885 queue->device->physical_device->rad_info.chip_class,
1886 NULL, 0,
1887 queue->queue_family_index == RING_COMPUTE &&
1888 queue->device->physical_device->rad_info.chip_class >= CIK,
1889 				       (queue->queue_family_index == RADV_QUEUE_COMPUTE ?
 				        RADV_CMD_FLAG_CS_PARTIAL_FLUSH :
 				        (RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
 				         RADV_CMD_FLAG_PS_PARTIAL_FLUSH)) |
1890 RADV_CMD_FLAG_INV_ICACHE |
1891 RADV_CMD_FLAG_INV_SMEM_L1 |
1892 RADV_CMD_FLAG_INV_VMEM_L1 |
1893 RADV_CMD_FLAG_INV_GLOBAL_L2);
1894 } else if (i == 1) {
1895 si_cs_emit_cache_flush(cs,
1896 queue->device->physical_device->rad_info.chip_class,
1897 NULL, 0,
1898 queue->queue_family_index == RING_COMPUTE &&
1899 queue->device->physical_device->rad_info.chip_class >= CIK,
1900 RADV_CMD_FLAG_INV_ICACHE |
1901 RADV_CMD_FLAG_INV_SMEM_L1 |
1902 RADV_CMD_FLAG_INV_VMEM_L1 |
1903 RADV_CMD_FLAG_INV_GLOBAL_L2);
1904 }
1905
1906 if (!queue->device->ws->cs_finalize(cs))
1907 goto fail;
1908 }
1909
1910 if (queue->initial_full_flush_preamble_cs)
1911 queue->device->ws->cs_destroy(queue->initial_full_flush_preamble_cs);
1912
1913 if (queue->initial_preamble_cs)
1914 queue->device->ws->cs_destroy(queue->initial_preamble_cs);
1915
1916 if (queue->continue_preamble_cs)
1917 queue->device->ws->cs_destroy(queue->continue_preamble_cs);
1918
1919 queue->initial_full_flush_preamble_cs = dest_cs[0];
1920 queue->initial_preamble_cs = dest_cs[1];
1921 queue->continue_preamble_cs = dest_cs[2];
1922
1923 if (scratch_bo != queue->scratch_bo) {
1924 if (queue->scratch_bo)
1925 queue->device->ws->buffer_destroy(queue->scratch_bo);
1926 queue->scratch_bo = scratch_bo;
1927 queue->scratch_size = scratch_size;
1928 }
1929
1930 if (compute_scratch_bo != queue->compute_scratch_bo) {
1931 if (queue->compute_scratch_bo)
1932 queue->device->ws->buffer_destroy(queue->compute_scratch_bo);
1933 queue->compute_scratch_bo = compute_scratch_bo;
1934 queue->compute_scratch_size = compute_scratch_size;
1935 }
1936
1937 if (esgs_ring_bo != queue->esgs_ring_bo) {
1938 if (queue->esgs_ring_bo)
1939 queue->device->ws->buffer_destroy(queue->esgs_ring_bo);
1940 queue->esgs_ring_bo = esgs_ring_bo;
1941 queue->esgs_ring_size = esgs_ring_size;
1942 }
1943
1944 if (gsvs_ring_bo != queue->gsvs_ring_bo) {
1945 if (queue->gsvs_ring_bo)
1946 queue->device->ws->buffer_destroy(queue->gsvs_ring_bo);
1947 queue->gsvs_ring_bo = gsvs_ring_bo;
1948 queue->gsvs_ring_size = gsvs_ring_size;
1949 }
1950
1951 if (tess_rings_bo != queue->tess_rings_bo) {
1952 queue->tess_rings_bo = tess_rings_bo;
1953 queue->has_tess_rings = true;
1954 }
1955
1956 if (descriptor_bo != queue->descriptor_bo) {
1957 if (queue->descriptor_bo)
1958 queue->device->ws->buffer_destroy(queue->descriptor_bo);
1959
1960 queue->descriptor_bo = descriptor_bo;
1961 }
1962
1963 if (add_sample_positions)
1964 queue->has_sample_positions = true;
1965
1966 *initial_full_flush_preamble_cs = queue->initial_full_flush_preamble_cs;
1967 *initial_preamble_cs = queue->initial_preamble_cs;
1968 *continue_preamble_cs = queue->continue_preamble_cs;
1969 if (!scratch_size && !compute_scratch_size && !esgs_ring_size && !gsvs_ring_size)
1970 *continue_preamble_cs = NULL;
1971 return VK_SUCCESS;
1972 fail:
1973 for (int i = 0; i < ARRAY_SIZE(dest_cs); ++i)
1974 if (dest_cs[i])
1975 queue->device->ws->cs_destroy(dest_cs[i]);
1976 if (descriptor_bo && descriptor_bo != queue->descriptor_bo)
1977 queue->device->ws->buffer_destroy(descriptor_bo);
1978 if (scratch_bo && scratch_bo != queue->scratch_bo)
1979 queue->device->ws->buffer_destroy(scratch_bo);
1980 if (compute_scratch_bo && compute_scratch_bo != queue->compute_scratch_bo)
1981 queue->device->ws->buffer_destroy(compute_scratch_bo);
1982 if (esgs_ring_bo && esgs_ring_bo != queue->esgs_ring_bo)
1983 queue->device->ws->buffer_destroy(esgs_ring_bo);
1984 if (gsvs_ring_bo && gsvs_ring_bo != queue->gsvs_ring_bo)
1985 queue->device->ws->buffer_destroy(gsvs_ring_bo);
1986 if (tess_rings_bo && tess_rings_bo != queue->tess_rings_bo)
1987 queue->device->ws->buffer_destroy(tess_rings_bo);
1988 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
1989 }
1990
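/* Splits the given semaphores (and optionally a fence) into kernel syncobj
 * handles and legacy winsys semaphores. The array is walked twice: once to
 * count each kind so the exact allocation sizes are known, and once to fill
 * the freshly allocated arrays.
 */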
1991 static VkResult radv_alloc_sem_counts(struct radv_winsys_sem_counts *counts,
1992 				       uint32_t num_sems,
1993 const VkSemaphore *sems,
1994 VkFence _fence,
1995 bool reset_temp)
1996 {
1997 int syncobj_idx = 0, sem_idx = 0;
1998
1999 if (num_sems == 0 && _fence == VK_NULL_HANDLE)
2000 return VK_SUCCESS;
2001
2002 for (uint32_t i = 0; i < num_sems; i++) {
2003 RADV_FROM_HANDLE(radv_semaphore, sem, sems[i]);
2004
2005 if (sem->temp_syncobj || sem->syncobj)
2006 counts->syncobj_count++;
2007 else
2008 counts->sem_count++;
2009 }
2010
2011 if (_fence != VK_NULL_HANDLE) {
2012 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2013 if (fence->temp_syncobj || fence->syncobj)
2014 counts->syncobj_count++;
2015 }
2016
2017 if (counts->syncobj_count) {
2018 counts->syncobj = (uint32_t *)malloc(sizeof(uint32_t) * counts->syncobj_count);
2019 if (!counts->syncobj)
2020 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2021 }
2022
2023 if (counts->sem_count) {
2024 counts->sem = (struct radeon_winsys_sem **)malloc(sizeof(struct radeon_winsys_sem *) * counts->sem_count);
2025 if (!counts->sem) {
2026 free(counts->syncobj);
2027 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2028 }
2029 }
2030
2031 for (uint32_t i = 0; i < num_sems; i++) {
2032 RADV_FROM_HANDLE(radv_semaphore, sem, sems[i]);
2033
2034 if (sem->temp_syncobj) {
2035 counts->syncobj[syncobj_idx++] = sem->temp_syncobj;
2036 }
2037 else if (sem->syncobj)
2038 counts->syncobj[syncobj_idx++] = sem->syncobj;
2039 else {
2040 assert(sem->sem);
2041 counts->sem[sem_idx++] = sem->sem;
2042 }
2043 }
2044
2045 if (_fence != VK_NULL_HANDLE) {
2046 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2047 if (fence->temp_syncobj)
2048 counts->syncobj[syncobj_idx++] = fence->temp_syncobj;
2049 else if (fence->syncobj)
2050 counts->syncobj[syncobj_idx++] = fence->syncobj;
2051 }
2052
2053 return VK_SUCCESS;
2054 }
2055
2056 void radv_free_sem_info(struct radv_winsys_sem_info *sem_info)
2057 {
2058 free(sem_info->wait.syncobj);
2059 free(sem_info->wait.sem);
2060 free(sem_info->signal.syncobj);
2061 free(sem_info->signal.sem);
2062 }
2063
2064
2065 static void radv_free_temp_syncobjs(struct radv_device *device,
2066 				    uint32_t num_sems,
2067 const VkSemaphore *sems)
2068 {
2069 for (uint32_t i = 0; i < num_sems; i++) {
2070 RADV_FROM_HANDLE(radv_semaphore, sem, sems[i]);
2071
2072 if (sem->temp_syncobj) {
2073 device->ws->destroy_syncobj(device->ws, sem->temp_syncobj);
2074 sem->temp_syncobj = 0;
2075 }
2076 }
2077 }
2078
2079 VkResult radv_alloc_sem_info(struct radv_winsys_sem_info *sem_info,
2080 int num_wait_sems,
2081 const VkSemaphore *wait_sems,
2082 int num_signal_sems,
2083 const VkSemaphore *signal_sems,
2084 VkFence fence)
2085 {
2086 VkResult ret;
2087 memset(sem_info, 0, sizeof(*sem_info));
2088
2089 ret = radv_alloc_sem_counts(&sem_info->wait, num_wait_sems, wait_sems, VK_NULL_HANDLE, true);
2090 if (ret)
2091 return ret;
2092 ret = radv_alloc_sem_counts(&sem_info->signal, num_signal_sems, signal_sems, fence, false);
2093 if (ret)
2094 radv_free_sem_info(sem_info);
2095
2096 /* caller can override these */
2097 sem_info->cs_emit_wait = true;
2098 sem_info->cs_emit_signal = true;
2099 return ret;
2100 }
2101
2102 /* Signals the fence as soon as all work currently queued on the given queue is done. */
2103 static VkResult radv_signal_fence(struct radv_queue *queue,
2104 struct radv_fence *fence)
2105 {
2106 int ret;
2107 VkResult result;
2108 struct radv_winsys_sem_info sem_info;
2109
2110 result = radv_alloc_sem_info(&sem_info, 0, NULL, 0, NULL,
2111 radv_fence_to_handle(fence));
2112 if (result != VK_SUCCESS)
2113 return result;
2114
2115 ret = queue->device->ws->cs_submit(queue->hw_ctx, queue->queue_idx,
2116 &queue->device->empty_cs[queue->queue_family_index],
2117 1, NULL, NULL, &sem_info,
2118 false, fence->fence);
2119 radv_free_sem_info(&sem_info);
2120
2121 /* TODO: find a better error */
2122 if (ret)
2123 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
2124
2125 return VK_SUCCESS;
2126 }
2127
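/* vkQueueSubmit: scan every command buffer up front for its worst-case
 * scratch and ring needs so the preambles can be rebuilt before anything is
 * submitted, then hand each VkSubmitInfo to the winsys in chunks of at most
 * max_cs_submission command streams. Semaphore waits are emitted only with
 * the first chunk and signals only with the last.
 */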
2128 VkResult radv_QueueSubmit(
2129 VkQueue _queue,
2130 uint32_t submitCount,
2131 const VkSubmitInfo* pSubmits,
2132 VkFence _fence)
2133 {
2134 RADV_FROM_HANDLE(radv_queue, queue, _queue);
2135 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2136 struct radeon_winsys_fence *base_fence = fence ? fence->fence : NULL;
2137 struct radeon_winsys_ctx *ctx = queue->hw_ctx;
2138 int ret;
2139 uint32_t max_cs_submission = queue->device->trace_bo ? 1 : UINT32_MAX;
2140 uint32_t scratch_size = 0;
2141 uint32_t compute_scratch_size = 0;
2142 uint32_t esgs_ring_size = 0, gsvs_ring_size = 0;
2143 struct radeon_winsys_cs *initial_preamble_cs = NULL, *initial_flush_preamble_cs = NULL, *continue_preamble_cs = NULL;
2144 VkResult result;
2145 bool fence_emitted = false;
2146 bool tess_rings_needed = false;
2147 bool sample_positions_needed = false;
2148
2149 /* Do this first so failing to allocate scratch buffers can't result in
2150 * partially executed submissions. */
2151 for (uint32_t i = 0; i < submitCount; i++) {
2152 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
2153 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer,
2154 pSubmits[i].pCommandBuffers[j]);
2155
2156 scratch_size = MAX2(scratch_size, cmd_buffer->scratch_size_needed);
2157 compute_scratch_size = MAX2(compute_scratch_size,
2158 cmd_buffer->compute_scratch_size_needed);
2159 esgs_ring_size = MAX2(esgs_ring_size, cmd_buffer->esgs_ring_size_needed);
2160 gsvs_ring_size = MAX2(gsvs_ring_size, cmd_buffer->gsvs_ring_size_needed);
2161 tess_rings_needed |= cmd_buffer->tess_rings_needed;
2162 sample_positions_needed |= cmd_buffer->sample_positions_needed;
2163 }
2164 }
2165
2166 result = radv_get_preamble_cs(queue, scratch_size, compute_scratch_size,
2167 esgs_ring_size, gsvs_ring_size, tess_rings_needed,
2168 sample_positions_needed, &initial_flush_preamble_cs,
2169 &initial_preamble_cs, &continue_preamble_cs);
2170 if (result != VK_SUCCESS)
2171 return result;
2172
2173 for (uint32_t i = 0; i < submitCount; i++) {
2174 struct radeon_winsys_cs **cs_array;
2175 bool do_flush = !i || pSubmits[i].pWaitDstStageMask;
2176 bool can_patch = true;
2177 uint32_t advance;
2178 struct radv_winsys_sem_info sem_info;
2179
2180 result = radv_alloc_sem_info(&sem_info,
2181 pSubmits[i].waitSemaphoreCount,
2182 pSubmits[i].pWaitSemaphores,
2183 pSubmits[i].signalSemaphoreCount,
2184 pSubmits[i].pSignalSemaphores,
2185 _fence);
2186 if (result != VK_SUCCESS)
2187 return result;
2188
2189 if (!pSubmits[i].commandBufferCount) {
2190 if (pSubmits[i].waitSemaphoreCount || pSubmits[i].signalSemaphoreCount) {
2191 ret = queue->device->ws->cs_submit(ctx, queue->queue_idx,
2192 &queue->device->empty_cs[queue->queue_family_index],
2193 1, NULL, NULL,
2194 &sem_info,
2195 false, base_fence);
2196 if (ret) {
2197 radv_loge("failed to submit CS %d\n", i);
2198 abort();
2199 }
2200 fence_emitted = true;
2201 }
2202 radv_free_sem_info(&sem_info);
2203 continue;
2204 }
2205
2206 		cs_array = malloc(sizeof(struct radeon_winsys_cs *) *
2207 					(pSubmits[i].commandBufferCount));
 		if (!cs_array) {
 			radv_free_sem_info(&sem_info);
 			return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 		}
2208 
2209 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
2210 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer,
2211 pSubmits[i].pCommandBuffers[j]);
2212 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
2213
2214 cs_array[j] = cmd_buffer->cs;
2215 if ((cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT))
2216 can_patch = false;
2217
2218 cmd_buffer->status = RADV_CMD_BUFFER_STATUS_PENDING;
2219 }
2220
2221 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j += advance) {
2222 struct radeon_winsys_cs *initial_preamble = (do_flush && !j) ? initial_flush_preamble_cs : initial_preamble_cs;
2223 advance = MIN2(max_cs_submission,
2224 pSubmits[i].commandBufferCount - j);
2225
2226 if (queue->device->trace_bo)
2227 *queue->device->trace_id_ptr = 0;
2228
2229 sem_info.cs_emit_wait = j == 0;
2230 sem_info.cs_emit_signal = j + advance == pSubmits[i].commandBufferCount;
2231
2232 ret = queue->device->ws->cs_submit(ctx, queue->queue_idx, cs_array + j,
2233 advance, initial_preamble, continue_preamble_cs,
2234 &sem_info,
2235 can_patch, base_fence);
2236
2237 if (ret) {
2238 radv_loge("failed to submit CS %d\n", i);
2239 abort();
2240 }
2241 fence_emitted = true;
2242 if (queue->device->trace_bo) {
2243 radv_check_gpu_hangs(queue, cs_array[j]);
2244 }
2245 }
2246
2247 radv_free_temp_syncobjs(queue->device,
2248 pSubmits[i].waitSemaphoreCount,
2249 pSubmits[i].pWaitSemaphores);
2250 radv_free_sem_info(&sem_info);
2251 free(cs_array);
2252 }
2253
2254 if (fence) {
2255 if (!fence_emitted) {
2256 radv_signal_fence(queue, fence);
2257 }
2258 fence->submitted = true;
2259 }
2260
2261 return VK_SUCCESS;
2262 }
2263
2264 VkResult radv_QueueWaitIdle(
2265 VkQueue _queue)
2266 {
2267 RADV_FROM_HANDLE(radv_queue, queue, _queue);
2268
2269 queue->device->ws->ctx_wait_idle(queue->hw_ctx,
2270 radv_queue_family_to_ring(queue->queue_family_index),
2271 queue->queue_idx);
2272 return VK_SUCCESS;
2273 }
2274
2275 VkResult radv_DeviceWaitIdle(
2276 VkDevice _device)
2277 {
2278 RADV_FROM_HANDLE(radv_device, device, _device);
2279
2280 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
2281 for (unsigned q = 0; q < device->queue_count[i]; q++) {
2282 radv_QueueWaitIdle(radv_queue_to_handle(&device->queues[i][q]));
2283 }
2284 }
2285 return VK_SUCCESS;
2286 }
2287
2288 VkResult radv_EnumerateInstanceExtensionProperties(
2289 const char* pLayerName,
2290 uint32_t* pPropertyCount,
2291 VkExtensionProperties* pProperties)
2292 {
2293 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
2294
2295 for (int i = 0; i < RADV_INSTANCE_EXTENSION_COUNT; i++) {
2296 if (radv_supported_instance_extensions.extensions[i]) {
2297 vk_outarray_append(&out, prop) {
2298 *prop = radv_instance_extensions[i];
2299 }
2300 }
2301 }
2302
2303 return vk_outarray_status(&out);
2304 }
2305
2306 VkResult radv_EnumerateDeviceExtensionProperties(
2307 VkPhysicalDevice physicalDevice,
2308 const char* pLayerName,
2309 uint32_t* pPropertyCount,
2310 VkExtensionProperties* pProperties)
2311 {
2312 RADV_FROM_HANDLE(radv_physical_device, device, physicalDevice);
2313 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
2314
2315 for (int i = 0; i < RADV_DEVICE_EXTENSION_COUNT; i++) {
2316 if (device->supported_extensions.extensions[i]) {
2317 vk_outarray_append(&out, prop) {
2318 *prop = radv_device_extensions[i];
2319 }
2320 }
2321 }
2322
2323 return vk_outarray_status(&out);
2324 }
2325
2326 PFN_vkVoidFunction radv_GetInstanceProcAddr(
2327 VkInstance _instance,
2328 const char* pName)
2329 {
2330 RADV_FROM_HANDLE(radv_instance, instance, _instance);
2331
2332 return radv_lookup_entrypoint_checked(pName,
2333 instance ? instance->apiVersion : 0,
2334 instance ? &instance->enabled_extensions : NULL,
2335 NULL);
2336 }
2337
2338 /* The loader wants us to expose a second GetInstanceProcAddr function
2339 * to work around certain LD_PRELOAD issues seen in apps.
2340 */
2341 PUBLIC
2342 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
2343 VkInstance instance,
2344 const char* pName);
2345
2346 PUBLIC
2347 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
2348 VkInstance instance,
2349 const char* pName)
2350 {
2351 return radv_GetInstanceProcAddr(instance, pName);
2352 }
2353
2354 PFN_vkVoidFunction radv_GetDeviceProcAddr(
2355 VkDevice _device,
2356 const char* pName)
2357 {
2358 RADV_FROM_HANDLE(radv_device, device, _device);
2359
2360 return radv_lookup_entrypoint_checked(pName,
2361 device->instance->apiVersion,
2362 &device->instance->enabled_extensions,
2363 &device->enabled_extensions);
2364 }
2365
2366 bool radv_get_memory_fd(struct radv_device *device,
2367 struct radv_device_memory *memory,
2368 int *pFD)
2369 {
2370 struct radeon_bo_metadata metadata;
2371
2372 if (memory->image) {
2373 radv_init_metadata(device, memory->image, &metadata);
2374 device->ws->buffer_set_metadata(memory->bo, &metadata);
2375 }
2376
2377 return device->ws->buffer_get_fd(device->ws, memory->bo,
2378 pFD);
2379 }
2380
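/* Common allocation path behind vkAllocateMemory. The pNext chain picks the
 * strategy: imported fds and host pointers wrap existing storage, dedicated
 * allocations remember their image/buffer for later metadata export, and
 * everything else becomes a fresh winsys BO whose domain and flags are
 * derived from the memory type index.
 */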
2381 static VkResult radv_alloc_memory(struct radv_device *device,
2382 const VkMemoryAllocateInfo* pAllocateInfo,
2383 const VkAllocationCallbacks* pAllocator,
2384 VkDeviceMemory* pMem)
2385 {
2386 struct radv_device_memory *mem;
2387 VkResult result;
2388 enum radeon_bo_domain domain;
2389 uint32_t flags = 0;
2390 enum radv_mem_type mem_type_index = device->physical_device->mem_type_indices[pAllocateInfo->memoryTypeIndex];
2391
2392 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
2393
2394 if (pAllocateInfo->allocationSize == 0) {
2395 /* Apparently, this is allowed */
2396 *pMem = VK_NULL_HANDLE;
2397 return VK_SUCCESS;
2398 }
2399
2400 const VkImportMemoryFdInfoKHR *import_info =
2401 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
2402 const VkMemoryDedicatedAllocateInfoKHR *dedicate_info =
2403 vk_find_struct_const(pAllocateInfo->pNext, MEMORY_DEDICATED_ALLOCATE_INFO_KHR);
2404 const VkExportMemoryAllocateInfoKHR *export_info =
2405 vk_find_struct_const(pAllocateInfo->pNext, EXPORT_MEMORY_ALLOCATE_INFO_KHR);
2406 const VkImportMemoryHostPointerInfoEXT *host_ptr_info =
2407 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_HOST_POINTER_INFO_EXT);
2408
2409 const struct wsi_memory_allocate_info *wsi_info =
2410 vk_find_struct_const(pAllocateInfo->pNext, WSI_MEMORY_ALLOCATE_INFO_MESA);
2411
2412 mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
2413 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2414 if (mem == NULL)
2415 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2416
2417 if (wsi_info && wsi_info->implicit_sync)
2418 flags |= RADEON_FLAG_IMPLICIT_SYNC;
2419
2420 if (dedicate_info) {
2421 mem->image = radv_image_from_handle(dedicate_info->image);
2422 mem->buffer = radv_buffer_from_handle(dedicate_info->buffer);
2423 } else {
2424 mem->image = NULL;
2425 mem->buffer = NULL;
2426 }
2427
2428 mem->user_ptr = NULL;
2429
2430 if (import_info) {
2431 assert(import_info->handleType ==
2432 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR ||
2433 import_info->handleType ==
2434 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2435 mem->bo = device->ws->buffer_from_fd(device->ws, import_info->fd,
2436 NULL, NULL);
2437 if (!mem->bo) {
2438 result = VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR;
2439 goto fail;
2440 } else {
2441 close(import_info->fd);
2442 goto out_success;
2443 }
2444 }
2445
2446 if (host_ptr_info) {
2447 assert(host_ptr_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT);
2448 assert(mem_type_index == RADV_MEM_TYPE_GTT_CACHED);
2449 mem->bo = device->ws->buffer_from_ptr(device->ws, host_ptr_info->pHostPointer,
2450 pAllocateInfo->allocationSize);
2451 if (!mem->bo) {
2452 result = VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR;
2453 goto fail;
2454 } else {
2455 mem->user_ptr = host_ptr_info->pHostPointer;
2456 goto out_success;
2457 }
2458 }
2459
2460 uint64_t alloc_size = align_u64(pAllocateInfo->allocationSize, 4096);
2461 if (mem_type_index == RADV_MEM_TYPE_GTT_WRITE_COMBINE ||
2462 mem_type_index == RADV_MEM_TYPE_GTT_CACHED)
2463 domain = RADEON_DOMAIN_GTT;
2464 else
2465 domain = RADEON_DOMAIN_VRAM;
2466
2467 if (mem_type_index == RADV_MEM_TYPE_VRAM)
2468 flags |= RADEON_FLAG_NO_CPU_ACCESS;
2469 else
2470 flags |= RADEON_FLAG_CPU_ACCESS;
2471
2472 if (mem_type_index == RADV_MEM_TYPE_GTT_WRITE_COMBINE)
2473 flags |= RADEON_FLAG_GTT_WC;
2474
2475 if (!dedicate_info && !import_info && (!export_info || !export_info->handleTypes))
2476 flags |= RADEON_FLAG_NO_INTERPROCESS_SHARING;
2477
2478 mem->bo = device->ws->buffer_create(device->ws, alloc_size, device->physical_device->rad_info.max_alignment,
2479 domain, flags);
2480
2481 if (!mem->bo) {
2482 result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
2483 goto fail;
2484 }
2485 mem->type_index = mem_type_index;
2486 out_success:
2487 *pMem = radv_device_memory_to_handle(mem);
2488
2489 return VK_SUCCESS;
2490
2491 fail:
2492 vk_free2(&device->alloc, pAllocator, mem);
2493
2494 return result;
2495 }
2496
2497 VkResult radv_AllocateMemory(
2498 VkDevice _device,
2499 const VkMemoryAllocateInfo* pAllocateInfo,
2500 const VkAllocationCallbacks* pAllocator,
2501 VkDeviceMemory* pMem)
2502 {
2503 RADV_FROM_HANDLE(radv_device, device, _device);
2504 return radv_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
2505 }
2506
2507 void radv_FreeMemory(
2508 VkDevice _device,
2509 VkDeviceMemory _mem,
2510 const VkAllocationCallbacks* pAllocator)
2511 {
2512 RADV_FROM_HANDLE(radv_device, device, _device);
2513 RADV_FROM_HANDLE(radv_device_memory, mem, _mem);
2514
2515 if (mem == NULL)
2516 return;
2517
2518 device->ws->buffer_destroy(mem->bo);
2519 mem->bo = NULL;
2520
2521 vk_free2(&device->alloc, pAllocator, mem);
2522 }
2523
2524 VkResult radv_MapMemory(
2525 VkDevice _device,
2526 VkDeviceMemory _memory,
2527 VkDeviceSize offset,
2528 VkDeviceSize size,
2529 VkMemoryMapFlags flags,
2530 void** ppData)
2531 {
2532 RADV_FROM_HANDLE(radv_device, device, _device);
2533 RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
2534
2535 if (mem == NULL) {
2536 *ppData = NULL;
2537 return VK_SUCCESS;
2538 }
2539
2540 if (mem->user_ptr)
2541 *ppData = mem->user_ptr;
2542 else
2543 *ppData = device->ws->buffer_map(mem->bo);
2544
2545 if (*ppData) {
2546 *ppData += offset;
2547 return VK_SUCCESS;
2548 }
2549
2550 return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
2551 }
2552
2553 void radv_UnmapMemory(
2554 VkDevice _device,
2555 VkDeviceMemory _memory)
2556 {
2557 RADV_FROM_HANDLE(radv_device, device, _device);
2558 RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
2559
2560 if (mem == NULL)
2561 return;
2562
2563 if (mem->user_ptr == NULL)
2564 device->ws->buffer_unmap(mem->bo);
2565 }
2566
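/* All host-visible memory types radv advertises are also host-coherent,
 * which is why flushing and invalidating mapped ranges can be no-ops.
 */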
2567 VkResult radv_FlushMappedMemoryRanges(
2568 VkDevice _device,
2569 uint32_t memoryRangeCount,
2570 const VkMappedMemoryRange* pMemoryRanges)
2571 {
2572 return VK_SUCCESS;
2573 }
2574
2575 VkResult radv_InvalidateMappedMemoryRanges(
2576 VkDevice _device,
2577 uint32_t memoryRangeCount,
2578 const VkMappedMemoryRange* pMemoryRanges)
2579 {
2580 return VK_SUCCESS;
2581 }
2582
2583 void radv_GetBufferMemoryRequirements(
2584 VkDevice _device,
2585 VkBuffer _buffer,
2586 VkMemoryRequirements* pMemoryRequirements)
2587 {
2588 RADV_FROM_HANDLE(radv_device, device, _device);
2589 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
2590
2591 pMemoryRequirements->memoryTypeBits = (1u << device->physical_device->memory_properties.memoryTypeCount) - 1;
2592
2593 if (buffer->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)
2594 pMemoryRequirements->alignment = 4096;
2595 else
2596 pMemoryRequirements->alignment = 16;
2597
2598 pMemoryRequirements->size = align64(buffer->size, pMemoryRequirements->alignment);
2599 }
2600
2601 void radv_GetBufferMemoryRequirements2(
2602 VkDevice device,
2603 const VkBufferMemoryRequirementsInfo2KHR* pInfo,
2604 VkMemoryRequirements2KHR* pMemoryRequirements)
2605 {
2606 radv_GetBufferMemoryRequirements(device, pInfo->buffer,
2607 &pMemoryRequirements->memoryRequirements);
2608 RADV_FROM_HANDLE(radv_buffer, buffer, pInfo->buffer);
2609 vk_foreach_struct(ext, pMemoryRequirements->pNext) {
2610 switch (ext->sType) {
2611 case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR: {
2612 VkMemoryDedicatedRequirementsKHR *req =
2613 (VkMemoryDedicatedRequirementsKHR *) ext;
2614 req->requiresDedicatedAllocation = buffer->shareable;
2615 req->prefersDedicatedAllocation = req->requiresDedicatedAllocation;
2616 break;
2617 }
2618 default:
2619 break;
2620 }
2621 }
2622 }
2623
2624 void radv_GetImageMemoryRequirements(
2625 VkDevice _device,
2626 VkImage _image,
2627 VkMemoryRequirements* pMemoryRequirements)
2628 {
2629 RADV_FROM_HANDLE(radv_device, device, _device);
2630 RADV_FROM_HANDLE(radv_image, image, _image);
2631
2632 pMemoryRequirements->memoryTypeBits = (1u << device->physical_device->memory_properties.memoryTypeCount) - 1;
2633
2634 pMemoryRequirements->size = image->size;
2635 pMemoryRequirements->alignment = image->alignment;
2636 }
2637
2638 void radv_GetImageMemoryRequirements2(
2639 VkDevice device,
2640 const VkImageMemoryRequirementsInfo2KHR* pInfo,
2641 VkMemoryRequirements2KHR* pMemoryRequirements)
2642 {
2643 radv_GetImageMemoryRequirements(device, pInfo->image,
2644 &pMemoryRequirements->memoryRequirements);
2645
2646 RADV_FROM_HANDLE(radv_image, image, pInfo->image);
2647
2648 vk_foreach_struct(ext, pMemoryRequirements->pNext) {
2649 switch (ext->sType) {
2650 case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR: {
2651 VkMemoryDedicatedRequirementsKHR *req =
2652 (VkMemoryDedicatedRequirementsKHR *) ext;
2653 req->requiresDedicatedAllocation = image->shareable;
2654 req->prefersDedicatedAllocation = req->requiresDedicatedAllocation;
2655 break;
2656 }
2657 default:
2658 break;
2659 }
2660 }
2661 }
2662
2663 void radv_GetImageSparseMemoryRequirements(
2664 VkDevice device,
2665 VkImage image,
2666 uint32_t* pSparseMemoryRequirementCount,
2667 VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
2668 {
2669 stub();
2670 }
2671
2672 void radv_GetImageSparseMemoryRequirements2(
2673 VkDevice device,
2674 const VkImageSparseMemoryRequirementsInfo2KHR* pInfo,
2675 uint32_t* pSparseMemoryRequirementCount,
2676 VkSparseImageMemoryRequirements2KHR* pSparseMemoryRequirements)
2677 {
2678 stub();
2679 }
2680
2681 void radv_GetDeviceMemoryCommitment(
2682 VkDevice device,
2683 VkDeviceMemory memory,
2684 VkDeviceSize* pCommittedMemoryInBytes)
2685 {
2686 *pCommittedMemoryInBytes = 0;
2687 }
2688
2689 VkResult radv_BindBufferMemory2(VkDevice device,
2690 uint32_t bindInfoCount,
2691 const VkBindBufferMemoryInfoKHR *pBindInfos)
2692 {
2693 for (uint32_t i = 0; i < bindInfoCount; ++i) {
2694 RADV_FROM_HANDLE(radv_device_memory, mem, pBindInfos[i].memory);
2695 RADV_FROM_HANDLE(radv_buffer, buffer, pBindInfos[i].buffer);
2696
2697 if (mem) {
2698 buffer->bo = mem->bo;
2699 buffer->offset = pBindInfos[i].memoryOffset;
2700 } else {
2701 buffer->bo = NULL;
2702 }
2703 }
2704 return VK_SUCCESS;
2705 }
2706
2707 VkResult radv_BindBufferMemory(
2708 VkDevice device,
2709 VkBuffer buffer,
2710 VkDeviceMemory memory,
2711 VkDeviceSize memoryOffset)
2712 {
2713 const VkBindBufferMemoryInfoKHR info = {
2714 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
2715 .buffer = buffer,
2716 .memory = memory,
2717 .memoryOffset = memoryOffset
2718 };
2719
2720 return radv_BindBufferMemory2(device, 1, &info);
2721 }
2722
2723 VkResult radv_BindImageMemory2(VkDevice device,
2724 uint32_t bindInfoCount,
2725 const VkBindImageMemoryInfoKHR *pBindInfos)
2726 {
2727 for (uint32_t i = 0; i < bindInfoCount; ++i) {
2728 RADV_FROM_HANDLE(radv_device_memory, mem, pBindInfos[i].memory);
2729 RADV_FROM_HANDLE(radv_image, image, pBindInfos[i].image);
2730
2731 if (mem) {
2732 image->bo = mem->bo;
2733 image->offset = pBindInfos[i].memoryOffset;
2734 } else {
2735 image->bo = NULL;
2736 image->offset = 0;
2737 }
2738 }
2739 return VK_SUCCESS;
2740 }
2741
2742
2743 VkResult radv_BindImageMemory(
2744 VkDevice device,
2745 VkImage image,
2746 VkDeviceMemory memory,
2747 VkDeviceSize memoryOffset)
2748 {
2749 const VkBindImageMemoryInfoKHR info = {
2750 		.sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR,
2751 .image = image,
2752 .memory = memory,
2753 .memoryOffset = memoryOffset
2754 };
2755
2756 return radv_BindImageMemory2(device, 1, &info);
2757 }
2758
2759
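/* Sparse binding: the buffer was created as a virtual BO (see
 * radv_CreateBuffer), so each bind simply remaps a range of its virtual
 * address space onto the backing BO, or unmaps it when no memory is given.
 */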
2760 static void
2761 radv_sparse_buffer_bind_memory(struct radv_device *device,
2762 const VkSparseBufferMemoryBindInfo *bind)
2763 {
2764 RADV_FROM_HANDLE(radv_buffer, buffer, bind->buffer);
2765
2766 for (uint32_t i = 0; i < bind->bindCount; ++i) {
2767 struct radv_device_memory *mem = NULL;
2768
2769 if (bind->pBinds[i].memory != VK_NULL_HANDLE)
2770 mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
2771
2772 device->ws->buffer_virtual_bind(buffer->bo,
2773 bind->pBinds[i].resourceOffset,
2774 bind->pBinds[i].size,
2775 mem ? mem->bo : NULL,
2776 bind->pBinds[i].memoryOffset);
2777 }
2778 }
2779
2780 static void
2781 radv_sparse_image_opaque_bind_memory(struct radv_device *device,
2782 const VkSparseImageOpaqueMemoryBindInfo *bind)
2783 {
2784 RADV_FROM_HANDLE(radv_image, image, bind->image);
2785
2786 for (uint32_t i = 0; i < bind->bindCount; ++i) {
2787 struct radv_device_memory *mem = NULL;
2788
2789 if (bind->pBinds[i].memory != VK_NULL_HANDLE)
2790 mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
2791
2792 device->ws->buffer_virtual_bind(image->bo,
2793 bind->pBinds[i].resourceOffset,
2794 bind->pBinds[i].size,
2795 mem ? mem->bo : NULL,
2796 bind->pBinds[i].memoryOffset);
2797 }
2798 }
2799
2800 VkResult radv_QueueBindSparse(
2801 VkQueue _queue,
2802 uint32_t bindInfoCount,
2803 const VkBindSparseInfo* pBindInfo,
2804 VkFence _fence)
2805 {
2806 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2807 RADV_FROM_HANDLE(radv_queue, queue, _queue);
2808 struct radeon_winsys_fence *base_fence = fence ? fence->fence : NULL;
2809 bool fence_emitted = false;
2810
2811 for (uint32_t i = 0; i < bindInfoCount; ++i) {
2812 struct radv_winsys_sem_info sem_info;
2813 for (uint32_t j = 0; j < pBindInfo[i].bufferBindCount; ++j) {
2814 radv_sparse_buffer_bind_memory(queue->device,
2815 pBindInfo[i].pBufferBinds + j);
2816 }
2817
2818 for (uint32_t j = 0; j < pBindInfo[i].imageOpaqueBindCount; ++j) {
2819 radv_sparse_image_opaque_bind_memory(queue->device,
2820 pBindInfo[i].pImageOpaqueBinds + j);
2821 }
2822
2823 VkResult result;
2824 result = radv_alloc_sem_info(&sem_info,
2825 pBindInfo[i].waitSemaphoreCount,
2826 pBindInfo[i].pWaitSemaphores,
2827 pBindInfo[i].signalSemaphoreCount,
2828 pBindInfo[i].pSignalSemaphores,
2829 _fence);
2830 if (result != VK_SUCCESS)
2831 return result;
2832
2833 if (pBindInfo[i].waitSemaphoreCount || pBindInfo[i].signalSemaphoreCount) {
2834 queue->device->ws->cs_submit(queue->hw_ctx, queue->queue_idx,
2835 &queue->device->empty_cs[queue->queue_family_index],
2836 1, NULL, NULL,
2837 &sem_info,
2838 false, base_fence);
2839 fence_emitted = true;
2840 if (fence)
2841 fence->submitted = true;
2842 }
2843
2844 radv_free_sem_info(&sem_info);
2845
2846 }
2847
2848 if (fence) {
2849 if (!fence_emitted) {
2850 radv_signal_fence(queue, fence);
2851 }
2852 fence->submitted = true;
2853 }
2854
2855 return VK_SUCCESS;
2856 }
2857
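/* Fences come in two flavours: a kernel syncobj when the fence may be
 * exported (or the device always uses syncobjs), otherwise a legacy winsys
 * fence tracked with the submitted/signalled flags.
 */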
2858 VkResult radv_CreateFence(
2859 VkDevice _device,
2860 const VkFenceCreateInfo* pCreateInfo,
2861 const VkAllocationCallbacks* pAllocator,
2862 VkFence* pFence)
2863 {
2864 RADV_FROM_HANDLE(radv_device, device, _device);
2865 const VkExportFenceCreateInfoKHR *export =
2866 vk_find_struct_const(pCreateInfo->pNext, EXPORT_FENCE_CREATE_INFO_KHR);
2867 VkExternalFenceHandleTypeFlagsKHR handleTypes =
2868 export ? export->handleTypes : 0;
2869
2870 struct radv_fence *fence = vk_alloc2(&device->alloc, pAllocator,
2871 sizeof(*fence), 8,
2872 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2873
2874 if (!fence)
2875 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2876
2877 fence->submitted = false;
2878 fence->signalled = !!(pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT);
2879 fence->temp_syncobj = 0;
2880 if (device->always_use_syncobj || handleTypes) {
2881 int ret = device->ws->create_syncobj(device->ws, &fence->syncobj);
2882 if (ret) {
2883 vk_free2(&device->alloc, pAllocator, fence);
2884 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2885 }
2886 if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
2887 device->ws->signal_syncobj(device->ws, fence->syncobj);
2888 }
2889 fence->fence = NULL;
2890 } else {
2891 fence->fence = device->ws->create_fence();
2892 if (!fence->fence) {
2893 vk_free2(&device->alloc, pAllocator, fence);
2894 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2895 }
2896 fence->syncobj = 0;
2897 }
2898
2899 *pFence = radv_fence_to_handle(fence);
2900
2901 return VK_SUCCESS;
2902 }
2903
2904 void radv_DestroyFence(
2905 VkDevice _device,
2906 VkFence _fence,
2907 const VkAllocationCallbacks* pAllocator)
2908 {
2909 RADV_FROM_HANDLE(radv_device, device, _device);
2910 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2911
2912 if (!fence)
2913 return;
2914
2915 if (fence->temp_syncobj)
2916 device->ws->destroy_syncobj(device->ws, fence->temp_syncobj);
2917 if (fence->syncobj)
2918 device->ws->destroy_syncobj(device->ws, fence->syncobj);
2919 if (fence->fence)
2920 device->ws->destroy_fence(fence->fence);
2921 vk_free2(&device->alloc, pAllocator, fence);
2922 }
2923
2924
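/* Fence waits below convert relative timeouts to absolute CLOCK_MONOTONIC
 * deadlines up front, clamped so that a "wait forever" UINT64_MAX timeout
 * cannot overflow when added to the current time.
 */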
2925 static uint64_t radv_get_current_time(void)
2926 {
2927 struct timespec tv;
2928 clock_gettime(CLOCK_MONOTONIC, &tv);
2929 return tv.tv_nsec + tv.tv_sec*1000000000ull;
2930 }
2931
2932 static uint64_t radv_get_absolute_timeout(uint64_t timeout)
2933 {
2934 uint64_t current_time = radv_get_current_time();
2935
2936 timeout = MIN2(UINT64_MAX - current_time, timeout);
2937
2938 return current_time + timeout;
2939 }
2940
2941
2942 static bool radv_all_fences_plain_and_submitted(uint32_t fenceCount, const VkFence *pFences)
2943 {
2944 for (uint32_t i = 0; i < fenceCount; ++i) {
2945 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
2946 if (fence->syncobj || fence->temp_syncobj || (!fence->signalled && !fence->submitted))
2947 return false;
2948 }
2949 return true;
2950 }
2951
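/* Three wait strategies, tried in order: kernel syncobj waits when the
 * device always uses syncobjs, a single winsys wait-for-any call for the
 * !waitAll case on new enough kernels (DRM minor >= 10), and otherwise a
 * per-fence wait that busy-waits for fences that have not been submitted
 * yet.
 */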
2952 VkResult radv_WaitForFences(
2953 VkDevice _device,
2954 uint32_t fenceCount,
2955 const VkFence* pFences,
2956 VkBool32 waitAll,
2957 uint64_t timeout)
2958 {
2959 RADV_FROM_HANDLE(radv_device, device, _device);
2960 timeout = radv_get_absolute_timeout(timeout);
2961
2962 if (device->always_use_syncobj) {
2963 uint32_t *handles = malloc(sizeof(uint32_t) * fenceCount);
2964 if (!handles)
2965 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2966
2967 for (uint32_t i = 0; i < fenceCount; ++i) {
2968 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
2969 handles[i] = fence->temp_syncobj ? fence->temp_syncobj : fence->syncobj;
2970 }
2971
2972 bool success = device->ws->wait_syncobj(device->ws, handles, fenceCount, waitAll, timeout);
2973
2974 free(handles);
2975 return success ? VK_SUCCESS : VK_TIMEOUT;
2976 }
2977
2978 if (!waitAll && fenceCount > 1) {
2979 /* Not doing this by default for waitAll, due to needing to allocate twice. */
2980 if (device->physical_device->rad_info.drm_minor >= 10 && radv_all_fences_plain_and_submitted(fenceCount, pFences)) {
2981 uint32_t wait_count = 0;
2982 struct radeon_winsys_fence **fences = malloc(sizeof(struct radeon_winsys_fence *) * fenceCount);
2983 if (!fences)
2984 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2985
2986 for (uint32_t i = 0; i < fenceCount; ++i) {
2987 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
2988
2989 if (fence->signalled) {
2990 free(fences);
2991 return VK_SUCCESS;
2992 }
2993
2994 fences[wait_count++] = fence->fence;
2995 }
2996
2997 bool success = device->ws->fences_wait(device->ws, fences, wait_count,
2998 waitAll, timeout - radv_get_current_time());
2999
3000 free(fences);
3001 return success ? VK_SUCCESS : VK_TIMEOUT;
3002 }
3003
3004 while(radv_get_current_time() <= timeout) {
3005 for (uint32_t i = 0; i < fenceCount; ++i) {
3006 if (radv_GetFenceStatus(_device, pFences[i]) == VK_SUCCESS)
3007 return VK_SUCCESS;
3008 }
3009 }
3010 return VK_TIMEOUT;
3011 }
3012
3013 for (uint32_t i = 0; i < fenceCount; ++i) {
3014 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
3015 bool expired = false;
3016
3017 if (fence->temp_syncobj) {
3018 if (!device->ws->wait_syncobj(device->ws, &fence->temp_syncobj, 1, true, timeout))
3019 return VK_TIMEOUT;
3020 continue;
3021 }
3022
3023 if (fence->syncobj) {
3024 if (!device->ws->wait_syncobj(device->ws, &fence->syncobj, 1, true, timeout))
3025 return VK_TIMEOUT;
3026 continue;
3027 }
3028
3029 if (fence->signalled)
3030 continue;
3031
3032 if (!fence->submitted) {
3033 while(radv_get_current_time() <= timeout && !fence->submitted)
3034 /* Do nothing */;
3035
3036 if (!fence->submitted)
3037 return VK_TIMEOUT;
3038
3039 /* Recheck as it may have been set by submitting operations. */
3040 if (fence->signalled)
3041 continue;
3042 }
3043
3044 expired = device->ws->fence_wait(device->ws, fence->fence, true, timeout);
3045 if (!expired)
3046 return VK_TIMEOUT;
3047
3048 fence->signalled = true;
3049 }
3050
3051 return VK_SUCCESS;
3052 }
3053
3054 VkResult radv_ResetFences(VkDevice _device,
3055 uint32_t fenceCount,
3056 const VkFence *pFences)
3057 {
3058 RADV_FROM_HANDLE(radv_device, device, _device);
3059
3060 for (unsigned i = 0; i < fenceCount; ++i) {
3061 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
3062 fence->submitted = fence->signalled = false;
3063
3064 /* Per spec, we first restore the permanent payload, and then reset, so
3065 * having a temp syncobj should not skip resetting the permanent syncobj. */
3066 if (fence->temp_syncobj) {
3067 device->ws->destroy_syncobj(device->ws, fence->temp_syncobj);
3068 fence->temp_syncobj = 0;
3069 }
3070
3071 if (fence->syncobj) {
3072 device->ws->reset_syncobj(device->ws, fence->syncobj);
3073 }
3074 }
3075
3076 return VK_SUCCESS;
3077 }
3078
3079 VkResult radv_GetFenceStatus(VkDevice _device, VkFence _fence)
3080 {
3081 RADV_FROM_HANDLE(radv_device, device, _device);
3082 RADV_FROM_HANDLE(radv_fence, fence, _fence);
3083
3084 if (fence->temp_syncobj) {
3085 bool success = device->ws->wait_syncobj(device->ws, &fence->temp_syncobj, 1, true, 0);
3086 return success ? VK_SUCCESS : VK_NOT_READY;
3087 }
3088
3089 if (fence->syncobj) {
3090 bool success = device->ws->wait_syncobj(device->ws, &fence->syncobj, 1, true, 0);
3091 return success ? VK_SUCCESS : VK_NOT_READY;
3092 }
3093
3094 if (fence->signalled)
3095 return VK_SUCCESS;
3096 if (!fence->submitted)
3097 return VK_NOT_READY;
3098 if (!device->ws->fence_wait(device->ws, fence->fence, false, 0))
3099 return VK_NOT_READY;
3100
3101 return VK_SUCCESS;
3102 }
3103
3104
3105 // Queue semaphore functions
3106
3107 VkResult radv_CreateSemaphore(
3108 VkDevice _device,
3109 const VkSemaphoreCreateInfo* pCreateInfo,
3110 const VkAllocationCallbacks* pAllocator,
3111 VkSemaphore* pSemaphore)
3112 {
3113 RADV_FROM_HANDLE(radv_device, device, _device);
3114 const VkExportSemaphoreCreateInfoKHR *export =
3115 vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO_KHR);
3116 VkExternalSemaphoreHandleTypeFlagsKHR handleTypes =
3117 export ? export->handleTypes : 0;
3118
3119 struct radv_semaphore *sem = vk_alloc2(&device->alloc, pAllocator,
3120 sizeof(*sem), 8,
3121 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3122 if (!sem)
3123 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3124
3125 sem->temp_syncobj = 0;
3126 /* create a syncobject if we are going to export this semaphore */
3127 if (device->always_use_syncobj || handleTypes) {
3128 assert (device->physical_device->rad_info.has_syncobj);
3129 int ret = device->ws->create_syncobj(device->ws, &sem->syncobj);
3130 if (ret) {
3131 vk_free2(&device->alloc, pAllocator, sem);
3132 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3133 }
3134 sem->sem = NULL;
3135 } else {
3136 sem->sem = device->ws->create_sem(device->ws);
3137 if (!sem->sem) {
3138 vk_free2(&device->alloc, pAllocator, sem);
3139 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3140 }
3141 sem->syncobj = 0;
3142 }
3143
3144 *pSemaphore = radv_semaphore_to_handle(sem);
3145 return VK_SUCCESS;
3146 }
3147
3148 void radv_DestroySemaphore(
3149 VkDevice _device,
3150 VkSemaphore _semaphore,
3151 const VkAllocationCallbacks* pAllocator)
3152 {
3153 RADV_FROM_HANDLE(radv_device, device, _device);
3154 RADV_FROM_HANDLE(radv_semaphore, sem, _semaphore);
3155 if (!_semaphore)
3156 return;
3157
3158 if (sem->syncobj)
3159 device->ws->destroy_syncobj(device->ws, sem->syncobj);
3160 else
3161 device->ws->destroy_sem(sem->sem);
3162 vk_free2(&device->alloc, pAllocator, sem);
3163 }
3164
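/* Events are backed by a single 8-byte, CPU-visible GTT buffer. The host
 * side below just writes or reads the mapping; the command-stream side
 * (handled elsewhere) writes the same dword from the GPU.
 */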
3165 VkResult radv_CreateEvent(
3166 VkDevice _device,
3167 const VkEventCreateInfo* pCreateInfo,
3168 const VkAllocationCallbacks* pAllocator,
3169 VkEvent* pEvent)
3170 {
3171 RADV_FROM_HANDLE(radv_device, device, _device);
3172 struct radv_event *event = vk_alloc2(&device->alloc, pAllocator,
3173 sizeof(*event), 8,
3174 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3175
3176 if (!event)
3177 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3178
3179 event->bo = device->ws->buffer_create(device->ws, 8, 8,
3180 RADEON_DOMAIN_GTT,
3181 RADEON_FLAG_VA_UNCACHED | RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING);
3182 if (!event->bo) {
3183 vk_free2(&device->alloc, pAllocator, event);
3184 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
3185 }
3186
3187 event->map = (uint64_t*)device->ws->buffer_map(event->bo);
3188
3189 *pEvent = radv_event_to_handle(event);
3190
3191 return VK_SUCCESS;
3192 }
3193
3194 void radv_DestroyEvent(
3195 VkDevice _device,
3196 VkEvent _event,
3197 const VkAllocationCallbacks* pAllocator)
3198 {
3199 RADV_FROM_HANDLE(radv_device, device, _device);
3200 RADV_FROM_HANDLE(radv_event, event, _event);
3201
3202 if (!event)
3203 return;
3204 device->ws->buffer_destroy(event->bo);
3205 vk_free2(&device->alloc, pAllocator, event);
3206 }
3207
3208 VkResult radv_GetEventStatus(
3209 VkDevice _device,
3210 VkEvent _event)
3211 {
3212 RADV_FROM_HANDLE(radv_event, event, _event);
3213
3214 if (*event->map == 1)
3215 return VK_EVENT_SET;
3216 return VK_EVENT_RESET;
3217 }
3218
3219 VkResult radv_SetEvent(
3220 VkDevice _device,
3221 VkEvent _event)
3222 {
3223 RADV_FROM_HANDLE(radv_event, event, _event);
3224 *event->map = 1;
3225
3226 return VK_SUCCESS;
3227 }
3228
3229 VkResult radv_ResetEvent(
3230 VkDevice _device,
3231 VkEvent _event)
3232 {
3233 RADV_FROM_HANDLE(radv_event, event, _event);
3234 *event->map = 0;
3235
3236 return VK_SUCCESS;
3237 }
3238
3239 VkResult radv_CreateBuffer(
3240 VkDevice _device,
3241 const VkBufferCreateInfo* pCreateInfo,
3242 const VkAllocationCallbacks* pAllocator,
3243 VkBuffer* pBuffer)
3244 {
3245 RADV_FROM_HANDLE(radv_device, device, _device);
3246 struct radv_buffer *buffer;
3247
3248 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
3249
3250 buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
3251 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3252 if (buffer == NULL)
3253 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3254
3255 buffer->size = pCreateInfo->size;
3256 buffer->usage = pCreateInfo->usage;
3257 buffer->bo = NULL;
3258 buffer->offset = 0;
3259 buffer->flags = pCreateInfo->flags;
3260
3261 buffer->shareable = vk_find_struct_const(pCreateInfo->pNext,
3262 EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR) != NULL;
3263
3264 if (pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) {
3265 buffer->bo = device->ws->buffer_create(device->ws,
3266 align64(buffer->size, 4096),
3267 4096, 0, RADEON_FLAG_VIRTUAL);
3268 if (!buffer->bo) {
3269 vk_free2(&device->alloc, pAllocator, buffer);
3270 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
3271 }
3272 }
3273
3274 *pBuffer = radv_buffer_to_handle(buffer);
3275
3276 return VK_SUCCESS;
3277 }
3278
3279 void radv_DestroyBuffer(
3280 VkDevice _device,
3281 VkBuffer _buffer,
3282 const VkAllocationCallbacks* pAllocator)
3283 {
3284 RADV_FROM_HANDLE(radv_device, device, _device);
3285 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3286
3287 if (!buffer)
3288 return;
3289
3290 if (buffer->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)
3291 device->ws->buffer_destroy(buffer->bo);
3292
3293 vk_free2(&device->alloc, pAllocator, buffer);
3294 }
3295
3296 static inline unsigned
3297 si_tile_mode_index(const struct radv_image *image, unsigned level, bool stencil)
3298 {
3299 if (stencil)
3300 return image->surface.u.legacy.stencil_tiling_index[level];
3301 else
3302 return image->surface.u.legacy.tiling_index[level];
3303 }
3304
3305 static uint32_t radv_surface_max_layer_count(struct radv_image_view *iview)
3306 {
3307 return iview->type == VK_IMAGE_VIEW_TYPE_3D ? iview->extent.depth : (iview->base_layer + iview->layer_count);
3308 }
3309
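/* Fills the CB_COLOR* register values for a color attachment. GFX9 takes
 * the unified swizzle-mode path while older chips program legacy tiling
 * indices; the tail derives the number format, component swap and blend
 * behaviour from the Vulkan format.
 */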
3310 static void
3311 radv_initialise_color_surface(struct radv_device *device,
3312 struct radv_color_buffer_info *cb,
3313 struct radv_image_view *iview)
3314 {
3315 const struct vk_format_description *desc;
3316 unsigned ntype, format, swap, endian;
3317 unsigned blend_clamp = 0, blend_bypass = 0;
3318 uint64_t va;
3319 const struct radeon_surf *surf = &iview->image->surface;
3320
3321 desc = vk_format_description(iview->vk_format);
3322
3323 memset(cb, 0, sizeof(*cb));
3324
3325 /* Intensity is implemented as Red, so treat it that way. */
3326 cb->cb_color_attrib = S_028C74_FORCE_DST_ALPHA_1(desc->swizzle[3] == VK_SWIZZLE_1);
3327
3328 va = radv_buffer_get_va(iview->bo) + iview->image->offset;
3329
3330 cb->cb_color_base = va >> 8;
3331
3332 if (device->physical_device->rad_info.chip_class >= GFX9) {
3333 struct gfx9_surf_meta_flags meta;
3334 if (iview->image->dcc_offset)
3335 meta = iview->image->surface.u.gfx9.dcc;
3336 else
3337 meta = iview->image->surface.u.gfx9.cmask;
3338
3339 cb->cb_color_attrib |= S_028C74_COLOR_SW_MODE(iview->image->surface.u.gfx9.surf.swizzle_mode) |
3340 S_028C74_FMASK_SW_MODE(iview->image->surface.u.gfx9.fmask.swizzle_mode) |
3341 S_028C74_RB_ALIGNED(meta.rb_aligned) |
3342 S_028C74_PIPE_ALIGNED(meta.pipe_aligned);
3343
3344 cb->cb_color_base += iview->image->surface.u.gfx9.surf_offset >> 8;
3345 cb->cb_color_base |= iview->image->surface.tile_swizzle;
3346 } else {
3347 const struct legacy_surf_level *level_info = &surf->u.legacy.level[iview->base_mip];
3348 unsigned pitch_tile_max, slice_tile_max, tile_mode_index;
3349
3350 cb->cb_color_base += level_info->offset >> 8;
3351 if (level_info->mode == RADEON_SURF_MODE_2D)
3352 cb->cb_color_base |= iview->image->surface.tile_swizzle;
3353
3354 pitch_tile_max = level_info->nblk_x / 8 - 1;
3355 slice_tile_max = (level_info->nblk_x * level_info->nblk_y) / 64 - 1;
3356 tile_mode_index = si_tile_mode_index(iview->image, iview->base_mip, false);
3357
3358 cb->cb_color_pitch = S_028C64_TILE_MAX(pitch_tile_max);
3359 cb->cb_color_slice = S_028C68_TILE_MAX(slice_tile_max);
3360 cb->cb_color_cmask_slice = iview->image->cmask.slice_tile_max;
3361
3362 cb->cb_color_attrib |= S_028C74_TILE_MODE_INDEX(tile_mode_index);
3363
3364 if (iview->image->fmask.size) {
3365 if (device->physical_device->rad_info.chip_class >= CIK)
3366 cb->cb_color_pitch |= S_028C64_FMASK_TILE_MAX(iview->image->fmask.pitch_in_pixels / 8 - 1);
3367 cb->cb_color_attrib |= S_028C74_FMASK_TILE_MODE_INDEX(iview->image->fmask.tile_mode_index);
3368 cb->cb_color_fmask_slice = S_028C88_TILE_MAX(iview->image->fmask.slice_tile_max);
3369 } else {
3370 /* This must be set for fast clear to work without FMASK. */
3371 if (device->physical_device->rad_info.chip_class >= CIK)
3372 cb->cb_color_pitch |= S_028C64_FMASK_TILE_MAX(pitch_tile_max);
3373 cb->cb_color_attrib |= S_028C74_FMASK_TILE_MODE_INDEX(tile_mode_index);
3374 cb->cb_color_fmask_slice = S_028C88_TILE_MAX(slice_tile_max);
3375 }
3376 }
3377
3378 /* CMASK variables */
3379 va = radv_buffer_get_va(iview->bo) + iview->image->offset;
3380 va += iview->image->cmask.offset;
3381 cb->cb_color_cmask = va >> 8;
3382
3383 va = radv_buffer_get_va(iview->bo) + iview->image->offset;
3384 va += iview->image->dcc_offset;
3385 cb->cb_dcc_base = va >> 8;
3386 cb->cb_dcc_base |= iview->image->surface.tile_swizzle;
3387
3388 uint32_t max_slice = radv_surface_max_layer_count(iview) - 1;
3389 cb->cb_color_view = S_028C6C_SLICE_START(iview->base_layer) |
3390 S_028C6C_SLICE_MAX(max_slice);
3391
	if (iview->image->info.samples > 1) {
		unsigned log_samples = util_logbase2(iview->image->info.samples);

		cb->cb_color_attrib |= S_028C74_NUM_SAMPLES(log_samples) |
			S_028C74_NUM_FRAGMENTS(log_samples);
	}

	if (iview->image->fmask.size) {
		va = radv_buffer_get_va(iview->bo) + iview->image->offset + iview->image->fmask.offset;
		cb->cb_color_fmask = va >> 8;
		cb->cb_color_fmask |= iview->image->fmask.tile_swizzle;
	} else {
		cb->cb_color_fmask = cb->cb_color_base;
	}

	ntype = radv_translate_color_numformat(iview->vk_format,
					       desc,
					       vk_format_get_first_non_void_channel(iview->vk_format));
	format = radv_translate_colorformat(iview->vk_format);
	if (format == V_028C70_COLOR_INVALID || ntype == ~0u)
		radv_finishme("Illegal color\n");
	swap = radv_translate_colorswap(iview->vk_format, FALSE);
	endian = radv_colorformat_endian_swap(format);

	/* Blend clamp should be set for all NORM/SRGB types. */
	if (ntype == V_028C70_NUMBER_UNORM ||
	    ntype == V_028C70_NUMBER_SNORM ||
	    ntype == V_028C70_NUMBER_SRGB)
		blend_clamp = 1;

	/* Set blend bypass according to the docs if SINT/UINT or
	 * 8/24 COLOR variants.
	 */
	if (ntype == V_028C70_NUMBER_UINT || ntype == V_028C70_NUMBER_SINT ||
	    format == V_028C70_COLOR_8_24 || format == V_028C70_COLOR_24_8 ||
	    format == V_028C70_COLOR_X24_8_32_FLOAT) {
		blend_clamp = 0;
		blend_bypass = 1;
	}
#if 0
	if ((ntype == V_028C70_NUMBER_UINT || ntype == V_028C70_NUMBER_SINT) &&
	    (format == V_028C70_COLOR_8 ||
	     format == V_028C70_COLOR_8_8 ||
	     format == V_028C70_COLOR_8_8_8_8))
		->color_is_int8 = true;
#endif
	cb->cb_color_info = S_028C70_FORMAT(format) |
		S_028C70_COMP_SWAP(swap) |
		S_028C70_BLEND_CLAMP(blend_clamp) |
		S_028C70_BLEND_BYPASS(blend_bypass) |
		S_028C70_SIMPLE_FLOAT(1) |
		S_028C70_ROUND_MODE(ntype != V_028C70_NUMBER_UNORM &&
				    ntype != V_028C70_NUMBER_SNORM &&
				    ntype != V_028C70_NUMBER_SRGB &&
				    format != V_028C70_COLOR_8_24 &&
				    format != V_028C70_COLOR_24_8) |
		S_028C70_NUMBER_TYPE(ntype) |
		S_028C70_ENDIAN(endian);
	if ((iview->image->info.samples > 1) && iview->image->fmask.size) {
		cb->cb_color_info |= S_028C70_COMPRESSION(1);
		if (device->physical_device->rad_info.chip_class == SI) {
			unsigned fmask_bankh = util_logbase2(iview->image->fmask.bank_height);
			cb->cb_color_attrib |= S_028C74_FMASK_BANK_HEIGHT(fmask_bankh);
		}
	}

	if (iview->image->cmask.size &&
	    !(device->instance->debug_flags & RADV_DEBUG_NO_FAST_CLEARS))
		cb->cb_color_info |= S_028C70_FAST_CLEAR(1);

	if (radv_vi_dcc_enabled(iview->image, iview->base_mip))
		cb->cb_color_info |= S_028C70_DCC_ENABLE(1);

	if (device->physical_device->rad_info.chip_class >= VI) {
		unsigned max_uncompressed_block_size = V_028C78_MAX_BLOCK_SIZE_256B;
		unsigned min_compressed_block_size = V_028C78_MIN_BLOCK_SIZE_32B;
		unsigned independent_64b_blocks = 0;
		unsigned max_compressed_block_size;

		/* amdvlk: [min-compressed-block-size] should be set to 32 for
		 * dGPU and 64 for APU, because all of our APUs to date use
		 * DIMMs, which have a request granularity size of 64B, while
		 * all other chips have a 32B request size.
		 */
		if (!device->physical_device->rad_info.has_dedicated_vram)
			min_compressed_block_size = V_028C78_MIN_BLOCK_SIZE_64B;

		if (iview->image->info.samples > 1) {
			if (iview->image->surface.bpe == 1)
				max_uncompressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
			else if (iview->image->surface.bpe == 2)
				max_uncompressed_block_size = V_028C78_MAX_BLOCK_SIZE_128B;
		}

		if (iview->image->usage & (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
					   VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)) {
			independent_64b_blocks = 1;
			max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
		} else {
			max_compressed_block_size = max_uncompressed_block_size;
		}

		cb->cb_dcc_control = S_028C78_MAX_UNCOMPRESSED_BLOCK_SIZE(max_uncompressed_block_size) |
			S_028C78_MAX_COMPRESSED_BLOCK_SIZE(max_compressed_block_size) |
			S_028C78_MIN_COMPRESSED_BLOCK_SIZE(min_compressed_block_size) |
			S_028C78_INDEPENDENT_64B_BLOCKS(independent_64b_blocks);
	}

	/* This must be set for fast clear to work without FMASK. */
	if (!iview->image->fmask.size &&
	    device->physical_device->rad_info.chip_class == SI) {
		unsigned bankh = util_logbase2(iview->image->surface.u.legacy.bankh);
		cb->cb_color_attrib |= S_028C74_FMASK_BANK_HEIGHT(bankh);
	}

	if (device->physical_device->rad_info.chip_class >= GFX9) {
		unsigned mip0_depth = iview->image->type == VK_IMAGE_TYPE_3D ?
			(iview->extent.depth - 1) : (iview->image->info.array_size - 1);

		cb->cb_color_view |= S_028C6C_MIP_LEVEL(iview->base_mip);
		cb->cb_color_attrib |= S_028C74_MIP0_DEPTH(mip0_depth) |
			S_028C74_RESOURCE_TYPE(iview->image->surface.u.gfx9.resource_type);
		cb->cb_color_attrib2 = S_028C68_MIP0_WIDTH(iview->extent.width - 1) |
			S_028C68_MIP0_HEIGHT(iview->extent.height - 1) |
			S_028C68_MAX_MIP(iview->image->info.levels - 1);
	}
}

static void
radv_initialise_ds_surface(struct radv_device *device,
			   struct radv_ds_buffer_info *ds,
			   struct radv_image_view *iview)
{
	unsigned level = iview->base_mip;
	unsigned format, stencil_format;
	uint64_t va, s_offs, z_offs;
	bool stencil_only = false;
	memset(ds, 0, sizeof(*ds));
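
	/* POLY_OFFSET_NEG_NUM_DB_BITS encodes the depth format's precision
	 * (24/16 fixed-point bits, or 23 mantissa bits for float), and
	 * offset_scale is applied to the application's constant depth bias
	 * to match that precision.
	 */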
	switch (iview->image->vk_format) {
	case VK_FORMAT_D24_UNORM_S8_UINT:
	case VK_FORMAT_X8_D24_UNORM_PACK32:
		ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-24);
		ds->offset_scale = 2.0f;
		break;
	case VK_FORMAT_D16_UNORM:
	case VK_FORMAT_D16_UNORM_S8_UINT:
		ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-16);
		ds->offset_scale = 4.0f;
		break;
	case VK_FORMAT_D32_SFLOAT:
	case VK_FORMAT_D32_SFLOAT_S8_UINT:
		ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-23) |
			S_028B78_POLY_OFFSET_DB_IS_FLOAT_FMT(1);
		ds->offset_scale = 1.0f;
		break;
	case VK_FORMAT_S8_UINT:
		stencil_only = true;
		break;
	default:
		break;
	}

	format = radv_translate_dbformat(iview->image->vk_format);
	stencil_format = iview->image->surface.has_stencil ?
		V_028044_STENCIL_8 : V_028044_STENCIL_INVALID;

	uint32_t max_slice = radv_surface_max_layer_count(iview) - 1;
	ds->db_depth_view = S_028008_SLICE_START(iview->base_layer) |
		S_028008_SLICE_MAX(max_slice);

	ds->db_htile_data_base = 0;
	ds->db_htile_surface = 0;

	va = radv_buffer_get_va(iview->bo) + iview->image->offset;
	s_offs = z_offs = va;

	if (device->physical_device->rad_info.chip_class >= GFX9) {
		assert(iview->image->surface.u.gfx9.surf_offset == 0);
		s_offs += iview->image->surface.u.gfx9.stencil_offset;

		ds->db_z_info = S_028038_FORMAT(format) |
			S_028038_NUM_SAMPLES(util_logbase2(iview->image->info.samples)) |
			S_028038_SW_MODE(iview->image->surface.u.gfx9.surf.swizzle_mode) |
			S_028038_MAXMIP(iview->image->info.levels - 1);
		ds->db_stencil_info = S_02803C_FORMAT(stencil_format) |
			S_02803C_SW_MODE(iview->image->surface.u.gfx9.stencil.swizzle_mode);

		ds->db_z_info2 = S_028068_EPITCH(iview->image->surface.u.gfx9.surf.epitch);
		ds->db_stencil_info2 = S_02806C_EPITCH(iview->image->surface.u.gfx9.stencil.epitch);
		ds->db_depth_view |= S_028008_MIPID(level);

		ds->db_depth_size = S_02801C_X_MAX(iview->image->info.width - 1) |
			S_02801C_Y_MAX(iview->image->info.height - 1);

		if (radv_htile_enabled(iview->image, level)) {
			ds->db_z_info |= S_028038_TILE_SURFACE_ENABLE(1);

			if (iview->image->tc_compatible_htile) {
				unsigned max_zplanes = 4;

				if (iview->vk_format == VK_FORMAT_D16_UNORM &&
				    iview->image->info.samples > 1)
					max_zplanes = 2;

				ds->db_z_info |= S_028038_DECOMPRESS_ON_N_ZPLANES(max_zplanes + 1) |
					S_028038_ITERATE_FLUSH(1);
				ds->db_stencil_info |= S_02803C_ITERATE_FLUSH(1);
			}

			if (!iview->image->surface.has_stencil)
				/* Use all of the htile_buffer for depth if there's no stencil. */
				ds->db_stencil_info |= S_02803C_TILE_STENCIL_DISABLE(1);
			va = radv_buffer_get_va(iview->bo) + iview->image->offset +
				iview->image->htile_offset;
			ds->db_htile_data_base = va >> 8;
			ds->db_htile_surface = S_028ABC_FULL_CACHE(1) |
				S_028ABC_PIPE_ALIGNED(iview->image->surface.u.gfx9.htile.pipe_aligned) |
				S_028ABC_RB_ALIGNED(iview->image->surface.u.gfx9.htile.rb_aligned);
		}
	} else {
		const struct legacy_surf_level *level_info = &iview->image->surface.u.legacy.level[level];

		if (stencil_only)
			level_info = &iview->image->surface.u.legacy.stencil_level[level];

		z_offs += iview->image->surface.u.legacy.level[level].offset;
		s_offs += iview->image->surface.u.legacy.stencil_level[level].offset;

		ds->db_depth_info = S_02803C_ADDR5_SWIZZLE_MASK(!iview->image->tc_compatible_htile);
		ds->db_z_info = S_028040_FORMAT(format) | S_028040_ZRANGE_PRECISION(1);
		ds->db_stencil_info = S_028044_FORMAT(stencil_format);

		if (iview->image->info.samples > 1)
			ds->db_z_info |= S_028040_NUM_SAMPLES(util_logbase2(iview->image->info.samples));

		if (device->physical_device->rad_info.chip_class >= CIK) {
			struct radeon_info *info = &device->physical_device->rad_info;
			unsigned tiling_index = iview->image->surface.u.legacy.tiling_index[level];
			unsigned stencil_index = iview->image->surface.u.legacy.stencil_tiling_index[level];
			unsigned macro_index = iview->image->surface.u.legacy.macro_tile_index;
			unsigned tile_mode = info->si_tile_mode_array[tiling_index];
			unsigned stencil_tile_mode = info->si_tile_mode_array[stencil_index];
			unsigned macro_mode = info->cik_macrotile_mode_array[macro_index];

			if (stencil_only)
				tile_mode = stencil_tile_mode;

			ds->db_depth_info |=
				S_02803C_ARRAY_MODE(G_009910_ARRAY_MODE(tile_mode)) |
				S_02803C_PIPE_CONFIG(G_009910_PIPE_CONFIG(tile_mode)) |
				S_02803C_BANK_WIDTH(G_009990_BANK_WIDTH(macro_mode)) |
				S_02803C_BANK_HEIGHT(G_009990_BANK_HEIGHT(macro_mode)) |
				S_02803C_MACRO_TILE_ASPECT(G_009990_MACRO_TILE_ASPECT(macro_mode)) |
				S_02803C_NUM_BANKS(G_009990_NUM_BANKS(macro_mode));
			ds->db_z_info |= S_028040_TILE_SPLIT(G_009910_TILE_SPLIT(tile_mode));
			ds->db_stencil_info |= S_028044_TILE_SPLIT(G_009910_TILE_SPLIT(stencil_tile_mode));
		} else {
			unsigned tile_mode_index = si_tile_mode_index(iview->image, level, false);
			ds->db_z_info |= S_028040_TILE_MODE_INDEX(tile_mode_index);
			tile_mode_index = si_tile_mode_index(iview->image, level, true);
			ds->db_stencil_info |= S_028044_TILE_MODE_INDEX(tile_mode_index);
			if (stencil_only)
				ds->db_z_info |= S_028040_TILE_MODE_INDEX(tile_mode_index);
		}

		ds->db_depth_size = S_028058_PITCH_TILE_MAX((level_info->nblk_x / 8) - 1) |
			S_028058_HEIGHT_TILE_MAX((level_info->nblk_y / 8) - 1);
		ds->db_depth_slice = S_02805C_SLICE_TILE_MAX((level_info->nblk_x * level_info->nblk_y) / 64 - 1);

		if (radv_htile_enabled(iview->image, level)) {
			ds->db_z_info |= S_028040_TILE_SURFACE_ENABLE(1);

			if (!iview->image->surface.has_stencil &&
			    !iview->image->tc_compatible_htile)
				/* Use all of the htile_buffer for depth if there's no stencil. */
				ds->db_stencil_info |= S_028044_TILE_STENCIL_DISABLE(1);

			va = radv_buffer_get_va(iview->bo) + iview->image->offset +
				iview->image->htile_offset;
			ds->db_htile_data_base = va >> 8;
			ds->db_htile_surface = S_028ABC_FULL_CACHE(1);

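			/* TC-compatible HTILE requires the DB to decompress a
			 * tile once it accumulates more Z planes than the
			 * texture path can decode; the threshold depends on
			 * the sample count.
			 */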
			if (iview->image->tc_compatible_htile) {
				ds->db_htile_surface |= S_028ABC_TC_COMPATIBLE(1);

				if (iview->image->info.samples <= 1)
					ds->db_z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(5);
				else if (iview->image->info.samples <= 4)
					ds->db_z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(3);
				else
					ds->db_z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(2);
			}
		}
	}

	ds->db_z_read_base = ds->db_z_write_base = z_offs >> 8;
	ds->db_stencil_read_base = ds->db_stencil_write_base = s_offs >> 8;
}

VkResult radv_CreateFramebuffer(
	VkDevice                                    _device,
	const VkFramebufferCreateInfo*              pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkFramebuffer*                              pFramebuffer)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_framebuffer *framebuffer;

	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

	size_t size = sizeof(*framebuffer) +
		sizeof(struct radv_attachment_info) * pCreateInfo->attachmentCount;
	framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
				VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (framebuffer == NULL)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	framebuffer->attachment_count = pCreateInfo->attachmentCount;
	framebuffer->width = pCreateInfo->width;
	framebuffer->height = pCreateInfo->height;
	framebuffer->layers = pCreateInfo->layers;
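
	/* Initialise per-attachment CB/DB state and defensively clamp the
	 * framebuffer dimensions to the attachments; the spec requires every
	 * attachment to be at least as large as the framebuffer.
	 */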
	for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
		VkImageView _iview = pCreateInfo->pAttachments[i];
		struct radv_image_view *iview = radv_image_view_from_handle(_iview);
		framebuffer->attachments[i].attachment = iview;
		if (iview->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) {
			radv_initialise_color_surface(device, &framebuffer->attachments[i].cb, iview);
		} else if (iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
			radv_initialise_ds_surface(device, &framebuffer->attachments[i].ds, iview);
		}
		framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
		framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
		framebuffer->layers = MIN2(framebuffer->layers, radv_surface_max_layer_count(iview));
	}

	*pFramebuffer = radv_framebuffer_to_handle(framebuffer);
	return VK_SUCCESS;
}

void radv_DestroyFramebuffer(
	VkDevice                                    _device,
	VkFramebuffer                               _fb,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_framebuffer, fb, _fb);

	if (!fb)
		return;
	vk_free2(&device->alloc, pAllocator, fb);
}

static unsigned radv_tex_wrap(VkSamplerAddressMode address_mode)
{
	switch (address_mode) {
	case VK_SAMPLER_ADDRESS_MODE_REPEAT:
		return V_008F30_SQ_TEX_WRAP;
	case VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT:
		return V_008F30_SQ_TEX_MIRROR;
	case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE:
		return V_008F30_SQ_TEX_CLAMP_LAST_TEXEL;
	case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER:
		return V_008F30_SQ_TEX_CLAMP_BORDER;
	case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE:
		return V_008F30_SQ_TEX_MIRROR_ONCE_LAST_TEXEL;
	default:
		unreachable("illegal tex wrap mode");
		break;
	}
}

static unsigned
radv_tex_compare(VkCompareOp op)
{
	switch (op) {
	case VK_COMPARE_OP_NEVER:
		return V_008F30_SQ_TEX_DEPTH_COMPARE_NEVER;
	case VK_COMPARE_OP_LESS:
		return V_008F30_SQ_TEX_DEPTH_COMPARE_LESS;
	case VK_COMPARE_OP_EQUAL:
		return V_008F30_SQ_TEX_DEPTH_COMPARE_EQUAL;
	case VK_COMPARE_OP_LESS_OR_EQUAL:
		return V_008F30_SQ_TEX_DEPTH_COMPARE_LESSEQUAL;
	case VK_COMPARE_OP_GREATER:
		return V_008F30_SQ_TEX_DEPTH_COMPARE_GREATER;
	case VK_COMPARE_OP_NOT_EQUAL:
		return V_008F30_SQ_TEX_DEPTH_COMPARE_NOTEQUAL;
	case VK_COMPARE_OP_GREATER_OR_EQUAL:
		return V_008F30_SQ_TEX_DEPTH_COMPARE_GREATEREQUAL;
	case VK_COMPARE_OP_ALWAYS:
		return V_008F30_SQ_TEX_DEPTH_COMPARE_ALWAYS;
	default:
		unreachable("illegal compare mode");
		break;
	}
}

static unsigned
radv_tex_filter(VkFilter filter, unsigned max_aniso)
{
	switch (filter) {
	case VK_FILTER_NEAREST:
		return (max_aniso > 1 ? V_008F38_SQ_TEX_XY_FILTER_ANISO_POINT :
			V_008F38_SQ_TEX_XY_FILTER_POINT);
	case VK_FILTER_LINEAR:
		return (max_aniso > 1 ? V_008F38_SQ_TEX_XY_FILTER_ANISO_BILINEAR :
			V_008F38_SQ_TEX_XY_FILTER_BILINEAR);
	case VK_FILTER_CUBIC_IMG:
	default:
		fprintf(stderr, "illegal texture filter");
		return 0;
	}
}

static unsigned
radv_tex_mipfilter(VkSamplerMipmapMode mode)
{
	switch (mode) {
	case VK_SAMPLER_MIPMAP_MODE_NEAREST:
		return V_008F38_SQ_TEX_Z_FILTER_POINT;
	case VK_SAMPLER_MIPMAP_MODE_LINEAR:
		return V_008F38_SQ_TEX_Z_FILTER_LINEAR;
	default:
		return V_008F38_SQ_TEX_Z_FILTER_NONE;
	}
}

static unsigned
radv_tex_bordercolor(VkBorderColor bcolor)
{
	switch (bcolor) {
	case VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK:
	case VK_BORDER_COLOR_INT_TRANSPARENT_BLACK:
		return V_008F3C_SQ_TEX_BORDER_COLOR_TRANS_BLACK;
	case VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK:
	case VK_BORDER_COLOR_INT_OPAQUE_BLACK:
		return V_008F3C_SQ_TEX_BORDER_COLOR_OPAQUE_BLACK;
	case VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE:
	case VK_BORDER_COLOR_INT_OPAQUE_WHITE:
		return V_008F3C_SQ_TEX_BORDER_COLOR_OPAQUE_WHITE;
	default:
		break;
	}
	return 0;
}

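/* Map a maximum anisotropy value to the hardware's log2 ratio encoding:
 * 1 -> 0, 2-3 -> 1, 4-7 -> 2, 8-15 -> 3, 16+ -> 4.
 */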
static unsigned
radv_tex_aniso_filter(unsigned filter)
{
	if (filter < 2)
		return 0;
	if (filter < 4)
		return 1;
	if (filter < 8)
		return 2;
	if (filter < 16)
		return 3;
	return 4;
}

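/* Pack the four SQ_IMG_SAMP dwords. LODs and the LOD bias are converted
 * with S_FIXED() to the hardware's fixed-point encoding (8 fractional
 * bits), and the anisotropy fields are derived from the log2 ratio
 * computed above.
 */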
static void
radv_init_sampler(struct radv_device *device,
		  struct radv_sampler *sampler,
		  const VkSamplerCreateInfo *pCreateInfo)
{
	uint32_t max_aniso = pCreateInfo->anisotropyEnable && pCreateInfo->maxAnisotropy > 1.0 ?
		(uint32_t) pCreateInfo->maxAnisotropy : 0;
	uint32_t max_aniso_ratio = radv_tex_aniso_filter(max_aniso);
	bool is_vi = (device->physical_device->rad_info.chip_class >= VI);

	sampler->state[0] = (S_008F30_CLAMP_X(radv_tex_wrap(pCreateInfo->addressModeU)) |
			     S_008F30_CLAMP_Y(radv_tex_wrap(pCreateInfo->addressModeV)) |
			     S_008F30_CLAMP_Z(radv_tex_wrap(pCreateInfo->addressModeW)) |
			     S_008F30_MAX_ANISO_RATIO(max_aniso_ratio) |
			     S_008F30_DEPTH_COMPARE_FUNC(radv_tex_compare(pCreateInfo->compareOp)) |
			     S_008F30_FORCE_UNNORMALIZED(pCreateInfo->unnormalizedCoordinates ? 1 : 0) |
			     S_008F30_ANISO_THRESHOLD(max_aniso_ratio >> 1) |
			     S_008F30_ANISO_BIAS(max_aniso_ratio) |
			     S_008F30_DISABLE_CUBE_WRAP(0) |
			     S_008F30_COMPAT_MODE(is_vi));
	sampler->state[1] = (S_008F34_MIN_LOD(S_FIXED(CLAMP(pCreateInfo->minLod, 0, 15), 8)) |
			     S_008F34_MAX_LOD(S_FIXED(CLAMP(pCreateInfo->maxLod, 0, 15), 8)) |
			     S_008F34_PERF_MIP(max_aniso_ratio ? max_aniso_ratio + 6 : 0));
	sampler->state[2] = (S_008F38_LOD_BIAS(S_FIXED(CLAMP(pCreateInfo->mipLodBias, -16, 16), 8)) |
			     S_008F38_XY_MAG_FILTER(radv_tex_filter(pCreateInfo->magFilter, max_aniso)) |
			     S_008F38_XY_MIN_FILTER(radv_tex_filter(pCreateInfo->minFilter, max_aniso)) |
			     S_008F38_MIP_FILTER(radv_tex_mipfilter(pCreateInfo->mipmapMode)) |
			     S_008F38_MIP_POINT_PRECLAMP(0) |
			     S_008F38_DISABLE_LSB_CEIL(device->physical_device->rad_info.chip_class <= VI) |
			     S_008F38_FILTER_PREC_FIX(1) |
			     S_008F38_ANISO_OVERRIDE(is_vi));
	sampler->state[3] = (S_008F3C_BORDER_COLOR_PTR(0) |
			     S_008F3C_BORDER_COLOR_TYPE(radv_tex_bordercolor(pCreateInfo->borderColor)));
}

VkResult radv_CreateSampler(
	VkDevice                                    _device,
	const VkSamplerCreateInfo*                  pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkSampler*                                  pSampler)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_sampler *sampler;

	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

	sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
			    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (!sampler)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	radv_init_sampler(device, sampler, pCreateInfo);
	*pSampler = radv_sampler_to_handle(sampler);

	return VK_SUCCESS;
}

void radv_DestroySampler(
	VkDevice                                    _device,
	VkSampler                                   _sampler,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_sampler, sampler, _sampler);

	if (!sampler)
		return;
	vk_free2(&device->alloc, pAllocator, sampler);
}

/* vk_icd.h does not declare this function, so we declare it here to
 * suppress -Wmissing-prototypes.
 */
PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);

PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
{
	/* For the full details on loader interface versioning, see
	 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
	 * What follows is a condensed summary, to help you navigate the large and
	 * confusing official doc.
	 *
	 * - Loader interface v0 is incompatible with later versions. We don't
	 *   support it.
	 *
	 * - In loader interface v1:
	 *       - The first ICD entrypoint called by the loader is
	 *         vk_icdGetInstanceProcAddr(). The ICD must statically expose this
	 *         entrypoint.
	 *       - The ICD must statically expose no other Vulkan symbol unless it is
	 *         linked with -Bsymbolic.
	 *       - Each dispatchable Vulkan handle created by the ICD must be
	 *         a pointer to a struct whose first member is VK_LOADER_DATA. The
	 *         ICD must initialize VK_LOADER_DATA.loadMagic to ICD_LOADER_MAGIC.
	 *       - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
	 *         vkDestroySurfaceKHR(). The ICD must be capable of working with
	 *         such loader-managed surfaces.
	 *
	 * - Loader interface v2 differs from v1 in:
	 *       - The first ICD entrypoint called by the loader is
	 *         vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
	 *         statically expose this entrypoint.
	 *
	 * - Loader interface v3 differs from v2 in:
	 *       - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
	 *         vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
	 *         because the loader no longer does so.
	 */
	*pSupportedVersion = MIN2(*pSupportedVersion, 3u);
	return VK_SUCCESS;
}
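
/* Illustrative sketch (not driver code, hence the "#if 0"): how a loader
 * might drive the negotiation above. The helper name and the dlopen()'d
 * module handle are hypothetical; the entrypoint name and the "lower to
 * the minimum of both maxima" semantics match the contract described in
 * the comment above.
 */
#if 0
#include <dlfcn.h>

typedef VkResult (VKAPI_PTR *negotiate_fn)(uint32_t *pSupportedVersion);

static uint32_t
loader_negotiate_icd_version(void *icd_module)
{
	/* Start from the highest version this loader understands. */
	uint32_t version = 3;
	negotiate_fn negotiate = (negotiate_fn)
		dlsym(icd_module, "vk_icdNegotiateLoaderICDInterfaceVersion");

	/* An ICD without this entrypoint predates v2 negotiation. */
	if (!negotiate)
		return 1;

	/* The ICD lowers `version` to what it supports; radv clamps to 3. */
	if (negotiate(&version) != VK_SUCCESS)
		return 0; /* incompatible */

	return version;
}
#endif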

VkResult radv_GetMemoryFdKHR(VkDevice _device,
			     const VkMemoryGetFdInfoKHR *pGetFdInfo,
			     int *pFD)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_device_memory, memory, pGetFdInfo->memory);

	assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);

	/* At the moment, we support only the below handle types. */
	assert(pGetFdInfo->handleType ==
	       VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR ||
	       pGetFdInfo->handleType ==
	       VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);

	bool ret = radv_get_memory_fd(device, memory, pFD);
	if (ret == false)
		return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
	return VK_SUCCESS;
}

VkResult radv_GetMemoryFdPropertiesKHR(VkDevice _device,
				       VkExternalMemoryHandleTypeFlagBitsKHR handleType,
				       int fd,
				       VkMemoryFdPropertiesKHR *pMemoryFdProperties)
{
	switch (handleType) {
	case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
		pMemoryFdProperties->memoryTypeBits = (1 << RADV_MEM_TYPE_COUNT) - 1;
		return VK_SUCCESS;

	default:
		/* The valid usage section for this function says:
		 *
		 *    "handleType must not be one of the handle types defined as
		 *    opaque."
		 *
		 * So opaque handle types fall into the default "unsupported" case.
		 */
		return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
	}
}

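/* A successful import transfers ownership of the file descriptor to the
 * driver, which is why both helpers below close() the FD once the payload
 * has been imported.
 */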
static VkResult radv_import_opaque_fd(struct radv_device *device,
				      int fd,
				      uint32_t *syncobj)
{
	uint32_t syncobj_handle = 0;
	int ret = device->ws->import_syncobj(device->ws, fd, &syncobj_handle);
	if (ret != 0)
		return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);

	if (*syncobj)
		device->ws->destroy_syncobj(device->ws, *syncobj);

	*syncobj = syncobj_handle;
	close(fd);

	return VK_SUCCESS;
}

static VkResult radv_import_sync_fd(struct radv_device *device,
				    int fd,
				    uint32_t *syncobj)
{
	/* If we create a syncobj, we do it locally so that if we have an error,
	 * we don't leave a syncobj in an undetermined state in the fence. */
	uint32_t syncobj_handle = *syncobj;
	if (!syncobj_handle) {
		int ret = device->ws->create_syncobj(device->ws, &syncobj_handle);
		if (ret) {
			return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
		}
	}

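	/* Per the external fence/semaphore specs, importing a sync FD of -1
	 * is equivalent to importing an already-signaled payload, so just
	 * signal the syncobj instead of importing anything.
	 */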
	if (fd == -1) {
		device->ws->signal_syncobj(device->ws, syncobj_handle);
	} else {
		int ret = device->ws->import_syncobj_from_sync_file(device->ws, syncobj_handle, fd);
		if (ret != 0)
			return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
	}

	*syncobj = syncobj_handle;
	if (fd != -1)
		close(fd);

	return VK_SUCCESS;
}

VkResult radv_ImportSemaphoreFdKHR(VkDevice _device,
				   const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_semaphore, sem, pImportSemaphoreFdInfo->semaphore);
	uint32_t *syncobj_dst = NULL;

	if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR) {
		syncobj_dst = &sem->temp_syncobj;
	} else {
		syncobj_dst = &sem->syncobj;
	}

	switch (pImportSemaphoreFdInfo->handleType) {
	case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
		return radv_import_opaque_fd(device, pImportSemaphoreFdInfo->fd, syncobj_dst);
	case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR:
		return radv_import_sync_fd(device, pImportSemaphoreFdInfo->fd, syncobj_dst);
	default:
		unreachable("Unhandled semaphore handle type");
	}
}

VkResult radv_GetSemaphoreFdKHR(VkDevice _device,
				const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
				int *pFd)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_semaphore, sem, pGetFdInfo->semaphore);
	int ret;
	uint32_t syncobj_handle;

	if (sem->temp_syncobj)
		syncobj_handle = sem->temp_syncobj;
	else
		syncobj_handle = sem->syncobj;

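	/* Exporting a SYNC_FD payload transfers it out of the semaphore: a
	 * temporary payload is destroyed (restoring the permanent one) and a
	 * permanent payload is reset to unsignaled, matching the spec's
	 * export semantics.
	 */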
	switch (pGetFdInfo->handleType) {
	case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
		ret = device->ws->export_syncobj(device->ws, syncobj_handle, pFd);
		break;
	case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR:
		ret = device->ws->export_syncobj_to_sync_file(device->ws, syncobj_handle, pFd);
		if (!ret) {
			if (sem->temp_syncobj) {
				/* A syncobj is a DRM handle, not an FD, so destroy
				 * it through the winsys rather than close()ing it. */
				device->ws->destroy_syncobj(device->ws, sem->temp_syncobj);
				sem->temp_syncobj = 0;
			} else {
				device->ws->reset_syncobj(device->ws, syncobj_handle);
			}
		}
		break;
	default:
		unreachable("Unhandled semaphore handle type");
	}

	if (ret)
		return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
	return VK_SUCCESS;
}

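/* Illustrative sketch (not driver code): how an application might move a
 * semaphore payload between two devices using the entrypoints above. The
 * `dev_a`/`dev_b`/`sem_a`/`sem_b` handles are hypothetical and assumed to
 * have been created with VkExportSemaphoreCreateInfoKHR.
 */
#if 0
VkSemaphoreGetFdInfoKHR get_info = {
	.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR,
	.semaphore = sem_a,
	.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,
};
int fd;
vkGetSemaphoreFdKHR(dev_a, &get_info, &fd);

VkImportSemaphoreFdInfoKHR import_info = {
	.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR,
	.semaphore = sem_b,
	.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,
	.fd = fd, /* ownership passes to the import on success */
};
vkImportSemaphoreFdKHR(dev_b, &import_info);
#endif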

void radv_GetPhysicalDeviceExternalSemaphoreProperties(
	VkPhysicalDevice                            physicalDevice,
	const VkPhysicalDeviceExternalSemaphoreInfoKHR* pExternalSemaphoreInfo,
	VkExternalSemaphorePropertiesKHR*           pExternalSemaphoreProperties)
{
	RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);

	/* Require has_syncobj_wait_for_submit for the syncobj signal ioctl
	 * introduced at virtually the same time. */
	if (pdevice->rad_info.has_syncobj_wait_for_submit &&
	    (pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR ||
	     pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR)) {
		pExternalSemaphoreProperties->exportFromImportedHandleTypes =
			VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR |
			VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR;
		pExternalSemaphoreProperties->compatibleHandleTypes =
			VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR |
			VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR;
		pExternalSemaphoreProperties->externalSemaphoreFeatures =
			VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
			VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
	} else if (pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR) {
		pExternalSemaphoreProperties->exportFromImportedHandleTypes =
			VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
		pExternalSemaphoreProperties->compatibleHandleTypes =
			VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
		pExternalSemaphoreProperties->externalSemaphoreFeatures =
			VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
			VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
	} else {
		pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
		pExternalSemaphoreProperties->compatibleHandleTypes = 0;
		pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
	}
}

VkResult radv_ImportFenceFdKHR(VkDevice _device,
			       const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_fence, fence, pImportFenceFdInfo->fence);
	uint32_t *syncobj_dst = NULL;

	if (pImportFenceFdInfo->flags & VK_FENCE_IMPORT_TEMPORARY_BIT_KHR) {
		syncobj_dst = &fence->temp_syncobj;
	} else {
		syncobj_dst = &fence->syncobj;
	}

	switch (pImportFenceFdInfo->handleType) {
	case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
		return radv_import_opaque_fd(device, pImportFenceFdInfo->fd, syncobj_dst);
	case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR:
		return radv_import_sync_fd(device, pImportFenceFdInfo->fd, syncobj_dst);
	default:
		unreachable("Unhandled fence handle type");
	}
}

VkResult radv_GetFenceFdKHR(VkDevice _device,
			    const VkFenceGetFdInfoKHR *pGetFdInfo,
			    int *pFd)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_fence, fence, pGetFdInfo->fence);
	int ret;
	uint32_t syncobj_handle;

	if (fence->temp_syncobj)
		syncobj_handle = fence->temp_syncobj;
	else
		syncobj_handle = fence->syncobj;

	switch (pGetFdInfo->handleType) {
	case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
		ret = device->ws->export_syncobj(device->ws, syncobj_handle, pFd);
		break;
	case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR:
		ret = device->ws->export_syncobj_to_sync_file(device->ws, syncobj_handle, pFd);
		if (!ret) {
			if (fence->temp_syncobj) {
				/* As with semaphores, destroy the temporary syncobj
				 * through the winsys; it is not an FD. */
				device->ws->destroy_syncobj(device->ws, fence->temp_syncobj);
				fence->temp_syncobj = 0;
			} else {
				device->ws->reset_syncobj(device->ws, syncobj_handle);
			}
		}
		break;
	default:
		unreachable("Unhandled fence handle type");
	}

	if (ret)
		return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
	return VK_SUCCESS;
}

void radv_GetPhysicalDeviceExternalFenceProperties(
	VkPhysicalDevice                            physicalDevice,
	const VkPhysicalDeviceExternalFenceInfoKHR* pExternalFenceInfo,
	VkExternalFencePropertiesKHR*               pExternalFenceProperties)
{
	RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);

	if (pdevice->rad_info.has_syncobj_wait_for_submit &&
	    (pExternalFenceInfo->handleType == VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR ||
	     pExternalFenceInfo->handleType == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR)) {
		pExternalFenceProperties->exportFromImportedHandleTypes =
			VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR |
			VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR;
		pExternalFenceProperties->compatibleHandleTypes =
			VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR |
			VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR;
		pExternalFenceProperties->externalFenceFeatures =
			VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT_KHR |
			VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT_KHR;
	} else {
		pExternalFenceProperties->exportFromImportedHandleTypes = 0;
		pExternalFenceProperties->compatibleHandleTypes = 0;
		pExternalFenceProperties->externalFenceFeatures = 0;
	}
}

VkResult
radv_CreateDebugReportCallbackEXT(VkInstance _instance,
				  const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
				  const VkAllocationCallbacks* pAllocator,
				  VkDebugReportCallbackEXT* pCallback)
{
	RADV_FROM_HANDLE(radv_instance, instance, _instance);
	return vk_create_debug_report_callback(&instance->debug_report_callbacks,
					       pCreateInfo, pAllocator, &instance->alloc,
					       pCallback);
}

void
radv_DestroyDebugReportCallbackEXT(VkInstance _instance,
				   VkDebugReportCallbackEXT _callback,
				   const VkAllocationCallbacks* pAllocator)
{
	RADV_FROM_HANDLE(radv_instance, instance, _instance);
	vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
					 _callback, pAllocator, &instance->alloc);
}

void
radv_DebugReportMessageEXT(VkInstance _instance,
			   VkDebugReportFlagsEXT flags,
			   VkDebugReportObjectTypeEXT objectType,
			   uint64_t object,
			   size_t location,
			   int32_t messageCode,
			   const char* pLayerPrefix,
			   const char* pMessage)
{
	RADV_FROM_HANDLE(radv_instance, instance, _instance);
	vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
			object, location, messageCode, pLayerPrefix, pMessage);
}