mesa.git: src/amd/vulkan/radv_device.c (commit 586a6e6b7cd9dd61851e2b3443f949e719e88cc4)
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 */
27
28 #include <stdbool.h>
29 #include <string.h>
30 #include <unistd.h>
31 #include <fcntl.h>
32 #include "radv_debug.h"
33 #include "radv_private.h"
34 #include "radv_shader.h"
35 #include "radv_cs.h"
36 #include "util/disk_cache.h"
37 #include "util/strtod.h"
38 #include "vk_util.h"
39 #include <xf86drm.h>
40 #include <amdgpu.h>
41 #include <amdgpu_drm.h>
42 #include "winsys/amdgpu/radv_amdgpu_winsys_public.h"
43 #include "ac_llvm_util.h"
44 #include "vk_format.h"
45 #include "sid.h"
46 #include "gfx9d.h"
47 #include "util/debug.h"
48
49 static int
50 radv_device_get_cache_uuid(enum radeon_family family, void *uuid)
51 {
52 uint32_t mesa_timestamp, llvm_timestamp;
53 uint16_t f = family;
54 memset(uuid, 0, VK_UUID_SIZE);
55 if (!disk_cache_get_function_timestamp(radv_device_get_cache_uuid, &mesa_timestamp) ||
56 !disk_cache_get_function_timestamp(LLVMInitializeAMDGPUTargetInfo, &llvm_timestamp))
57 return -1;
58
59 memcpy(uuid, &mesa_timestamp, 4);
60 memcpy((char*)uuid + 4, &llvm_timestamp, 4);
61 memcpy((char*)uuid + 8, &f, 2);
62 snprintf((char*)uuid + 10, VK_UUID_SIZE - 10, "radv");
63 return 0;
64 }
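
/* Editor's note: the resulting 16-byte pipeline cache UUID built above is
 * laid out as
 *
 *   bytes  0..3   Mesa build timestamp
 *   bytes  4..7   LLVM build timestamp
 *   bytes  8..9   radeon_family, truncated to 16 bits
 *   bytes 10..15  the literal "radv" plus zero padding
 *
 * so rebuilding Mesa or LLVM, or switching GPU family, automatically
 * invalidates any previously cached pipelines keyed on this UUID.
 */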
65
66 static void
67 radv_get_driver_uuid(void *uuid)
68 {
69 ac_compute_driver_uuid(uuid, VK_UUID_SIZE);
70 }
71
72 static void
73 radv_get_device_uuid(struct radeon_info *info, void *uuid)
74 {
75 ac_compute_device_uuid(info, uuid, VK_UUID_SIZE);
76 }
77
78 static void
79 radv_get_device_name(enum radeon_family family, char *name, size_t name_len)
80 {
81 const char *chip_string;
82 char llvm_string[32] = {};
83
84 switch (family) {
85 case CHIP_TAHITI: chip_string = "AMD RADV TAHITI"; break;
86 case CHIP_PITCAIRN: chip_string = "AMD RADV PITCAIRN"; break;
87 case CHIP_VERDE: chip_string = "AMD RADV CAPE VERDE"; break;
88 case CHIP_OLAND: chip_string = "AMD RADV OLAND"; break;
89 case CHIP_HAINAN: chip_string = "AMD RADV HAINAN"; break;
90 case CHIP_BONAIRE: chip_string = "AMD RADV BONAIRE"; break;
91 case CHIP_KAVERI: chip_string = "AMD RADV KAVERI"; break;
92 case CHIP_KABINI: chip_string = "AMD RADV KABINI"; break;
93 case CHIP_HAWAII: chip_string = "AMD RADV HAWAII"; break;
94 case CHIP_MULLINS: chip_string = "AMD RADV MULLINS"; break;
95 case CHIP_TONGA: chip_string = "AMD RADV TONGA"; break;
96 case CHIP_ICELAND: chip_string = "AMD RADV ICELAND"; break;
97 case CHIP_CARRIZO: chip_string = "AMD RADV CARRIZO"; break;
98 case CHIP_FIJI: chip_string = "AMD RADV FIJI"; break;
99 case CHIP_POLARIS10: chip_string = "AMD RADV POLARIS10"; break;
100 case CHIP_POLARIS11: chip_string = "AMD RADV POLARIS11"; break;
101 case CHIP_POLARIS12: chip_string = "AMD RADV POLARIS12"; break;
102 case CHIP_STONEY: chip_string = "AMD RADV STONEY"; break;
103 case CHIP_VEGA10: chip_string = "AMD RADV VEGA"; break;
104 case CHIP_RAVEN: chip_string = "AMD RADV RAVEN"; break;
105 default: chip_string = "AMD RADV unknown"; break;
106 }
107
108 if (HAVE_LLVM > 0) {
109 snprintf(llvm_string, sizeof(llvm_string),
110 " (LLVM %i.%i.%i)", (HAVE_LLVM >> 8) & 0xff,
111 HAVE_LLVM & 0xff, MESA_LLVM_VERSION_PATCH);
112 }
113
114 snprintf(name, name_len, "%s%s", chip_string, llvm_string);
115 }
116
117 static void
118 radv_physical_device_init_mem_types(struct radv_physical_device *device)
119 {
120 STATIC_ASSERT(RADV_MEM_HEAP_COUNT <= VK_MAX_MEMORY_HEAPS);
121 uint64_t visible_vram_size = MIN2(device->rad_info.vram_size,
122 device->rad_info.vram_vis_size);
123
124 int vram_index = -1, visible_vram_index = -1, gart_index = -1;
125 device->memory_properties.memoryHeapCount = 0;
126 if (device->rad_info.vram_size - visible_vram_size > 0) {
127 vram_index = device->memory_properties.memoryHeapCount++;
128 device->memory_properties.memoryHeaps[vram_index] = (VkMemoryHeap) {
129 .size = device->rad_info.vram_size - visible_vram_size,
130 .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
131 };
132 }
133 if (visible_vram_size) {
134 visible_vram_index = device->memory_properties.memoryHeapCount++;
135 device->memory_properties.memoryHeaps[visible_vram_index] = (VkMemoryHeap) {
136 .size = visible_vram_size,
137 .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
138 };
139 }
140 if (device->rad_info.gart_size > 0) {
141 gart_index = device->memory_properties.memoryHeapCount++;
142 device->memory_properties.memoryHeaps[gart_index] = (VkMemoryHeap) {
143 .size = device->rad_info.gart_size,
144 .flags = 0,
145 };
146 }
147
148 STATIC_ASSERT(RADV_MEM_TYPE_COUNT <= VK_MAX_MEMORY_TYPES);
149 unsigned type_count = 0;
150 if (vram_index >= 0) {
151 device->mem_type_indices[type_count] = RADV_MEM_TYPE_VRAM;
152 device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
153 .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
154 .heapIndex = vram_index,
155 };
156 }
157 if (gart_index >= 0) {
158 device->mem_type_indices[type_count] = RADV_MEM_TYPE_GTT_WRITE_COMBINE;
159 device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
160 .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
161 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
162 .heapIndex = gart_index,
163 };
164 }
165 if (visible_vram_index >= 0) {
166 device->mem_type_indices[type_count] = RADV_MEM_TYPE_VRAM_CPU_ACCESS;
167 device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
168 .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
169 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
170 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
171 .heapIndex = visible_vram_index,
172 };
173 }
174 if (gart_index >= 0) {
175 device->mem_type_indices[type_count] = RADV_MEM_TYPE_GTT_CACHED;
176 device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
177 .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
178 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
179 VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
180 .heapIndex = gart_index,
181 };
182 }
183 device->memory_properties.memoryTypeCount = type_count;
184 }
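
/* Illustrative sketch (editor-added, not part of radv): given the heaps and
 * types exported above, an application typically selects a memory type by
 * intersecting a resource's memoryTypeBits with the property flags it needs.
 * The helper name is hypothetical. */
static int32_t
example_find_memory_type(const VkPhysicalDeviceMemoryProperties *props,
                         uint32_t memory_type_bits,
                         VkMemoryPropertyFlags wanted)
{
	for (uint32_t i = 0; i < props->memoryTypeCount; i++) {
		/* The type must be allowed for the resource and carry all
		 * requested property flags. */
		if ((memory_type_bits & (1u << i)) &&
		    (props->memoryTypes[i].propertyFlags & wanted) == wanted)
			return (int32_t)i;
	}
	return -1; /* no compatible memory type */
}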
185
186 static void
187 radv_handle_env_var_force_family(struct radv_physical_device *device)
188 {
189 const char *family = getenv("RADV_FORCE_FAMILY");
190 unsigned i;
191
192 if (!family)
193 return;
194
195 for (i = CHIP_TAHITI; i < CHIP_LAST; i++) {
196 if (!strcmp(family, ac_get_llvm_processor_name(i))) {
197 /* Override family and chip_class. */
198 device->rad_info.family = i;
199
200 if (i >= CHIP_VEGA10)
201 device->rad_info.chip_class = GFX9;
202 else if (i >= CHIP_TONGA)
203 device->rad_info.chip_class = VI;
204 else if (i >= CHIP_BONAIRE)
205 device->rad_info.chip_class = CIK;
206 else
207 device->rad_info.chip_class = SI;
208
209 return;
210 }
211 }
212
213 fprintf(stderr, "radv: Unknown family: %s\n", family);
214 exit(1);
215 }
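
/* Usage example (editor-added): assuming the usual LLVM processor names,
 * shader compilation for another ASIC can be exercised on any GPU with e.g.
 *
 *   RADV_FORCE_FAMILY=polaris10 ./my_vulkan_app
 *
 * Only family and chip_class are overridden; everything else still comes
 * from the real device via the winsys query in radv_physical_device_init().
 */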
216
217 static VkResult
218 radv_physical_device_init(struct radv_physical_device *device,
219 struct radv_instance *instance,
220 drmDevicePtr drm_device)
221 {
222 const char *path = drm_device->nodes[DRM_NODE_RENDER];
223 VkResult result;
224 drmVersionPtr version;
225 int fd;
226
227 fd = open(path, O_RDWR | O_CLOEXEC);
228 if (fd < 0)
229 return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
230
231 version = drmGetVersion(fd);
232 if (!version) {
233 close(fd);
234 return vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
235 "failed to get version %s: %m", path);
236 }
237
238 if (strcmp(version->name, "amdgpu")) {
239 drmFreeVersion(version);
240 close(fd);
241 return VK_ERROR_INCOMPATIBLE_DRIVER;
242 }
243 drmFreeVersion(version);
244
245 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
246 device->instance = instance;
247 assert(strlen(path) < ARRAY_SIZE(device->path));
248 strncpy(device->path, path, ARRAY_SIZE(device->path));
249
250 device->ws = radv_amdgpu_winsys_create(fd, instance->debug_flags,
251 instance->perftest_flags);
252 if (!device->ws) {
253 result = VK_ERROR_INCOMPATIBLE_DRIVER;
254 goto fail;
255 }
256
257 device->local_fd = fd;
258 device->ws->query_info(device->ws, &device->rad_info);
259
260 radv_handle_env_var_force_family(device);
261
262 radv_get_device_name(device->rad_info.family, device->name, sizeof(device->name));
263
264 if (radv_device_get_cache_uuid(device->rad_info.family, device->cache_uuid)) {
265 device->ws->destroy(device->ws);
266 result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
267 "cannot generate UUID");
268 goto fail;
269 }
270
271 /* These flags affect shader compilation. */
272 uint64_t shader_env_flags =
273 (device->instance->perftest_flags & RADV_PERFTEST_SISCHED ? 0x1 : 0) |
274 (device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH ? 0x2 : 0);
275
276 /* The GPU id is already embedded in the uuid so we just pass "radv"
277 * when creating the cache.
278 */
279 char buf[VK_UUID_SIZE * 2 + 1];
280 disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
281 device->disk_cache = disk_cache_create(device->name, buf, shader_env_flags);
282
283 fprintf(stderr, "WARNING: radv is not a conformant vulkan implementation, testing use only.\n");
284
285 radv_get_driver_uuid(&device->driver_uuid);
286 radv_get_device_uuid(&device->rad_info, &device->device_uuid);
287
288 if (device->rad_info.family == CHIP_STONEY ||
289 device->rad_info.chip_class >= GFX9) {
290 device->has_rbplus = true;
291 device->rbplus_allowed = device->rad_info.family == CHIP_STONEY;
292 }
293
294 /* The mere presence of CLEAR_STATE in the IB causes random GPU hangs
295 * on SI.
296 */
297 device->has_clear_state = device->rad_info.chip_class >= CIK;
298
299 device->cpdma_prefetch_writes_memory = device->rad_info.chip_class <= VI;
300
301 /* Vega10/Raven need a special workaround for a hardware bug. */
302 device->has_scissor_bug = device->rad_info.family == CHIP_VEGA10 ||
303 device->rad_info.family == CHIP_RAVEN;
304
305 radv_physical_device_init_mem_types(device);
306 radv_fill_device_extension_table(device, &device->supported_extensions);
307
308 result = radv_init_wsi(device);
309 if (result != VK_SUCCESS) {
310 device->ws->destroy(device->ws);
311 goto fail;
312 }
313
314 return VK_SUCCESS;
315
316 fail:
317 close(fd);
318 return result;
319 }
320
321 static void
322 radv_physical_device_finish(struct radv_physical_device *device)
323 {
324 radv_finish_wsi(device);
325 device->ws->destroy(device->ws);
326 disk_cache_destroy(device->disk_cache);
327 close(device->local_fd);
328 }
329
330 static void *
331 default_alloc_func(void *pUserData, size_t size, size_t align,
332 VkSystemAllocationScope allocationScope)
333 {
334 return malloc(size);
335 }
336
337 static void *
338 default_realloc_func(void *pUserData, void *pOriginal, size_t size,
339 size_t align, VkSystemAllocationScope allocationScope)
340 {
341 return realloc(pOriginal, size);
342 }
343
344 static void
345 default_free_func(void *pUserData, void *pMemory)
346 {
347 free(pMemory);
348 }
349
350 static const VkAllocationCallbacks default_alloc = {
351 .pUserData = NULL,
352 .pfnAllocation = default_alloc_func,
353 .pfnReallocation = default_realloc_func,
354 .pfnFree = default_free_func,
355 };
356
357 static const struct debug_control radv_debug_options[] = {
358 {"nofastclears", RADV_DEBUG_NO_FAST_CLEARS},
359 {"nodcc", RADV_DEBUG_NO_DCC},
360 {"shaders", RADV_DEBUG_DUMP_SHADERS},
361 {"nocache", RADV_DEBUG_NO_CACHE},
362 {"shaderstats", RADV_DEBUG_DUMP_SHADER_STATS},
363 {"nohiz", RADV_DEBUG_NO_HIZ},
364 {"nocompute", RADV_DEBUG_NO_COMPUTE_QUEUE},
365 {"unsafemath", RADV_DEBUG_UNSAFE_MATH},
366 {"allbos", RADV_DEBUG_ALL_BOS},
367 {"noibs", RADV_DEBUG_NO_IBS},
368 {"spirv", RADV_DEBUG_DUMP_SPIRV},
369 {"vmfaults", RADV_DEBUG_VM_FAULTS},
370 {"zerovram", RADV_DEBUG_ZERO_VRAM},
371 {"syncshaders", RADV_DEBUG_SYNC_SHADERS},
372 {"nosisched", RADV_DEBUG_NO_SISCHED},
373 {"preoptir", RADV_DEBUG_PREOPTIR},
374 {NULL, 0}
375 };
376
377 const char *
378 radv_get_debug_option_name(int id)
379 {
380 assert(id < ARRAY_SIZE(radv_debug_options) - 1);
381 return radv_debug_options[id].string;
382 }
383
384 static const struct debug_control radv_perftest_options[] = {
385 {"nobatchchain", RADV_PERFTEST_NO_BATCHCHAIN},
386 {"sisched", RADV_PERFTEST_SISCHED},
387 {"localbos", RADV_PERFTEST_LOCAL_BOS},
388 {"binning", RADV_PERFTEST_BINNING},
389 {NULL, 0}
390 };
391
392 const char *
393 radv_get_perftest_option_name(int id)
394 {
395 assert(id < ARRAY_SIZE(radv_perftest_options) - 1);
396 return radv_perftest_options[id].string;
397 }
398
399 static void
400 radv_handle_per_app_options(struct radv_instance *instance,
401 const VkApplicationInfo *info)
402 {
403 const char *name = info ? info->pApplicationName : NULL;
404
405 if (!name)
406 return;
407
408 if (!strcmp(name, "Talos - Linux - 32bit") ||
409 !strcmp(name, "Talos - Linux - 64bit")) {
410 /* Force-enable LLVM sisched for Talos because it looks safe
411 * and it gives a few more FPS.
412 */
413 instance->perftest_flags |= RADV_PERFTEST_SISCHED;
414 }
415 }
416
417 static int radv_get_instance_extension_index(const char *name)
418 {
419 for (unsigned i = 0; i < RADV_INSTANCE_EXTENSION_COUNT; ++i) {
420 if (strcmp(name, radv_instance_extensions[i].extensionName) == 0)
421 return i;
422 }
423 return -1;
424 }
425
426
427 VkResult radv_CreateInstance(
428 const VkInstanceCreateInfo* pCreateInfo,
429 const VkAllocationCallbacks* pAllocator,
430 VkInstance* pInstance)
431 {
432 struct radv_instance *instance;
433 VkResult result;
434
435 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
436
437 uint32_t client_version;
438 if (pCreateInfo->pApplicationInfo &&
439 pCreateInfo->pApplicationInfo->apiVersion != 0) {
440 client_version = pCreateInfo->pApplicationInfo->apiVersion;
441 } else {
442 client_version = VK_MAKE_VERSION(1, 0, 0);
443 }
444
445 if (VK_MAKE_VERSION(1, 0, 0) > client_version ||
446 client_version > VK_MAKE_VERSION(1, 1, 0xfff)) {
447 return vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
448 "Client requested version %d.%d.%d",
449 VK_VERSION_MAJOR(client_version),
450 VK_VERSION_MINOR(client_version),
451 VK_VERSION_PATCH(client_version));
452 }
453
454 instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
455 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
456 if (!instance)
457 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
458
459 instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
460
461 if (pAllocator)
462 instance->alloc = *pAllocator;
463 else
464 instance->alloc = default_alloc;
465
466 instance->apiVersion = client_version;
467 instance->physicalDeviceCount = -1;
468
469 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
470 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
471 int index = radv_get_instance_extension_index(ext_name);
472
473 if (index < 0 || !radv_supported_instance_extensions.extensions[index]) {
474 vk_free2(&default_alloc, pAllocator, instance);
475 return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
476 }
477
478 instance->enabled_extensions.extensions[index] = true;
479 }
480
481 result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
482 if (result != VK_SUCCESS) {
483 vk_free2(&default_alloc, pAllocator, instance);
484 return vk_error(result);
485 }
486
487 _mesa_locale_init();
488
489 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
490
491 instance->debug_flags = parse_debug_string(getenv("RADV_DEBUG"),
492 radv_debug_options);
493
494 instance->perftest_flags = parse_debug_string(getenv("RADV_PERFTEST"),
495 radv_perftest_options);
496
497 radv_handle_per_app_options(instance, pCreateInfo->pApplicationInfo);
498
499 if (instance->debug_flags & RADV_DEBUG_NO_SISCHED) {
500 /* Disable sisched when the user requests it; this is mostly
501 * useful when the driver force-enables sisched for the given
502 * application.
503 */
504 instance->perftest_flags &= ~RADV_PERFTEST_SISCHED;
505 }
506
507 *pInstance = radv_instance_to_handle(instance);
508
509 return VK_SUCCESS;
510 }
511
512 void radv_DestroyInstance(
513 VkInstance _instance,
514 const VkAllocationCallbacks* pAllocator)
515 {
516 RADV_FROM_HANDLE(radv_instance, instance, _instance);
517
518 if (!instance)
519 return;
520
521 for (int i = 0; i < instance->physicalDeviceCount; ++i) {
522 radv_physical_device_finish(instance->physicalDevices + i);
523 }
524
525 VG(VALGRIND_DESTROY_MEMPOOL(instance));
526
527 _mesa_locale_fini();
528
529 vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
530
531 vk_free(&instance->alloc, instance);
532 }
533
534 static VkResult
535 radv_enumerate_devices(struct radv_instance *instance)
536 {
537 /* TODO: Check for more devices? */
538 drmDevicePtr devices[8];
539 VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
540 int max_devices;
541
542 instance->physicalDeviceCount = 0;
543
544 max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
545 if (max_devices < 1)
546 return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
547
548 for (unsigned i = 0; i < (unsigned)max_devices; i++) {
549 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
550 devices[i]->bustype == DRM_BUS_PCI &&
551 devices[i]->deviceinfo.pci->vendor_id == ATI_VENDOR_ID) {
552
553 result = radv_physical_device_init(instance->physicalDevices +
554 instance->physicalDeviceCount,
555 instance,
556 devices[i]);
557 if (result == VK_SUCCESS)
558 ++instance->physicalDeviceCount;
559 else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
560 break;
561 }
562 }
563 drmFreeDevices(devices, max_devices);
564
565 return result;
566 }
567
568 VkResult radv_EnumeratePhysicalDevices(
569 VkInstance _instance,
570 uint32_t* pPhysicalDeviceCount,
571 VkPhysicalDevice* pPhysicalDevices)
572 {
573 RADV_FROM_HANDLE(radv_instance, instance, _instance);
574 VkResult result;
575
576 if (instance->physicalDeviceCount < 0) {
577 result = radv_enumerate_devices(instance);
578 if (result != VK_SUCCESS &&
579 result != VK_ERROR_INCOMPATIBLE_DRIVER)
580 return result;
581 }
582
583 if (!pPhysicalDevices) {
584 *pPhysicalDeviceCount = instance->physicalDeviceCount;
585 } else {
586 *pPhysicalDeviceCount = MIN2(*pPhysicalDeviceCount, instance->physicalDeviceCount);
587 for (unsigned i = 0; i < *pPhysicalDeviceCount; ++i)
588 pPhysicalDevices[i] = radv_physical_device_to_handle(instance->physicalDevices + i);
589 }
590
591 return *pPhysicalDeviceCount < instance->physicalDeviceCount ? VK_INCOMPLETE
592 : VK_SUCCESS;
593 }
594
595 void radv_GetPhysicalDeviceFeatures(
596 VkPhysicalDevice physicalDevice,
597 VkPhysicalDeviceFeatures* pFeatures)
598 {
599 memset(pFeatures, 0, sizeof(*pFeatures));
600
601 *pFeatures = (VkPhysicalDeviceFeatures) {
602 .robustBufferAccess = true,
603 .fullDrawIndexUint32 = true,
604 .imageCubeArray = true,
605 .independentBlend = true,
606 .geometryShader = true,
607 .tessellationShader = true,
608 .sampleRateShading = true,
609 .dualSrcBlend = true,
610 .logicOp = true,
611 .multiDrawIndirect = true,
612 .drawIndirectFirstInstance = true,
613 .depthClamp = true,
614 .depthBiasClamp = true,
615 .fillModeNonSolid = true,
616 .depthBounds = true,
617 .wideLines = true,
618 .largePoints = true,
619 .alphaToOne = true,
620 .multiViewport = true,
621 .samplerAnisotropy = true,
622 .textureCompressionETC2 = false,
623 .textureCompressionASTC_LDR = false,
624 .textureCompressionBC = true,
625 .occlusionQueryPrecise = true,
626 .pipelineStatisticsQuery = true,
627 .vertexPipelineStoresAndAtomics = true,
628 .fragmentStoresAndAtomics = true,
629 .shaderTessellationAndGeometryPointSize = true,
630 .shaderImageGatherExtended = true,
631 .shaderStorageImageExtendedFormats = true,
632 .shaderStorageImageMultisample = false,
633 .shaderUniformBufferArrayDynamicIndexing = true,
634 .shaderSampledImageArrayDynamicIndexing = true,
635 .shaderStorageBufferArrayDynamicIndexing = true,
636 .shaderStorageImageArrayDynamicIndexing = true,
637 .shaderStorageImageReadWithoutFormat = true,
638 .shaderStorageImageWriteWithoutFormat = true,
639 .shaderClipDistance = true,
640 .shaderCullDistance = true,
641 .shaderFloat64 = true,
642 .shaderInt64 = true,
643 .shaderInt16 = false,
644 .sparseBinding = true,
645 .variableMultisampleRate = true,
646 .inheritedQueries = true,
647 };
648 }
649
650 void radv_GetPhysicalDeviceFeatures2(
651 VkPhysicalDevice physicalDevice,
652 VkPhysicalDeviceFeatures2KHR *pFeatures)
653 {
654 vk_foreach_struct(ext, pFeatures->pNext) {
655 switch (ext->sType) {
656 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
657 VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *)ext;
658 features->variablePointersStorageBuffer = true;
659 features->variablePointers = false;
660 break;
661 }
662 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: {
663 VkPhysicalDeviceMultiviewFeaturesKHR *features = (VkPhysicalDeviceMultiviewFeaturesKHR*)ext;
664 features->multiview = true;
665 features->multiviewGeometryShader = true;
666 features->multiviewTessellationShader = true;
667 break;
668 }
669 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: {
670 VkPhysicalDeviceShaderDrawParameterFeatures *features =
671 (VkPhysicalDeviceShaderDrawParameterFeatures*)ext;
672 features->shaderDrawParameters = true;
673 break;
674 }
675 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
676 VkPhysicalDeviceProtectedMemoryFeatures *features =
677 (VkPhysicalDeviceProtectedMemoryFeatures*)ext;
678 features->protectedMemory = false;
679 break;
680 }
681 default:
682 break;
683 }
684 }
685 radv_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
686 }
687
688 void radv_GetPhysicalDeviceProperties(
689 VkPhysicalDevice physicalDevice,
690 VkPhysicalDeviceProperties* pProperties)
691 {
692 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
693 VkSampleCountFlags sample_counts = 0xf;
694
695 /* Make sure that the entire descriptor set is addressable with a signed
696 * 32-bit int, so the sum of all limits scaled by descriptor size has to
697 * be at most 2 GiB. A combined image & sampler object counts as one of
698 * both. This limit is for the pipeline layout, not for the set layout, but
699 * there is no set limit, so we just set a pipeline limit. I don't think
700 * any app is going to hit this soon. */
701 size_t max_descriptor_set_size = ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
702 (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
703 32 /* storage buffer, 32 due to potential space wasted on alignment */ +
704 32 /* sampler, largest when combined with image */ +
705 64 /* sampled image */ +
706 64 /* storage image */);
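/* Editor's note: the descriptor sizes above sum to 224 bytes, so with
 * MAX_DYNAMIC_BUFFERS being small this works out to roughly
 * 2^31 / 224 ≈ 9.5 million descriptors for each of the limits below. */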
707
708 VkPhysicalDeviceLimits limits = {
709 .maxImageDimension1D = (1 << 14),
710 .maxImageDimension2D = (1 << 14),
711 .maxImageDimension3D = (1 << 11),
712 .maxImageDimensionCube = (1 << 14),
713 .maxImageArrayLayers = (1 << 11),
714 .maxTexelBufferElements = 128 * 1024 * 1024,
715 .maxUniformBufferRange = UINT32_MAX,
716 .maxStorageBufferRange = UINT32_MAX,
717 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
718 .maxMemoryAllocationCount = UINT32_MAX,
719 .maxSamplerAllocationCount = 64 * 1024,
720 .bufferImageGranularity = 64, /* A cache line */
721 .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
722 .maxBoundDescriptorSets = MAX_SETS,
723 .maxPerStageDescriptorSamplers = max_descriptor_set_size,
724 .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
725 .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
726 .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
727 .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
728 .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
729 .maxPerStageResources = max_descriptor_set_size,
730 .maxDescriptorSetSamplers = max_descriptor_set_size,
731 .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
732 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2,
733 .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
734 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2,
735 .maxDescriptorSetSampledImages = max_descriptor_set_size,
736 .maxDescriptorSetStorageImages = max_descriptor_set_size,
737 .maxDescriptorSetInputAttachments = max_descriptor_set_size,
738 .maxVertexInputAttributes = 32,
739 .maxVertexInputBindings = 32,
740 .maxVertexInputAttributeOffset = 2047,
741 .maxVertexInputBindingStride = 2048,
742 .maxVertexOutputComponents = 128,
743 .maxTessellationGenerationLevel = 64,
744 .maxTessellationPatchSize = 32,
745 .maxTessellationControlPerVertexInputComponents = 128,
746 .maxTessellationControlPerVertexOutputComponents = 128,
747 .maxTessellationControlPerPatchOutputComponents = 120,
748 .maxTessellationControlTotalOutputComponents = 4096,
749 .maxTessellationEvaluationInputComponents = 128,
750 .maxTessellationEvaluationOutputComponents = 128,
751 .maxGeometryShaderInvocations = 127,
752 .maxGeometryInputComponents = 64,
753 .maxGeometryOutputComponents = 128,
754 .maxGeometryOutputVertices = 256,
755 .maxGeometryTotalOutputComponents = 1024,
756 .maxFragmentInputComponents = 128,
757 .maxFragmentOutputAttachments = 8,
758 .maxFragmentDualSrcAttachments = 1,
759 .maxFragmentCombinedOutputResources = 8,
760 .maxComputeSharedMemorySize = 32768,
761 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
762 .maxComputeWorkGroupInvocations = 2048,
763 .maxComputeWorkGroupSize = {
764 2048,
765 2048,
766 2048
767 },
768 .subPixelPrecisionBits = 4 /* FIXME */,
769 .subTexelPrecisionBits = 4 /* FIXME */,
770 .mipmapPrecisionBits = 4 /* FIXME */,
771 .maxDrawIndexedIndexValue = UINT32_MAX,
772 .maxDrawIndirectCount = UINT32_MAX,
773 .maxSamplerLodBias = 16,
774 .maxSamplerAnisotropy = 16,
775 .maxViewports = MAX_VIEWPORTS,
776 .maxViewportDimensions = { (1 << 14), (1 << 14) },
777 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
778 .viewportSubPixelBits = 13, /* We take a float? */
779 .minMemoryMapAlignment = 4096, /* A page */
780 .minTexelBufferOffsetAlignment = 1,
781 .minUniformBufferOffsetAlignment = 4,
782 .minStorageBufferOffsetAlignment = 4,
783 .minTexelOffset = -32,
784 .maxTexelOffset = 31,
785 .minTexelGatherOffset = -32,
786 .maxTexelGatherOffset = 31,
787 .minInterpolationOffset = -2,
788 .maxInterpolationOffset = 2,
789 .subPixelInterpolationOffsetBits = 8,
790 .maxFramebufferWidth = (1 << 14),
791 .maxFramebufferHeight = (1 << 14),
792 .maxFramebufferLayers = (1 << 10),
793 .framebufferColorSampleCounts = sample_counts,
794 .framebufferDepthSampleCounts = sample_counts,
795 .framebufferStencilSampleCounts = sample_counts,
796 .framebufferNoAttachmentsSampleCounts = sample_counts,
797 .maxColorAttachments = MAX_RTS,
798 .sampledImageColorSampleCounts = sample_counts,
799 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
800 .sampledImageDepthSampleCounts = sample_counts,
801 .sampledImageStencilSampleCounts = sample_counts,
802 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
803 .maxSampleMaskWords = 1,
804 .timestampComputeAndGraphics = true,
805 .timestampPeriod = 1000000.0 / pdevice->rad_info.clock_crystal_freq,
806 .maxClipDistances = 8,
807 .maxCullDistances = 8,
808 .maxCombinedClipAndCullDistances = 8,
809 .discreteQueuePriorities = 1,
810 .pointSizeRange = { 0.125, 255.875 },
811 .lineWidthRange = { 0.0, 7.9921875 },
812 .pointSizeGranularity = (1.0 / 8.0),
813 .lineWidthGranularity = (1.0 / 128.0),
814 .strictLines = false, /* FINISHME */
815 .standardSampleLocations = true,
816 .optimalBufferCopyOffsetAlignment = 128,
817 .optimalBufferCopyRowPitchAlignment = 128,
818 .nonCoherentAtomSize = 64,
819 };
820
821 *pProperties = (VkPhysicalDeviceProperties) {
822 .apiVersion = radv_physical_device_api_version(pdevice),
823 .driverVersion = vk_get_driver_version(),
824 .vendorID = ATI_VENDOR_ID,
825 .deviceID = pdevice->rad_info.pci_id,
826 .deviceType = pdevice->rad_info.has_dedicated_vram ? VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU : VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
827 .limits = limits,
828 .sparseProperties = {0},
829 };
830
831 strcpy(pProperties->deviceName, pdevice->name);
832 memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
833 }
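
/* Editor's note on timestampPeriod above: rad_info.clock_crystal_freq is
 * reported in kHz, so 1000000.0 / freq yields nanoseconds per timestamp
 * tick; e.g. a 27000 kHz crystal gives a period of ~37.04 ns. */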
834
835 void radv_GetPhysicalDeviceProperties2(
836 VkPhysicalDevice physicalDevice,
837 VkPhysicalDeviceProperties2KHR *pProperties)
838 {
839 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
840 radv_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
841
842 vk_foreach_struct(ext, pProperties->pNext) {
843 switch (ext->sType) {
844 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
845 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
846 (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
847 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
848 break;
849 }
850 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
851 VkPhysicalDeviceIDPropertiesKHR *properties = (VkPhysicalDeviceIDPropertiesKHR*)ext;
852 memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
853 memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
854 properties->deviceLUIDValid = false;
855 break;
856 }
857 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: {
858 VkPhysicalDeviceMultiviewPropertiesKHR *properties = (VkPhysicalDeviceMultiviewPropertiesKHR*)ext;
859 properties->maxMultiviewViewCount = MAX_VIEWS;
860 properties->maxMultiviewInstanceIndex = INT_MAX;
861 break;
862 }
863 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
864 VkPhysicalDevicePointClippingPropertiesKHR *properties =
865 (VkPhysicalDevicePointClippingPropertiesKHR*)ext;
866 properties->pointClippingBehavior = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
867 break;
868 }
869 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT: {
870 VkPhysicalDeviceDiscardRectanglePropertiesEXT *properties =
871 (VkPhysicalDeviceDiscardRectanglePropertiesEXT*)ext;
872 properties->maxDiscardRectangles = MAX_DISCARD_RECTANGLES;
873 break;
874 }
875 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT: {
876 VkPhysicalDeviceExternalMemoryHostPropertiesEXT *properties =
877 (VkPhysicalDeviceExternalMemoryHostPropertiesEXT *) ext;
878 properties->minImportedHostPointerAlignment = 4096;
879 break;
880 }
881 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES: {
882 VkPhysicalDeviceSubgroupProperties *properties =
883 (VkPhysicalDeviceSubgroupProperties*)ext;
884 properties->subgroupSize = 64;
885 properties->supportedStages = VK_SHADER_STAGE_ALL;
886 properties->supportedOperations = VK_SUBGROUP_FEATURE_BASIC_BIT;
887 properties->quadOperationsInAllStages = false;
888 break;
889 }
890 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
891 VkPhysicalDeviceMaintenance3Properties *properties =
892 (VkPhysicalDeviceMaintenance3Properties*)ext;
893 /* Make sure everything is addressable by a signed 32-bit int, and
894 * our largest descriptors are 96 bytes. */
895 properties->maxPerSetDescriptors = (1ull << 31) / 96;
896 /* Our buffer size fields allow only this much */
897 properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
898 break;
899 }
900 default:
901 break;
902 }
903 }
904 }
905
906 static void radv_get_physical_device_queue_family_properties(
907 struct radv_physical_device* pdevice,
908 uint32_t* pCount,
909 VkQueueFamilyProperties** pQueueFamilyProperties)
910 {
911 int num_queue_families = 1;
912 int idx;
913 if (pdevice->rad_info.num_compute_rings > 0 &&
914 !(pdevice->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE))
915 num_queue_families++;
916
917 if (pQueueFamilyProperties == NULL) {
918 *pCount = num_queue_families;
919 return;
920 }
921
922 if (!*pCount)
923 return;
924
925 idx = 0;
926 if (*pCount >= 1) {
927 *pQueueFamilyProperties[idx] = (VkQueueFamilyProperties) {
928 .queueFlags = VK_QUEUE_GRAPHICS_BIT |
929 VK_QUEUE_COMPUTE_BIT |
930 VK_QUEUE_TRANSFER_BIT |
931 VK_QUEUE_SPARSE_BINDING_BIT,
932 .queueCount = 1,
933 .timestampValidBits = 64,
934 .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
935 };
936 idx++;
937 }
938
939 if (pdevice->rad_info.num_compute_rings > 0 &&
940 !(pdevice->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE)) {
941 if (*pCount > idx) {
942 *pQueueFamilyProperties[idx] = (VkQueueFamilyProperties) {
943 .queueFlags = VK_QUEUE_COMPUTE_BIT |
944 VK_QUEUE_TRANSFER_BIT |
945 VK_QUEUE_SPARSE_BINDING_BIT,
946 .queueCount = pdevice->rad_info.num_compute_rings,
947 .timestampValidBits = 64,
948 .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
949 };
950 idx++;
951 }
952 }
953 *pCount = idx;
954 }
955
956 void radv_GetPhysicalDeviceQueueFamilyProperties(
957 VkPhysicalDevice physicalDevice,
958 uint32_t* pCount,
959 VkQueueFamilyProperties* pQueueFamilyProperties)
960 {
961 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
962 if (!pQueueFamilyProperties) {
963 radv_get_physical_device_queue_family_properties(pdevice, pCount, NULL);
964 return;
965 }
966 VkQueueFamilyProperties *properties[] = {
967 pQueueFamilyProperties + 0,
968 pQueueFamilyProperties + 1,
969 pQueueFamilyProperties + 2,
970 };
971 radv_get_physical_device_queue_family_properties(pdevice, pCount, properties);
972 assert(*pCount <= 3);
973 }
974
975 void radv_GetPhysicalDeviceQueueFamilyProperties2(
976 VkPhysicalDevice physicalDevice,
977 uint32_t* pCount,
978 VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
979 {
980 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
981 if (!pQueueFamilyProperties) {
982 radv_get_physical_device_queue_family_properties(pdevice, pCount, NULL);
983 return;
984 }
985 VkQueueFamilyProperties *properties[] = {
986 &pQueueFamilyProperties[0].queueFamilyProperties,
987 &pQueueFamilyProperties[1].queueFamilyProperties,
988 &pQueueFamilyProperties[2].queueFamilyProperties,
989 };
990 radv_get_physical_device_queue_family_properties(pdevice, pCount, properties);
991 assert(*pCount <= 3);
992 }
993
994 void radv_GetPhysicalDeviceMemoryProperties(
995 VkPhysicalDevice physicalDevice,
996 VkPhysicalDeviceMemoryProperties *pMemoryProperties)
997 {
998 RADV_FROM_HANDLE(radv_physical_device, physical_device, physicalDevice);
999
1000 *pMemoryProperties = physical_device->memory_properties;
1001 }
1002
1003 void radv_GetPhysicalDeviceMemoryProperties2(
1004 VkPhysicalDevice physicalDevice,
1005 VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
1006 {
1007 radv_GetPhysicalDeviceMemoryProperties(physicalDevice,
1008 &pMemoryProperties->memoryProperties);
1009 }
1010
1011 VkResult radv_GetMemoryHostPointerPropertiesEXT(
1012 VkDevice _device,
1013 VkExternalMemoryHandleTypeFlagBitsKHR handleType,
1014 const void *pHostPointer,
1015 VkMemoryHostPointerPropertiesEXT *pMemoryHostPointerProperties)
1016 {
1017 RADV_FROM_HANDLE(radv_device, device, _device);
1018
1019 switch (handleType)
1020 {
1021 case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT: {
1022 const struct radv_physical_device *physical_device = device->physical_device;
1023 uint32_t memoryTypeBits = 0;
1024 for (int i = 0; i < physical_device->memory_properties.memoryTypeCount; i++) {
1025 if (physical_device->mem_type_indices[i] == RADV_MEM_TYPE_GTT_CACHED) {
1026 memoryTypeBits = (1 << i);
1027 break;
1028 }
1029 }
1030 pMemoryHostPointerProperties->memoryTypeBits = memoryTypeBits;
1031 return VK_SUCCESS;
1032 }
1033 default:
1034 return VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR;
1035 }
1036 }
1037
1038 static enum radeon_ctx_priority
1039 radv_get_queue_global_priority(const VkDeviceQueueGlobalPriorityCreateInfoEXT *pObj)
1040 {
1041 /* Default to MEDIUM when a specific global priority isn't requested */
1042 if (!pObj)
1043 return RADEON_CTX_PRIORITY_MEDIUM;
1044
1045 switch(pObj->globalPriority) {
1046 case VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT:
1047 return RADEON_CTX_PRIORITY_REALTIME;
1048 case VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT:
1049 return RADEON_CTX_PRIORITY_HIGH;
1050 case VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT:
1051 return RADEON_CTX_PRIORITY_MEDIUM;
1052 case VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT:
1053 return RADEON_CTX_PRIORITY_LOW;
1054 default:
1055 unreachable("Illegal global priority value");
1056 return RADEON_CTX_PRIORITY_INVALID;
1057 }
1058 }
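
/* Illustrative sketch (editor-added): how an application would request a
 * high global priority. The EXT struct is chained into
 * VkDeviceQueueCreateInfo::pNext, where radv_CreateDevice below picks it up
 * with vk_find_struct_const(). The helper name is hypothetical. */
static void
example_request_high_priority(VkDeviceQueueCreateInfo *queue_info,
                              VkDeviceQueueGlobalPriorityCreateInfoEXT *prio)
{
	prio->sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT;
	prio->pNext = NULL;
	prio->globalPriority = VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT;
	queue_info->pNext = prio; /* requires VK_EXT_global_priority */
}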
1059
1060 static int
1061 radv_queue_init(struct radv_device *device, struct radv_queue *queue,
1062 uint32_t queue_family_index, int idx,
1063 const VkDeviceQueueGlobalPriorityCreateInfoEXT *global_priority)
1064 {
1065 queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1066 queue->device = device;
1067 queue->queue_family_index = queue_family_index;
1068 queue->queue_idx = idx;
1069 queue->priority = radv_get_queue_global_priority(global_priority);
1070
1071 queue->hw_ctx = device->ws->ctx_create(device->ws, queue->priority);
1072 if (!queue->hw_ctx)
1073 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1074
1075 return VK_SUCCESS;
1076 }
1077
1078 static void
1079 radv_queue_finish(struct radv_queue *queue)
1080 {
1081 if (queue->hw_ctx)
1082 queue->device->ws->ctx_destroy(queue->hw_ctx);
1083
1084 if (queue->initial_full_flush_preamble_cs)
1085 queue->device->ws->cs_destroy(queue->initial_full_flush_preamble_cs);
1086 if (queue->initial_preamble_cs)
1087 queue->device->ws->cs_destroy(queue->initial_preamble_cs);
1088 if (queue->continue_preamble_cs)
1089 queue->device->ws->cs_destroy(queue->continue_preamble_cs);
1090 if (queue->descriptor_bo)
1091 queue->device->ws->buffer_destroy(queue->descriptor_bo);
1092 if (queue->scratch_bo)
1093 queue->device->ws->buffer_destroy(queue->scratch_bo);
1094 if (queue->esgs_ring_bo)
1095 queue->device->ws->buffer_destroy(queue->esgs_ring_bo);
1096 if (queue->gsvs_ring_bo)
1097 queue->device->ws->buffer_destroy(queue->gsvs_ring_bo);
1098 if (queue->tess_rings_bo)
1099 queue->device->ws->buffer_destroy(queue->tess_rings_bo);
1100 if (queue->compute_scratch_bo)
1101 queue->device->ws->buffer_destroy(queue->compute_scratch_bo);
1102 }
1103
1104 static void
1105 radv_device_init_gs_info(struct radv_device *device)
1106 {
1107 switch (device->physical_device->rad_info.family) {
1108 case CHIP_OLAND:
1109 case CHIP_HAINAN:
1110 case CHIP_KAVERI:
1111 case CHIP_KABINI:
1112 case CHIP_MULLINS:
1113 case CHIP_ICELAND:
1114 case CHIP_CARRIZO:
1115 case CHIP_STONEY:
1116 device->gs_table_depth = 16;
1117 return;
1118 case CHIP_TAHITI:
1119 case CHIP_PITCAIRN:
1120 case CHIP_VERDE:
1121 case CHIP_BONAIRE:
1122 case CHIP_HAWAII:
1123 case CHIP_TONGA:
1124 case CHIP_FIJI:
1125 case CHIP_POLARIS10:
1126 case CHIP_POLARIS11:
1127 case CHIP_POLARIS12:
1128 case CHIP_VEGA10:
1129 case CHIP_RAVEN:
1130 device->gs_table_depth = 32;
1131 return;
1132 default:
1133 unreachable("unknown GPU");
1134 }
1135 }
1136
1137 static int radv_get_device_extension_index(const char *name)
1138 {
1139 for (unsigned i = 0; i < RADV_DEVICE_EXTENSION_COUNT; ++i) {
1140 if (strcmp(name, radv_device_extensions[i].extensionName) == 0)
1141 return i;
1142 }
1143 return -1;
1144 }
1145
1146 VkResult radv_CreateDevice(
1147 VkPhysicalDevice physicalDevice,
1148 const VkDeviceCreateInfo* pCreateInfo,
1149 const VkAllocationCallbacks* pAllocator,
1150 VkDevice* pDevice)
1151 {
1152 RADV_FROM_HANDLE(radv_physical_device, physical_device, physicalDevice);
1153 VkResult result;
1154 struct radv_device *device;
1155
1156 bool keep_shader_info = false;
1157
1158 /* Check enabled features */
1159 if (pCreateInfo->pEnabledFeatures) {
1160 VkPhysicalDeviceFeatures supported_features;
1161 radv_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
1162 VkBool32 *supported_feature = (VkBool32 *)&supported_features;
1163 VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
1164 unsigned num_features = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
1165 for (uint32_t i = 0; i < num_features; i++) {
1166 if (enabled_feature[i] && !supported_feature[i])
1167 return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
1168 }
1169 }
1170
1171 device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
1172 sizeof(*device), 8,
1173 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1174 if (!device)
1175 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1176
1177 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1178 device->instance = physical_device->instance;
1179 device->physical_device = physical_device;
1180
1181 device->ws = physical_device->ws;
1182 if (pAllocator)
1183 device->alloc = *pAllocator;
1184 else
1185 device->alloc = physical_device->instance->alloc;
1186
1187 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
1188 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
1189 int index = radv_get_device_extension_index(ext_name);
1190 if (index < 0 || !physical_device->supported_extensions.extensions[index]) {
1191 vk_free(&device->alloc, device);
1192 return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
1193 }
1194
1195 device->enabled_extensions.extensions[index] = true;
1196 }
1197
1198 keep_shader_info = device->enabled_extensions.AMD_shader_info;
1199
1200 mtx_init(&device->shader_slab_mutex, mtx_plain);
1201 list_inithead(&device->shader_slabs);
1202
1203 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
1204 const VkDeviceQueueCreateInfo *queue_create = &pCreateInfo->pQueueCreateInfos[i];
1205 uint32_t qfi = queue_create->queueFamilyIndex;
1206 const VkDeviceQueueGlobalPriorityCreateInfoEXT *global_priority =
1207 vk_find_struct_const(queue_create->pNext, DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT);
1208
1209 assert(!global_priority || device->physical_device->rad_info.has_ctx_priority);
1210
1211 device->queues[qfi] = vk_alloc(&device->alloc,
1212 queue_create->queueCount * sizeof(struct radv_queue), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1213 if (!device->queues[qfi]) {
1214 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1215 goto fail;
1216 }
1217
1218 memset(device->queues[qfi], 0, queue_create->queueCount * sizeof(struct radv_queue));
1219
1220 device->queue_count[qfi] = queue_create->queueCount;
1221
1222 for (unsigned q = 0; q < queue_create->queueCount; q++) {
1223 result = radv_queue_init(device, &device->queues[qfi][q], qfi, q, global_priority);
1224 if (result != VK_SUCCESS)
1225 goto fail;
1226 }
1227 }
1228
1229 device->pbb_allowed = device->physical_device->rad_info.chip_class >= GFX9 &&
1230 (device->instance->perftest_flags & RADV_PERFTEST_BINNING);
1231
1232 /* Disabled and not implemented for now. */
1233 device->dfsm_allowed = device->pbb_allowed && false;
1234
1235 #ifdef ANDROID
1236 device->always_use_syncobj = device->physical_device->rad_info.has_syncobj_wait_for_submit;
1237 #endif
1238
1239 device->llvm_supports_spill = true;
1240
1241 /* The maximum number of scratch waves. Scratch space isn't divided
1242 * evenly between CUs. The number is only a function of the number of CUs.
1243 * We can decrease the constant to decrease the scratch buffer size.
1244 *
1245 * device->scratch_waves must be >= the maximum possible size of
1246 * 1 threadgroup, so that the hw doesn't hang from being unable
1247 * to start any.
1248 *
1249 * The recommended value is 4 per CU at most. Higher numbers don't
1250 * bring much benefit, but they still occupy chip resources (think
1251 * async compute). I've seen ~2% performance difference between 4 and 32.
1252 */
1253 uint32_t max_threads_per_block = 2048;
1254 device->scratch_waves = MAX2(32 * physical_device->rad_info.num_good_compute_units,
1255 max_threads_per_block / 64);
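/* Editor's worked example: a 64-CU part gets
 * MAX2(32 * 64, 2048 / 64) = 2048 scratch waves. */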
1256
1257 device->dispatch_initiator = S_00B800_COMPUTE_SHADER_EN(1) |
1258 S_00B800_FORCE_START_AT_000(1);
1259
1260 if (device->physical_device->rad_info.chip_class >= CIK) {
1261 /* If the KMD allows it (there is a KMD hw register for it),
1262 * allow launching waves out-of-order.
1263 */
1264 device->dispatch_initiator |= S_00B800_ORDER_MODE(1);
1265 }
1266
1267 radv_device_init_gs_info(device);
1268
1269 device->tess_offchip_block_dw_size =
1270 device->physical_device->rad_info.family == CHIP_HAWAII ? 4096 : 8192;
1271 device->has_distributed_tess =
1272 device->physical_device->rad_info.chip_class >= VI &&
1273 device->physical_device->rad_info.max_se >= 2;
1274
1275 if (getenv("RADV_TRACE_FILE")) {
1276 keep_shader_info = true;
1277
1278 if (!radv_init_trace(device))
1279 goto fail;
1280 }
1281
1282 device->keep_shader_info = keep_shader_info;
1283
1284 result = radv_device_init_meta(device);
1285 if (result != VK_SUCCESS)
1286 goto fail;
1287
1288 radv_device_init_msaa(device);
1289
1290 for (int family = 0; family < RADV_MAX_QUEUE_FAMILIES; ++family) {
1291 device->empty_cs[family] = device->ws->cs_create(device->ws, family);
1292 switch (family) {
1293 case RADV_QUEUE_GENERAL:
1294 radeon_emit(device->empty_cs[family], PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
1295 radeon_emit(device->empty_cs[family], CONTEXT_CONTROL_LOAD_ENABLE(1));
1296 radeon_emit(device->empty_cs[family], CONTEXT_CONTROL_SHADOW_ENABLE(1));
1297 break;
1298 case RADV_QUEUE_COMPUTE:
1299 radeon_emit(device->empty_cs[family], PKT3(PKT3_NOP, 0, 0));
1300 radeon_emit(device->empty_cs[family], 0);
1301 break;
1302 }
1303 device->ws->cs_finalize(device->empty_cs[family]);
1304 }
1305
1306 if (device->physical_device->rad_info.chip_class >= CIK)
1307 cik_create_gfx_config(device);
1308
1309 VkPipelineCacheCreateInfo ci;
1310 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
1311 ci.pNext = NULL;
1312 ci.flags = 0;
1313 ci.pInitialData = NULL;
1314 ci.initialDataSize = 0;
1315 VkPipelineCache pc;
1316 result = radv_CreatePipelineCache(radv_device_to_handle(device),
1317 &ci, NULL, &pc);
1318 if (result != VK_SUCCESS)
1319 goto fail_meta;
1320
1321 device->mem_cache = radv_pipeline_cache_from_handle(pc);
1322
1323 *pDevice = radv_device_to_handle(device);
1324 return VK_SUCCESS;
1325
1326 fail_meta:
1327 radv_device_finish_meta(device);
1328 fail:
1329 if (device->trace_bo)
1330 device->ws->buffer_destroy(device->trace_bo);
1331
1332 if (device->gfx_init)
1333 device->ws->buffer_destroy(device->gfx_init);
1334
1335 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
1336 for (unsigned q = 0; q < device->queue_count[i]; q++)
1337 radv_queue_finish(&device->queues[i][q]);
1338 if (device->queue_count[i])
1339 vk_free(&device->alloc, device->queues[i]);
1340 }
1341
1342 vk_free(&device->alloc, device);
1343 return result;
1344 }
1345
1346 void radv_DestroyDevice(
1347 VkDevice _device,
1348 const VkAllocationCallbacks* pAllocator)
1349 {
1350 RADV_FROM_HANDLE(radv_device, device, _device);
1351
1352 if (!device)
1353 return;
1354
1355 if (device->trace_bo)
1356 device->ws->buffer_destroy(device->trace_bo);
1357
1358 if (device->gfx_init)
1359 device->ws->buffer_destroy(device->gfx_init);
1360
1361 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
1362 for (unsigned q = 0; q < device->queue_count[i]; q++)
1363 radv_queue_finish(&device->queues[i][q]);
1364 if (device->queue_count[i])
1365 vk_free(&device->alloc, device->queues[i]);
1366 if (device->empty_cs[i])
1367 device->ws->cs_destroy(device->empty_cs[i]);
1368 }
1369 radv_device_finish_meta(device);
1370
1371 VkPipelineCache pc = radv_pipeline_cache_to_handle(device->mem_cache);
1372 radv_DestroyPipelineCache(radv_device_to_handle(device), pc, NULL);
1373
1374 radv_destroy_shader_slabs(device);
1375
1376 vk_free(&device->alloc, device);
1377 }
1378
1379 VkResult radv_EnumerateInstanceLayerProperties(
1380 uint32_t* pPropertyCount,
1381 VkLayerProperties* pProperties)
1382 {
1383 if (pProperties == NULL) {
1384 *pPropertyCount = 0;
1385 return VK_SUCCESS;
1386 }
1387
1388 /* None supported at this time */
1389 return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
1390 }
1391
1392 VkResult radv_EnumerateDeviceLayerProperties(
1393 VkPhysicalDevice physicalDevice,
1394 uint32_t* pPropertyCount,
1395 VkLayerProperties* pProperties)
1396 {
1397 if (pProperties == NULL) {
1398 *pPropertyCount = 0;
1399 return VK_SUCCESS;
1400 }
1401
1402 /* None supported at this time */
1403 return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
1404 }
1405
1406 void radv_GetDeviceQueue2(
1407 VkDevice _device,
1408 const VkDeviceQueueInfo2* pQueueInfo,
1409 VkQueue* pQueue)
1410 {
1411 RADV_FROM_HANDLE(radv_device, device, _device);
1412
1413 *pQueue = radv_queue_to_handle(&device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex]);
1414 }
1415
1416 void radv_GetDeviceQueue(
1417 VkDevice _device,
1418 uint32_t queueFamilyIndex,
1419 uint32_t queueIndex,
1420 VkQueue* pQueue)
1421 {
1422 const VkDeviceQueueInfo2 info = (VkDeviceQueueInfo2) {
1423 .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
1424 .queueFamilyIndex = queueFamilyIndex,
1425 .queueIndex = queueIndex
1426 };
1427
1428 radv_GetDeviceQueue2(_device, &info, pQueue);
1429 }
1430
1431 static void
1432 fill_geom_tess_rings(struct radv_queue *queue,
1433 uint32_t *map,
1434 bool add_sample_positions,
1435 uint32_t esgs_ring_size,
1436 struct radeon_winsys_bo *esgs_ring_bo,
1437 uint32_t gsvs_ring_size,
1438 struct radeon_winsys_bo *gsvs_ring_bo,
1439 uint32_t tess_factor_ring_size,
1440 uint32_t tess_offchip_ring_offset,
1441 uint32_t tess_offchip_ring_size,
1442 struct radeon_winsys_bo *tess_rings_bo)
1443 {
1444 uint64_t esgs_va = 0, gsvs_va = 0;
1445 uint64_t tess_va = 0, tess_offchip_va = 0;
1446 uint32_t *desc = &map[4];
1447
1448 if (esgs_ring_bo)
1449 esgs_va = radv_buffer_get_va(esgs_ring_bo);
1450 if (gsvs_ring_bo)
1451 gsvs_va = radv_buffer_get_va(gsvs_ring_bo);
1452 if (tess_rings_bo) {
1453 tess_va = radv_buffer_get_va(tess_rings_bo);
1454 tess_offchip_va = tess_va + tess_offchip_ring_offset;
1455 }
1456
1457 /* stride 0, num records - size, add tid, swizzle, elsize4,
1458 index stride 64 */
1459 desc[0] = esgs_va;
1460 desc[1] = S_008F04_BASE_ADDRESS_HI(esgs_va >> 32) |
1461 S_008F04_STRIDE(0) |
1462 S_008F04_SWIZZLE_ENABLE(true);
1463 desc[2] = esgs_ring_size;
1464 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1465 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1466 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1467 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1468 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1469 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1470 S_008F0C_ELEMENT_SIZE(1) |
1471 S_008F0C_INDEX_STRIDE(3) |
1472 S_008F0C_ADD_TID_ENABLE(true);
1473
1474 desc += 4;
1475 /* GS entry for ES->GS ring */
1476 /* stride 0, num records - size, elsize0,
1477 index stride 0 */
1478 desc[0] = esgs_va;
1479 desc[1] = S_008F04_BASE_ADDRESS_HI(esgs_va >> 32)|
1480 S_008F04_STRIDE(0) |
1481 S_008F04_SWIZZLE_ENABLE(false);
1482 desc[2] = esgs_ring_size;
1483 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1484 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1485 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1486 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1487 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1488 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1489 S_008F0C_ELEMENT_SIZE(0) |
1490 S_008F0C_INDEX_STRIDE(0) |
1491 S_008F0C_ADD_TID_ENABLE(false);
1492
1493 desc += 4;
1494 /* VS entry for GS->VS ring */
1495 /* stride 0, num records - size, elsize0,
1496 index stride 0 */
1497 desc[0] = gsvs_va;
1498 desc[1] = S_008F04_BASE_ADDRESS_HI(gsvs_va >> 32)|
1499 S_008F04_STRIDE(0) |
1500 S_008F04_SWIZZLE_ENABLE(false);
1501 desc[2] = gsvs_ring_size;
1502 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1503 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1504 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1505 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1506 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1507 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1508 S_008F0C_ELEMENT_SIZE(0) |
1509 S_008F0C_INDEX_STRIDE(0) |
1510 S_008F0C_ADD_TID_ENABLE(false);
1511 desc += 4;
1512
1513 /* stride gsvs_itemsize, num records 64
1514 elsize 4, index stride 16 */
1515 /* shader will patch stride and desc[2] */
1516 desc[0] = gsvs_va;
1517 desc[1] = S_008F04_BASE_ADDRESS_HI(gsvs_va >> 32)|
1518 S_008F04_STRIDE(0) |
1519 S_008F04_SWIZZLE_ENABLE(true);
1520 desc[2] = 0;
1521 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1522 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1523 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1524 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1525 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1526 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1527 S_008F0C_ELEMENT_SIZE(1) |
1528 S_008F0C_INDEX_STRIDE(1) |
1529 S_008F0C_ADD_TID_ENABLE(true);
1530 desc += 4;
1531
1532 desc[0] = tess_va;
1533 desc[1] = S_008F04_BASE_ADDRESS_HI(tess_va >> 32) |
1534 S_008F04_STRIDE(0) |
1535 S_008F04_SWIZZLE_ENABLE(false);
1536 desc[2] = tess_factor_ring_size;
1537 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1538 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1539 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1540 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1541 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1542 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1543 S_008F0C_ELEMENT_SIZE(0) |
1544 S_008F0C_INDEX_STRIDE(0) |
1545 S_008F0C_ADD_TID_ENABLE(false);
1546 desc += 4;
1547
1548 desc[0] = tess_offchip_va;
1549 desc[1] = S_008F04_BASE_ADDRESS_HI(tess_offchip_va >> 32) |
1550 S_008F04_STRIDE(0) |
1551 S_008F04_SWIZZLE_ENABLE(false);
1552 desc[2] = tess_offchip_ring_size;
1553 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1554 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1555 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1556 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1557 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1558 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1559 S_008F0C_ELEMENT_SIZE(0) |
1560 S_008F0C_INDEX_STRIDE(0) |
1561 S_008F0C_ADD_TID_ENABLE(false);
1562 desc += 4;
1563
1564 /* add sample positions after all rings */
1565 memcpy(desc, queue->device->sample_locations_1x, 8);
1566 desc += 2;
1567 memcpy(desc, queue->device->sample_locations_2x, 16);
1568 desc += 4;
1569 memcpy(desc, queue->device->sample_locations_4x, 32);
1570 desc += 8;
1571 memcpy(desc, queue->device->sample_locations_8x, 64);
1572 desc += 16;
1573 memcpy(desc, queue->device->sample_locations_16x, 128);
1574 }
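
/* Editor's note: the function above writes six 4-dword buffer descriptors
 * starting at map[4]: two for the ES->GS ring (ES and GS views), two for
 * the GS->VS ring, one for the tess factor ring and one for the offchip
 * tess ring, followed by the 1x/2x/4x/8x/16x sample position tables
 * (8 + 16 + 32 + 64 + 128 bytes). */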
1575
1576 static unsigned
1577 radv_get_hs_offchip_param(struct radv_device *device, uint32_t *max_offchip_buffers_p)
1578 {
1579 bool double_offchip_buffers = device->physical_device->rad_info.chip_class >= CIK &&
1580 device->physical_device->rad_info.family != CHIP_CARRIZO &&
1581 device->physical_device->rad_info.family != CHIP_STONEY;
1582 unsigned max_offchip_buffers_per_se = double_offchip_buffers ? 128 : 64;
1583 unsigned max_offchip_buffers = max_offchip_buffers_per_se *
1584 device->physical_device->rad_info.max_se;
1585 unsigned offchip_granularity;
1586 unsigned hs_offchip_param;
1587 switch (device->tess_offchip_block_dw_size) {
1588 default:
1589 assert(0);
1590 /* fall through */
1591 case 8192:
1592 offchip_granularity = V_03093C_X_8K_DWORDS;
1593 break;
1594 case 4096:
1595 offchip_granularity = V_03093C_X_4K_DWORDS;
1596 break;
1597 }
1598
1599 switch (device->physical_device->rad_info.chip_class) {
1600 case SI:
1601 max_offchip_buffers = MIN2(max_offchip_buffers, 126);
1602 break;
1603 case CIK:
1604 case VI:
1605 case GFX9:
1606 default:
1607 max_offchip_buffers = MIN2(max_offchip_buffers, 508);
1608 break;
1609 }
1610
1611 *max_offchip_buffers_p = max_offchip_buffers;
1612 if (device->physical_device->rad_info.chip_class >= CIK) {
1613 if (device->physical_device->rad_info.chip_class >= VI)
1614 --max_offchip_buffers;
1615 hs_offchip_param =
1616 S_03093C_OFFCHIP_BUFFERING(max_offchip_buffers) |
1617 S_03093C_OFFCHIP_GRANULARITY(offchip_granularity);
1618 } else {
1619 hs_offchip_param =
1620 S_0089B0_OFFCHIP_BUFFERING(max_offchip_buffers);
1621 }
1622 return hs_offchip_param;
1623 }
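
/* Worked example (editor-added): on a 4-SE GFX9 part, double buffering
 * gives 128 * 4 = 512 offchip buffers, clamped to 508; on VI+ one is
 * subtracted for the register encoding, so OFFCHIP_BUFFERING is programmed
 * with 507 while *max_offchip_buffers_p reports 508, at 8K-dword
 * granularity. */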
1624
1625 static VkResult
1626 radv_get_preamble_cs(struct radv_queue *queue,
1627 uint32_t scratch_size,
1628 uint32_t compute_scratch_size,
1629 uint32_t esgs_ring_size,
1630 uint32_t gsvs_ring_size,
1631 bool needs_tess_rings,
1632 bool needs_sample_positions,
1633 struct radeon_winsys_cs **initial_full_flush_preamble_cs,
1634 struct radeon_winsys_cs **initial_preamble_cs,
1635 struct radeon_winsys_cs **continue_preamble_cs)
1636 {
1637 struct radeon_winsys_bo *scratch_bo = NULL;
1638 struct radeon_winsys_bo *descriptor_bo = NULL;
1639 struct radeon_winsys_bo *compute_scratch_bo = NULL;
1640 struct radeon_winsys_bo *esgs_ring_bo = NULL;
1641 struct radeon_winsys_bo *gsvs_ring_bo = NULL;
1642 struct radeon_winsys_bo *tess_rings_bo = NULL;
1643 struct radeon_winsys_cs *dest_cs[3] = {0};
1644 bool add_tess_rings = false, add_sample_positions = false;
1645 unsigned tess_factor_ring_size = 0, tess_offchip_ring_size = 0;
1646 unsigned max_offchip_buffers;
1647 unsigned hs_offchip_param = 0;
1648 unsigned tess_offchip_ring_offset;
1649 uint32_t ring_bo_flags = RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING;
1650 if (!queue->has_tess_rings) {
1651 if (needs_tess_rings)
1652 add_tess_rings = true;
1653 }
1654 if (!queue->has_sample_positions) {
1655 if (needs_sample_positions)
1656 add_sample_positions = true;
1657 }
1658 tess_factor_ring_size = 32768 * queue->device->physical_device->rad_info.max_se;
1659 hs_offchip_param = radv_get_hs_offchip_param(queue->device,
1660 &max_offchip_buffers);
1661 tess_offchip_ring_offset = align(tess_factor_ring_size, 64 * 1024);
1662 tess_offchip_ring_size = max_offchip_buffers *
1663 queue->device->tess_offchip_block_dw_size * 4;
1664
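/* Fast path: if nothing grew and no new rings or sample positions are
 * needed, hand back the preambles built previously. */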
1665 if (scratch_size <= queue->scratch_size &&
1666 compute_scratch_size <= queue->compute_scratch_size &&
1667 esgs_ring_size <= queue->esgs_ring_size &&
1668 gsvs_ring_size <= queue->gsvs_ring_size &&
1669 !add_tess_rings && !add_sample_positions &&
1670 queue->initial_preamble_cs) {
1671 *initial_full_flush_preamble_cs = queue->initial_full_flush_preamble_cs;
1672 *initial_preamble_cs = queue->initial_preamble_cs;
1673 *continue_preamble_cs = queue->continue_preamble_cs;
1674 if (!scratch_size && !compute_scratch_size && !esgs_ring_size && !gsvs_ring_size)
1675 *continue_preamble_cs = NULL;
1676 return VK_SUCCESS;
1677 }
1678
1679 if (scratch_size > queue->scratch_size) {
1680 scratch_bo = queue->device->ws->buffer_create(queue->device->ws,
1681 scratch_size,
1682 4096,
1683 RADEON_DOMAIN_VRAM,
1684 ring_bo_flags);
1685 if (!scratch_bo)
1686 goto fail;
1687 } else
1688 scratch_bo = queue->scratch_bo;
1689
1690 if (compute_scratch_size > queue->compute_scratch_size) {
1691 compute_scratch_bo = queue->device->ws->buffer_create(queue->device->ws,
1692 compute_scratch_size,
1693 4096,
1694 RADEON_DOMAIN_VRAM,
1695 ring_bo_flags);
1696 if (!compute_scratch_bo)
1697 goto fail;
1698
1699 } else
1700 compute_scratch_bo = queue->compute_scratch_bo;
1701
1702 if (esgs_ring_size > queue->esgs_ring_size) {
1703 esgs_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
1704 esgs_ring_size,
1705 4096,
1706 RADEON_DOMAIN_VRAM,
1707 ring_bo_flags);
1708 if (!esgs_ring_bo)
1709 goto fail;
1710 } else {
1711 esgs_ring_bo = queue->esgs_ring_bo;
1712 esgs_ring_size = queue->esgs_ring_size;
1713 }
1714
1715 if (gsvs_ring_size > queue->gsvs_ring_size) {
1716 gsvs_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
1717 gsvs_ring_size,
1718 4096,
1719 RADEON_DOMAIN_VRAM,
1720 ring_bo_flags);
1721 if (!gsvs_ring_bo)
1722 goto fail;
1723 } else {
1724 gsvs_ring_bo = queue->gsvs_ring_bo;
1725 gsvs_ring_size = queue->gsvs_ring_size;
1726 }
1727
1728 if (add_tess_rings) {
1729 tess_rings_bo = queue->device->ws->buffer_create(queue->device->ws,
1730 tess_offchip_ring_offset + tess_offchip_ring_size,
1731 256,
1732 RADEON_DOMAIN_VRAM,
1733 ring_bo_flags);
1734 if (!tess_rings_bo)
1735 goto fail;
1736 } else {
1737 tess_rings_bo = queue->tess_rings_bo;
1738 }
1739
1740 if (scratch_bo != queue->scratch_bo ||
1741 esgs_ring_bo != queue->esgs_ring_bo ||
1742 gsvs_ring_bo != queue->gsvs_ring_bo ||
1743 tess_rings_bo != queue->tess_rings_bo ||
1744 add_sample_positions) {
1745 uint32_t size = 0;
1746 if (gsvs_ring_bo || esgs_ring_bo ||
1747 tess_rings_bo || add_sample_positions) {
1748 size = 112; /* scratch rsrc (2 dwords) + 2 dwords of padding + 6 ring descriptors * 4 dwords = 28 dwords = 112 bytes */
1749 if (add_sample_positions)
1750 size += 256; /* (1+2+4+8+16) sample positions * 2 floats * 4 bytes = 248 bytes, padded to 256. */
1751 }
1752 else if (scratch_bo)
1753 size = 8; /* 2 dword */
1754
1755 descriptor_bo = queue->device->ws->buffer_create(queue->device->ws,
1756 size,
1757 4096,
1758 RADEON_DOMAIN_VRAM,
1759 RADEON_FLAG_CPU_ACCESS |
1760 RADEON_FLAG_NO_INTERPROCESS_SHARING |
1761 RADEON_FLAG_READ_ONLY);
1762 if (!descriptor_bo)
1763 goto fail;
1764 } else
1765 descriptor_bo = queue->descriptor_bo;
1766
1767 for(int i = 0; i < 3; ++i) {
1768 struct radeon_winsys_cs *cs = NULL;
1769 cs = queue->device->ws->cs_create(queue->device->ws,
1770 queue->queue_family_index ? RING_COMPUTE : RING_GFX);
1771 if (!cs)
1772 goto fail;
1773
1774 dest_cs[i] = cs;
1775
1776 if (scratch_bo)
1777 radv_cs_add_buffer(queue->device->ws, cs, scratch_bo, 8);
1778
1779 if (esgs_ring_bo)
1780 radv_cs_add_buffer(queue->device->ws, cs, esgs_ring_bo, 8);
1781
1782 if (gsvs_ring_bo)
1783 radv_cs_add_buffer(queue->device->ws, cs, gsvs_ring_bo, 8);
1784
1785 if (tess_rings_bo)
1786 radv_cs_add_buffer(queue->device->ws, cs, tess_rings_bo, 8);
1787
1788 if (descriptor_bo)
1789 radv_cs_add_buffer(queue->device->ws, cs, descriptor_bo, 8);
1790
1791 if (descriptor_bo != queue->descriptor_bo) {
1792 uint32_t *map = (uint32_t*)queue->device->ws->buffer_map(descriptor_bo);
1793
1794 if (scratch_bo) {
1795 uint64_t scratch_va = radv_buffer_get_va(scratch_bo);
1796 uint32_t rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
1797 S_008F04_SWIZZLE_ENABLE(1);
1798 map[0] = scratch_va;
1799 map[1] = rsrc1;
1800 }
1801
1802 if (esgs_ring_bo || gsvs_ring_bo || tess_rings_bo ||
1803 add_sample_positions)
1804 fill_geom_tess_rings(queue, map, add_sample_positions,
1805 esgs_ring_size, esgs_ring_bo,
1806 gsvs_ring_size, gsvs_ring_bo,
1807 tess_factor_ring_size,
1808 tess_offchip_ring_offset,
1809 tess_offchip_ring_size,
1810 tess_rings_bo);
1811
1812 queue->device->ws->buffer_unmap(descriptor_bo);
1813 }
1814
1815 if (esgs_ring_bo || gsvs_ring_bo || tess_rings_bo) {
1816 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1817 radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1818 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1819 radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
1820 }
1821
1822 if (esgs_ring_bo || gsvs_ring_bo) {
1823 if (queue->device->physical_device->rad_info.chip_class >= CIK) {
1824 radeon_set_uconfig_reg_seq(cs, R_030900_VGT_ESGS_RING_SIZE, 2);
1825 radeon_emit(cs, esgs_ring_size >> 8);
1826 radeon_emit(cs, gsvs_ring_size >> 8);
1827 } else {
1828 radeon_set_config_reg_seq(cs, R_0088C8_VGT_ESGS_RING_SIZE, 2);
1829 radeon_emit(cs, esgs_ring_size >> 8);
1830 radeon_emit(cs, gsvs_ring_size >> 8);
1831 }
1832 }
1833
1834 if (tess_rings_bo) {
1835 uint64_t tf_va = radv_buffer_get_va(tess_rings_bo);
1836 if (queue->device->physical_device->rad_info.chip_class >= CIK) {
1837 radeon_set_uconfig_reg(cs, R_030938_VGT_TF_RING_SIZE,
1838 S_030938_SIZE(tess_factor_ring_size / 4));
1839 radeon_set_uconfig_reg(cs, R_030940_VGT_TF_MEMORY_BASE,
1840 tf_va >> 8);
1841 if (queue->device->physical_device->rad_info.chip_class >= GFX9) {
1842 radeon_set_uconfig_reg(cs, R_030944_VGT_TF_MEMORY_BASE_HI,
1843 tf_va >> 40);
1844 }
1845 radeon_set_uconfig_reg(cs, R_03093C_VGT_HS_OFFCHIP_PARAM, hs_offchip_param);
1846 } else {
1847 radeon_set_config_reg(cs, R_008988_VGT_TF_RING_SIZE,
1848 S_008988_SIZE(tess_factor_ring_size / 4));
1849 radeon_set_config_reg(cs, R_0089B8_VGT_TF_MEMORY_BASE,
1850 tf_va >> 8);
1851 radeon_set_config_reg(cs, R_0089B0_VGT_HS_OFFCHIP_PARAM,
1852 hs_offchip_param);
1853 }
1854 }
1855
1856 if (descriptor_bo) {
1857 uint64_t va = radv_buffer_get_va(descriptor_bo);
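/* Publish the ring-descriptor table address in the first two user
 * SGPRs of each graphics stage.  GFX9 merges LS into HS and ES into
 * GS, hence the shorter register list there. */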
1858 if (queue->device->physical_device->rad_info.chip_class >= GFX9) {
1859 uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0,
1860 R_00B130_SPI_SHADER_USER_DATA_VS_0,
1861 R_00B208_SPI_SHADER_USER_DATA_ADDR_LO_GS,
1862 R_00B408_SPI_SHADER_USER_DATA_ADDR_LO_HS};
1863
1864 for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
1865 radeon_set_sh_reg_seq(cs, regs[i], 2);
1866 radeon_emit(cs, va);
1867 radeon_emit(cs, va >> 32);
1868 }
1869 } else {
1870 uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0,
1871 R_00B130_SPI_SHADER_USER_DATA_VS_0,
1872 R_00B230_SPI_SHADER_USER_DATA_GS_0,
1873 R_00B330_SPI_SHADER_USER_DATA_ES_0,
1874 R_00B430_SPI_SHADER_USER_DATA_HS_0,
1875 R_00B530_SPI_SHADER_USER_DATA_LS_0};
1876
1877 for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
1878 radeon_set_sh_reg_seq(cs, regs[i], 2);
1879 radeon_emit(cs, va);
1880 radeon_emit(cs, va >> 32);
1881 }
1882 }
1883 }
1884
1885 if (compute_scratch_bo) {
1886 uint64_t scratch_va = radv_buffer_get_va(compute_scratch_bo);
1887 uint32_t rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
1888 S_008F04_SWIZZLE_ENABLE(1);
1889
1890 radv_cs_add_buffer(queue->device->ws, cs, compute_scratch_bo, 8);
1891
1892 radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
1893 radeon_emit(cs, scratch_va);
1894 radeon_emit(cs, rsrc1);
1895 }
1896
1897 if (i == 0) {
1898 si_cs_emit_cache_flush(cs,
1899 queue->device->physical_device->rad_info.chip_class,
1900 NULL, 0,
1901 queue->queue_family_index == RADV_QUEUE_COMPUTE &&
1902 queue->device->physical_device->rad_info.chip_class >= CIK,
1903 (queue->queue_family_index == RADV_QUEUE_COMPUTE ? RADV_CMD_FLAG_CS_PARTIAL_FLUSH : (RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH)) |
1904 RADV_CMD_FLAG_INV_ICACHE |
1905 RADV_CMD_FLAG_INV_SMEM_L1 |
1906 RADV_CMD_FLAG_INV_VMEM_L1 |
1907 RADV_CMD_FLAG_INV_GLOBAL_L2);
1908 } else if (i == 1) {
1909 si_cs_emit_cache_flush(cs,
1910 queue->device->physical_device->rad_info.chip_class,
1911 NULL, 0,
1912 queue->queue_family_index == RADV_QUEUE_COMPUTE &&
1913 queue->device->physical_device->rad_info.chip_class >= CIK,
1914 RADV_CMD_FLAG_INV_ICACHE |
1915 RADV_CMD_FLAG_INV_SMEM_L1 |
1916 RADV_CMD_FLAG_INV_VMEM_L1 |
1917 RADV_CMD_FLAG_INV_GLOBAL_L2);
1918 }
1919
1920 if (!queue->device->ws->cs_finalize(cs))
1921 goto fail;
1922 }
1923
1924 if (queue->initial_full_flush_preamble_cs)
1925 queue->device->ws->cs_destroy(queue->initial_full_flush_preamble_cs);
1926
1927 if (queue->initial_preamble_cs)
1928 queue->device->ws->cs_destroy(queue->initial_preamble_cs);
1929
1930 if (queue->continue_preamble_cs)
1931 queue->device->ws->cs_destroy(queue->continue_preamble_cs);
1932
1933 queue->initial_full_flush_preamble_cs = dest_cs[0];
1934 queue->initial_preamble_cs = dest_cs[1];
1935 queue->continue_preamble_cs = dest_cs[2];
1936
1937 if (scratch_bo != queue->scratch_bo) {
1938 if (queue->scratch_bo)
1939 queue->device->ws->buffer_destroy(queue->scratch_bo);
1940 queue->scratch_bo = scratch_bo;
1941 queue->scratch_size = scratch_size;
1942 }
1943
1944 if (compute_scratch_bo != queue->compute_scratch_bo) {
1945 if (queue->compute_scratch_bo)
1946 queue->device->ws->buffer_destroy(queue->compute_scratch_bo);
1947 queue->compute_scratch_bo = compute_scratch_bo;
1948 queue->compute_scratch_size = compute_scratch_size;
1949 }
1950
1951 if (esgs_ring_bo != queue->esgs_ring_bo) {
1952 if (queue->esgs_ring_bo)
1953 queue->device->ws->buffer_destroy(queue->esgs_ring_bo);
1954 queue->esgs_ring_bo = esgs_ring_bo;
1955 queue->esgs_ring_size = esgs_ring_size;
1956 }
1957
1958 if (gsvs_ring_bo != queue->gsvs_ring_bo) {
1959 if (queue->gsvs_ring_bo)
1960 queue->device->ws->buffer_destroy(queue->gsvs_ring_bo);
1961 queue->gsvs_ring_bo = gsvs_ring_bo;
1962 queue->gsvs_ring_size = gsvs_ring_size;
1963 }
1964
1965 if (tess_rings_bo != queue->tess_rings_bo) {
1966 queue->tess_rings_bo = tess_rings_bo;
1967 queue->has_tess_rings = true;
1968 }
1969
1970 if (descriptor_bo != queue->descriptor_bo) {
1971 if (queue->descriptor_bo)
1972 queue->device->ws->buffer_destroy(queue->descriptor_bo);
1973
1974 queue->descriptor_bo = descriptor_bo;
1975 }
1976
1977 if (add_sample_positions)
1978 queue->has_sample_positions = true;
1979
1980 *initial_full_flush_preamble_cs = queue->initial_full_flush_preamble_cs;
1981 *initial_preamble_cs = queue->initial_preamble_cs;
1982 *continue_preamble_cs = queue->continue_preamble_cs;
1983 if (!scratch_size && !compute_scratch_size && !esgs_ring_size && !gsvs_ring_size)
1984 *continue_preamble_cs = NULL;
1985 return VK_SUCCESS;
1986 fail:
1987 for (int i = 0; i < ARRAY_SIZE(dest_cs); ++i)
1988 if (dest_cs[i])
1989 queue->device->ws->cs_destroy(dest_cs[i]);
1990 if (descriptor_bo && descriptor_bo != queue->descriptor_bo)
1991 queue->device->ws->buffer_destroy(descriptor_bo);
1992 if (scratch_bo && scratch_bo != queue->scratch_bo)
1993 queue->device->ws->buffer_destroy(scratch_bo);
1994 if (compute_scratch_bo && compute_scratch_bo != queue->compute_scratch_bo)
1995 queue->device->ws->buffer_destroy(compute_scratch_bo);
1996 if (esgs_ring_bo && esgs_ring_bo != queue->esgs_ring_bo)
1997 queue->device->ws->buffer_destroy(esgs_ring_bo);
1998 if (gsvs_ring_bo && gsvs_ring_bo != queue->gsvs_ring_bo)
1999 queue->device->ws->buffer_destroy(gsvs_ring_bo);
2000 if (tess_rings_bo && tess_rings_bo != queue->tess_rings_bo)
2001 queue->device->ws->buffer_destroy(tess_rings_bo);
2002 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
2003 }
2004
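/*
 * Semaphores are backed either by kernel DRM syncobjs or by legacy
 * winsys semaphores.  First count each kind (a syncobj-backed fence is
 * folded into the syncobj list), size the arrays, then fill them in a
 * second pass.
 */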
2005 static VkResult radv_alloc_sem_counts(struct radv_winsys_sem_counts *counts,
2006 int num_sems,
2007 const VkSemaphore *sems,
2008 VkFence _fence,
2009 bool reset_temp)
2010 {
2011 int syncobj_idx = 0, sem_idx = 0;
2012
2013 if (num_sems == 0 && _fence == VK_NULL_HANDLE)
2014 return VK_SUCCESS;
2015
2016 for (uint32_t i = 0; i < num_sems; i++) {
2017 RADV_FROM_HANDLE(radv_semaphore, sem, sems[i]);
2018
2019 if (sem->temp_syncobj || sem->syncobj)
2020 counts->syncobj_count++;
2021 else
2022 counts->sem_count++;
2023 }
2024
2025 if (_fence != VK_NULL_HANDLE) {
2026 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2027 if (fence->temp_syncobj || fence->syncobj)
2028 counts->syncobj_count++;
2029 }
2030
2031 if (counts->syncobj_count) {
2032 counts->syncobj = (uint32_t *)malloc(sizeof(uint32_t) * counts->syncobj_count);
2033 if (!counts->syncobj)
2034 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2035 }
2036
2037 if (counts->sem_count) {
2038 counts->sem = (struct radeon_winsys_sem **)malloc(sizeof(struct radeon_winsys_sem *) * counts->sem_count);
2039 if (!counts->sem) {
2040 free(counts->syncobj);
2041 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2042 }
2043 }
2044
2045 for (uint32_t i = 0; i < num_sems; i++) {
2046 RADV_FROM_HANDLE(radv_semaphore, sem, sems[i]);
2047
2048 if (sem->temp_syncobj) {
2049 counts->syncobj[syncobj_idx++] = sem->temp_syncobj;
2050 }
2051 else if (sem->syncobj)
2052 counts->syncobj[syncobj_idx++] = sem->syncobj;
2053 else {
2054 assert(sem->sem);
2055 counts->sem[sem_idx++] = sem->sem;
2056 }
2057 }
2058
2059 if (_fence != VK_NULL_HANDLE) {
2060 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2061 if (fence->temp_syncobj)
2062 counts->syncobj[syncobj_idx++] = fence->temp_syncobj;
2063 else if (fence->syncobj)
2064 counts->syncobj[syncobj_idx++] = fence->syncobj;
2065 }
2066
2067 return VK_SUCCESS;
2068 }
2069
2070 void radv_free_sem_info(struct radv_winsys_sem_info *sem_info)
2071 {
2072 free(sem_info->wait.syncobj);
2073 free(sem_info->wait.sem);
2074 free(sem_info->signal.syncobj);
2075 free(sem_info->signal.sem);
2076 }
2077
2078
2079 static void radv_free_temp_syncobjs(struct radv_device *device,
2080 int num_sems,
2081 const VkSemaphore *sems)
2082 {
2083 for (uint32_t i = 0; i < num_sems; i++) {
2084 RADV_FROM_HANDLE(radv_semaphore, sem, sems[i]);
2085
2086 if (sem->temp_syncobj) {
2087 device->ws->destroy_syncobj(device->ws, sem->temp_syncobj);
2088 sem->temp_syncobj = 0;
2089 }
2090 }
2091 }
2092
2093 VkResult radv_alloc_sem_info(struct radv_winsys_sem_info *sem_info,
2094 int num_wait_sems,
2095 const VkSemaphore *wait_sems,
2096 int num_signal_sems,
2097 const VkSemaphore *signal_sems,
2098 VkFence fence)
2099 {
2100 VkResult ret;
2101 memset(sem_info, 0, sizeof(*sem_info));
2102
2103 ret = radv_alloc_sem_counts(&sem_info->wait, num_wait_sems, wait_sems, VK_NULL_HANDLE, true);
2104 if (ret)
2105 return ret;
2106 ret = radv_alloc_sem_counts(&sem_info->signal, num_signal_sems, signal_sems, fence, false);
2107 if (ret)
2108 radv_free_sem_info(sem_info);
2109
2110 /* caller can override these */
2111 sem_info->cs_emit_wait = true;
2112 sem_info->cs_emit_signal = true;
2113 return ret;
2114 }
2115
2116 /* Signals the fence once all work currently queued has finished. */
2117 static VkResult radv_signal_fence(struct radv_queue *queue,
2118 struct radv_fence *fence)
2119 {
2120 int ret;
2121 VkResult result;
2122 struct radv_winsys_sem_info sem_info;
2123
2124 result = radv_alloc_sem_info(&sem_info, 0, NULL, 0, NULL,
2125 radv_fence_to_handle(fence));
2126 if (result != VK_SUCCESS)
2127 return result;
2128
2129 ret = queue->device->ws->cs_submit(queue->hw_ctx, queue->queue_idx,
2130 &queue->device->empty_cs[queue->queue_family_index],
2131 1, NULL, NULL, &sem_info,
2132 false, fence->fence);
2133 radv_free_sem_info(&sem_info);
2134
2135 /* TODO: find a better error */
2136 if (ret)
2137 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
2138
2139 return VK_SUCCESS;
2140 }
2141
2142 VkResult radv_QueueSubmit(
2143 VkQueue _queue,
2144 uint32_t submitCount,
2145 const VkSubmitInfo* pSubmits,
2146 VkFence _fence)
2147 {
2148 RADV_FROM_HANDLE(radv_queue, queue, _queue);
2149 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2150 struct radeon_winsys_fence *base_fence = fence ? fence->fence : NULL;
2151 struct radeon_winsys_ctx *ctx = queue->hw_ctx;
2152 int ret;
2153 uint32_t max_cs_submission = queue->device->trace_bo ? 1 : UINT32_MAX;
2154 uint32_t scratch_size = 0;
2155 uint32_t compute_scratch_size = 0;
2156 uint32_t esgs_ring_size = 0, gsvs_ring_size = 0;
2157 struct radeon_winsys_cs *initial_preamble_cs = NULL, *initial_flush_preamble_cs = NULL, *continue_preamble_cs = NULL;
2158 VkResult result;
2159 bool fence_emitted = false;
2160 bool tess_rings_needed = false;
2161 bool sample_positions_needed = false;
2162
2163 /* Do this first so failing to allocate scratch buffers can't result in
2164 * partially executed submissions. */
2165 for (uint32_t i = 0; i < submitCount; i++) {
2166 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
2167 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer,
2168 pSubmits[i].pCommandBuffers[j]);
2169
2170 scratch_size = MAX2(scratch_size, cmd_buffer->scratch_size_needed);
2171 compute_scratch_size = MAX2(compute_scratch_size,
2172 cmd_buffer->compute_scratch_size_needed);
2173 esgs_ring_size = MAX2(esgs_ring_size, cmd_buffer->esgs_ring_size_needed);
2174 gsvs_ring_size = MAX2(gsvs_ring_size, cmd_buffer->gsvs_ring_size_needed);
2175 tess_rings_needed |= cmd_buffer->tess_rings_needed;
2176 sample_positions_needed |= cmd_buffer->sample_positions_needed;
2177 }
2178 }
2179
2180 result = radv_get_preamble_cs(queue, scratch_size, compute_scratch_size,
2181 esgs_ring_size, gsvs_ring_size, tess_rings_needed,
2182 sample_positions_needed, &initial_flush_preamble_cs,
2183 &initial_preamble_cs, &continue_preamble_cs);
2184 if (result != VK_SUCCESS)
2185 return result;
2186
2187 for (uint32_t i = 0; i < submitCount; i++) {
2188 struct radeon_winsys_cs **cs_array;
2189 bool do_flush = !i || pSubmits[i].pWaitDstStageMask;
2190 bool can_patch = true;
2191 uint32_t advance;
2192 struct radv_winsys_sem_info sem_info;
2193
2194 result = radv_alloc_sem_info(&sem_info,
2195 pSubmits[i].waitSemaphoreCount,
2196 pSubmits[i].pWaitSemaphores,
2197 pSubmits[i].signalSemaphoreCount,
2198 pSubmits[i].pSignalSemaphores,
2199 _fence);
2200 if (result != VK_SUCCESS)
2201 return result;
2202
2203 if (!pSubmits[i].commandBufferCount) {
2204 if (pSubmits[i].waitSemaphoreCount || pSubmits[i].signalSemaphoreCount) {
2205 ret = queue->device->ws->cs_submit(ctx, queue->queue_idx,
2206 &queue->device->empty_cs[queue->queue_family_index],
2207 1, NULL, NULL,
2208 &sem_info,
2209 false, base_fence);
2210 if (ret) {
2211 radv_loge("failed to submit CS %d\n", i);
2212 abort();
2213 }
2214 fence_emitted = true;
2215 }
2216 radv_free_sem_info(&sem_info);
2217 continue;
2218 }
2219
2220 cs_array = malloc(sizeof(struct radeon_winsys_cs *) *
2221 (pSubmits[i].commandBufferCount));
if (!cs_array) {
/* Don't leak the semaphore arrays on allocation failure. */
radv_free_sem_info(&sem_info);
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
2222
2223 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
2224 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer,
2225 pSubmits[i].pCommandBuffers[j]);
2226 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
2227
2228 cs_array[j] = cmd_buffer->cs;
2229 if ((cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT))
2230 can_patch = false;
2231
2232 cmd_buffer->status = RADV_CMD_BUFFER_STATUS_PENDING;
2233 }
2234
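/* Submit in chunks of at most max_cs_submission command streams; with
 * a trace buffer active that is one CS per submission, so a hang can
 * be pinned to a single command stream below. */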
2235 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j += advance) {
2236 struct radeon_winsys_cs *initial_preamble = (do_flush && !j) ? initial_flush_preamble_cs : initial_preamble_cs;
2237 advance = MIN2(max_cs_submission,
2238 pSubmits[i].commandBufferCount - j);
2239
2240 if (queue->device->trace_bo)
2241 *queue->device->trace_id_ptr = 0;
2242
2243 sem_info.cs_emit_wait = j == 0;
2244 sem_info.cs_emit_signal = j + advance == pSubmits[i].commandBufferCount;
2245
2246 ret = queue->device->ws->cs_submit(ctx, queue->queue_idx, cs_array + j,
2247 advance, initial_preamble, continue_preamble_cs,
2248 &sem_info,
2249 can_patch, base_fence);
2250
2251 if (ret) {
2252 radv_loge("failed to submit CS %d\n", i);
2253 abort();
2254 }
2255 fence_emitted = true;
2256 if (queue->device->trace_bo) {
2257 radv_check_gpu_hangs(queue, cs_array[j]);
2258 }
2259 }
2260
2261 radv_free_temp_syncobjs(queue->device,
2262 pSubmits[i].waitSemaphoreCount,
2263 pSubmits[i].pWaitSemaphores);
2264 radv_free_sem_info(&sem_info);
2265 free(cs_array);
2266 }
2267
2268 if (fence) {
2269 if (!fence_emitted) {
2270 radv_signal_fence(queue, fence);
2271 }
2272 fence->submitted = true;
2273 }
2274
2275 return VK_SUCCESS;
2276 }
2277
2278 VkResult radv_QueueWaitIdle(
2279 VkQueue _queue)
2280 {
2281 RADV_FROM_HANDLE(radv_queue, queue, _queue);
2282
2283 queue->device->ws->ctx_wait_idle(queue->hw_ctx,
2284 radv_queue_family_to_ring(queue->queue_family_index),
2285 queue->queue_idx);
2286 return VK_SUCCESS;
2287 }
2288
2289 VkResult radv_DeviceWaitIdle(
2290 VkDevice _device)
2291 {
2292 RADV_FROM_HANDLE(radv_device, device, _device);
2293
2294 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
2295 for (unsigned q = 0; q < device->queue_count[i]; q++) {
2296 radv_QueueWaitIdle(radv_queue_to_handle(&device->queues[i][q]));
2297 }
2298 }
2299 return VK_SUCCESS;
2300 }
2301
2302 VkResult radv_EnumerateInstanceExtensionProperties(
2303 const char* pLayerName,
2304 uint32_t* pPropertyCount,
2305 VkExtensionProperties* pProperties)
2306 {
2307 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
2308
2309 for (int i = 0; i < RADV_INSTANCE_EXTENSION_COUNT; i++) {
2310 if (radv_supported_instance_extensions.extensions[i]) {
2311 vk_outarray_append(&out, prop) {
2312 *prop = radv_instance_extensions[i];
2313 }
2314 }
2315 }
2316
2317 return vk_outarray_status(&out);
2318 }
2319
2320 VkResult radv_EnumerateDeviceExtensionProperties(
2321 VkPhysicalDevice physicalDevice,
2322 const char* pLayerName,
2323 uint32_t* pPropertyCount,
2324 VkExtensionProperties* pProperties)
2325 {
2326 RADV_FROM_HANDLE(radv_physical_device, device, physicalDevice);
2327 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
2328
2329 for (int i = 0; i < RADV_DEVICE_EXTENSION_COUNT; i++) {
2330 if (device->supported_extensions.extensions[i]) {
2331 vk_outarray_append(&out, prop) {
2332 *prop = radv_device_extensions[i];
2333 }
2334 }
2335 }
2336
2337 return vk_outarray_status(&out);
2338 }
2339
2340 PFN_vkVoidFunction radv_GetInstanceProcAddr(
2341 VkInstance _instance,
2342 const char* pName)
2343 {
2344 RADV_FROM_HANDLE(radv_instance, instance, _instance);
2345
2346 return radv_lookup_entrypoint_checked(pName,
2347 instance ? instance->apiVersion : 0,
2348 instance ? &instance->enabled_extensions : NULL,
2349 NULL);
2350 }
2351
2352 /* The loader wants us to expose a second GetInstanceProcAddr function
2353 * to work around certain LD_PRELOAD issues seen in apps.
2354 */
2355 PUBLIC
2356 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
2357 VkInstance instance,
2358 const char* pName);
2359
2360 PUBLIC
2361 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
2362 VkInstance instance,
2363 const char* pName)
2364 {
2365 return radv_GetInstanceProcAddr(instance, pName);
2366 }
2367
2368 PFN_vkVoidFunction radv_GetDeviceProcAddr(
2369 VkDevice _device,
2370 const char* pName)
2371 {
2372 RADV_FROM_HANDLE(radv_device, device, _device);
2373
2374 return radv_lookup_entrypoint_checked(pName,
2375 device->instance->apiVersion,
2376 &device->instance->enabled_extensions,
2377 &device->enabled_extensions);
2378 }
2379
2380 bool radv_get_memory_fd(struct radv_device *device,
2381 struct radv_device_memory *memory,
2382 int *pFD)
2383 {
2384 struct radeon_bo_metadata metadata;
2385
2386 if (memory->image) {
2387 radv_init_metadata(device, memory->image, &metadata);
2388 device->ws->buffer_set_metadata(memory->bo, &metadata);
2389 }
2390
2391 return device->ws->buffer_get_fd(device->ws, memory->bo,
2392 pFD);
2393 }
2394
2395 static VkResult radv_alloc_memory(struct radv_device *device,
2396 const VkMemoryAllocateInfo* pAllocateInfo,
2397 const VkAllocationCallbacks* pAllocator,
2398 VkDeviceMemory* pMem)
2399 {
2400 struct radv_device_memory *mem;
2401 VkResult result;
2402 enum radeon_bo_domain domain;
2403 uint32_t flags = 0;
2404 enum radv_mem_type mem_type_index = device->physical_device->mem_type_indices[pAllocateInfo->memoryTypeIndex];
2405
2406 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
2407
2408 if (pAllocateInfo->allocationSize == 0) {
2409 /* Apparently, this is allowed */
2410 *pMem = VK_NULL_HANDLE;
2411 return VK_SUCCESS;
2412 }
2413
2414 const VkImportMemoryFdInfoKHR *import_info =
2415 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
2416 const VkMemoryDedicatedAllocateInfoKHR *dedicate_info =
2417 vk_find_struct_const(pAllocateInfo->pNext, MEMORY_DEDICATED_ALLOCATE_INFO_KHR);
2418 const VkExportMemoryAllocateInfoKHR *export_info =
2419 vk_find_struct_const(pAllocateInfo->pNext, EXPORT_MEMORY_ALLOCATE_INFO_KHR);
2420 const VkImportMemoryHostPointerInfoEXT *host_ptr_info =
2421 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_HOST_POINTER_INFO_EXT);
2422
2423 const struct wsi_memory_allocate_info *wsi_info =
2424 vk_find_struct_const(pAllocateInfo->pNext, WSI_MEMORY_ALLOCATE_INFO_MESA);
2425
2426 mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
2427 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2428 if (mem == NULL)
2429 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2430
2431 if (wsi_info && wsi_info->implicit_sync)
2432 flags |= RADEON_FLAG_IMPLICIT_SYNC;
2433
2434 if (dedicate_info) {
2435 mem->image = radv_image_from_handle(dedicate_info->image);
2436 mem->buffer = radv_buffer_from_handle(dedicate_info->buffer);
2437 } else {
2438 mem->image = NULL;
2439 mem->buffer = NULL;
2440 }
2441
2442 mem->user_ptr = NULL;
2443
2444 if (import_info) {
2445 assert(import_info->handleType ==
2446 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR ||
2447 import_info->handleType ==
2448 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2449 mem->bo = device->ws->buffer_from_fd(device->ws, import_info->fd,
2450 NULL, NULL);
2451 if (!mem->bo) {
2452 result = VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR;
2453 goto fail;
2454 } else {
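/* A successful import transfers ownership of the fd to the
 * implementation, so close our reference. */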
2455 close(import_info->fd);
2456 goto out_success;
2457 }
2458 }
2459
2460 if (host_ptr_info) {
2461 assert(host_ptr_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT);
2462 assert(mem_type_index == RADV_MEM_TYPE_GTT_CACHED);
2463 mem->bo = device->ws->buffer_from_ptr(device->ws, host_ptr_info->pHostPointer,
2464 pAllocateInfo->allocationSize);
2465 if (!mem->bo) {
2466 result = VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR;
2467 goto fail;
2468 } else {
2469 mem->user_ptr = host_ptr_info->pHostPointer;
2470 goto out_success;
2471 }
2472 }
2473
2474 uint64_t alloc_size = align_u64(pAllocateInfo->allocationSize, 4096);
2475 if (mem_type_index == RADV_MEM_TYPE_GTT_WRITE_COMBINE ||
2476 mem_type_index == RADV_MEM_TYPE_GTT_CACHED)
2477 domain = RADEON_DOMAIN_GTT;
2478 else
2479 domain = RADEON_DOMAIN_VRAM;
2480
2481 if (mem_type_index == RADV_MEM_TYPE_VRAM)
2482 flags |= RADEON_FLAG_NO_CPU_ACCESS;
2483 else
2484 flags |= RADEON_FLAG_CPU_ACCESS;
2485
2486 if (mem_type_index == RADV_MEM_TYPE_GTT_WRITE_COMBINE)
2487 flags |= RADEON_FLAG_GTT_WC;
2488
2489 if (!dedicate_info && !import_info && (!export_info || !export_info->handleTypes))
2490 flags |= RADEON_FLAG_NO_INTERPROCESS_SHARING;
2491
2492 mem->bo = device->ws->buffer_create(device->ws, alloc_size, device->physical_device->rad_info.max_alignment,
2493 domain, flags);
2494
2495 if (!mem->bo) {
2496 result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
2497 goto fail;
2498 }
2499 mem->type_index = mem_type_index;
2500 out_success:
2501 *pMem = radv_device_memory_to_handle(mem);
2502
2503 return VK_SUCCESS;
2504
2505 fail:
2506 vk_free2(&device->alloc, pAllocator, mem);
2507
2508 return result;
2509 }
2510
2511 VkResult radv_AllocateMemory(
2512 VkDevice _device,
2513 const VkMemoryAllocateInfo* pAllocateInfo,
2514 const VkAllocationCallbacks* pAllocator,
2515 VkDeviceMemory* pMem)
2516 {
2517 RADV_FROM_HANDLE(radv_device, device, _device);
2518 return radv_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
2519 }
2520
2521 void radv_FreeMemory(
2522 VkDevice _device,
2523 VkDeviceMemory _mem,
2524 const VkAllocationCallbacks* pAllocator)
2525 {
2526 RADV_FROM_HANDLE(radv_device, device, _device);
2527 RADV_FROM_HANDLE(radv_device_memory, mem, _mem);
2528
2529 if (mem == NULL)
2530 return;
2531
2532 device->ws->buffer_destroy(mem->bo);
2533 mem->bo = NULL;
2534
2535 vk_free2(&device->alloc, pAllocator, mem);
2536 }
2537
2538 VkResult radv_MapMemory(
2539 VkDevice _device,
2540 VkDeviceMemory _memory,
2541 VkDeviceSize offset,
2542 VkDeviceSize size,
2543 VkMemoryMapFlags flags,
2544 void** ppData)
2545 {
2546 RADV_FROM_HANDLE(radv_device, device, _device);
2547 RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
2548
2549 if (mem == NULL) {
2550 *ppData = NULL;
2551 return VK_SUCCESS;
2552 }
2553
2554 if (mem->user_ptr)
2555 *ppData = mem->user_ptr;
2556 else
2557 *ppData = device->ws->buffer_map(mem->bo);
2558
2559 if (*ppData) {
2560 *ppData = (uint8_t*)*ppData + offset;
2561 return VK_SUCCESS;
2562 }
2563
2564 return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
2565 }
2566
2567 void radv_UnmapMemory(
2568 VkDevice _device,
2569 VkDeviceMemory _memory)
2570 {
2571 RADV_FROM_HANDLE(radv_device, device, _device);
2572 RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
2573
2574 if (mem == NULL)
2575 return;
2576
2577 if (mem->user_ptr == NULL)
2578 device->ws->buffer_unmap(mem->bo);
2579 }
2580
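/*
 * Both functions below are no-ops: every host-visible memory type radv
 * exposes is also host-coherent, so mapped ranges never need explicit
 * flushes or invalidations.
 */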
2581 VkResult radv_FlushMappedMemoryRanges(
2582 VkDevice _device,
2583 uint32_t memoryRangeCount,
2584 const VkMappedMemoryRange* pMemoryRanges)
2585 {
2586 return VK_SUCCESS;
2587 }
2588
2589 VkResult radv_InvalidateMappedMemoryRanges(
2590 VkDevice _device,
2591 uint32_t memoryRangeCount,
2592 const VkMappedMemoryRange* pMemoryRanges)
2593 {
2594 return VK_SUCCESS;
2595 }
2596
2597 void radv_GetBufferMemoryRequirements(
2598 VkDevice _device,
2599 VkBuffer _buffer,
2600 VkMemoryRequirements* pMemoryRequirements)
2601 {
2602 RADV_FROM_HANDLE(radv_device, device, _device);
2603 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
2604
2605 pMemoryRequirements->memoryTypeBits = (1u << device->physical_device->memory_properties.memoryTypeCount) - 1;
2606
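/* Sparse buffers are bound at page granularity; plain buffers only
 * need a small fixed alignment. */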
2607 if (buffer->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)
2608 pMemoryRequirements->alignment = 4096;
2609 else
2610 pMemoryRequirements->alignment = 16;
2611
2612 pMemoryRequirements->size = align64(buffer->size, pMemoryRequirements->alignment);
2613 }
2614
2615 void radv_GetBufferMemoryRequirements2(
2616 VkDevice device,
2617 const VkBufferMemoryRequirementsInfo2KHR* pInfo,
2618 VkMemoryRequirements2KHR* pMemoryRequirements)
2619 {
2620 radv_GetBufferMemoryRequirements(device, pInfo->buffer,
2621 &pMemoryRequirements->memoryRequirements);
2622 RADV_FROM_HANDLE(radv_buffer, buffer, pInfo->buffer);
2623 vk_foreach_struct(ext, pMemoryRequirements->pNext) {
2624 switch (ext->sType) {
2625 case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR: {
2626 VkMemoryDedicatedRequirementsKHR *req =
2627 (VkMemoryDedicatedRequirementsKHR *) ext;
2628 req->requiresDedicatedAllocation = buffer->shareable;
2629 req->prefersDedicatedAllocation = req->requiresDedicatedAllocation;
2630 break;
2631 }
2632 default:
2633 break;
2634 }
2635 }
2636 }
2637
2638 void radv_GetImageMemoryRequirements(
2639 VkDevice _device,
2640 VkImage _image,
2641 VkMemoryRequirements* pMemoryRequirements)
2642 {
2643 RADV_FROM_HANDLE(radv_device, device, _device);
2644 RADV_FROM_HANDLE(radv_image, image, _image);
2645
2646 pMemoryRequirements->memoryTypeBits = (1u << device->physical_device->memory_properties.memoryTypeCount) - 1;
2647
2648 pMemoryRequirements->size = image->size;
2649 pMemoryRequirements->alignment = image->alignment;
2650 }
2651
2652 void radv_GetImageMemoryRequirements2(
2653 VkDevice device,
2654 const VkImageMemoryRequirementsInfo2KHR* pInfo,
2655 VkMemoryRequirements2KHR* pMemoryRequirements)
2656 {
2657 radv_GetImageMemoryRequirements(device, pInfo->image,
2658 &pMemoryRequirements->memoryRequirements);
2659
2660 RADV_FROM_HANDLE(radv_image, image, pInfo->image);
2661
2662 vk_foreach_struct(ext, pMemoryRequirements->pNext) {
2663 switch (ext->sType) {
2664 case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR: {
2665 VkMemoryDedicatedRequirementsKHR *req =
2666 (VkMemoryDedicatedRequirementsKHR *) ext;
2667 req->requiresDedicatedAllocation = image->shareable;
2668 req->prefersDedicatedAllocation = req->requiresDedicatedAllocation;
2669 break;
2670 }
2671 default:
2672 break;
2673 }
2674 }
2675 }
2676
2677 void radv_GetImageSparseMemoryRequirements(
2678 VkDevice device,
2679 VkImage image,
2680 uint32_t* pSparseMemoryRequirementCount,
2681 VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
2682 {
2683 stub();
2684 }
2685
2686 void radv_GetImageSparseMemoryRequirements2(
2687 VkDevice device,
2688 const VkImageSparseMemoryRequirementsInfo2KHR* pInfo,
2689 uint32_t* pSparseMemoryRequirementCount,
2690 VkSparseImageMemoryRequirements2KHR* pSparseMemoryRequirements)
2691 {
2692 stub();
2693 }
2694
2695 void radv_GetDeviceMemoryCommitment(
2696 VkDevice device,
2697 VkDeviceMemory memory,
2698 VkDeviceSize* pCommittedMemoryInBytes)
2699 {
2700 *pCommittedMemoryInBytes = 0;
2701 }
2702
2703 VkResult radv_BindBufferMemory2(VkDevice device,
2704 uint32_t bindInfoCount,
2705 const VkBindBufferMemoryInfoKHR *pBindInfos)
2706 {
2707 for (uint32_t i = 0; i < bindInfoCount; ++i) {
2708 RADV_FROM_HANDLE(radv_device_memory, mem, pBindInfos[i].memory);
2709 RADV_FROM_HANDLE(radv_buffer, buffer, pBindInfos[i].buffer);
2710
2711 if (mem) {
2712 buffer->bo = mem->bo;
2713 buffer->offset = pBindInfos[i].memoryOffset;
2714 } else {
2715 buffer->bo = NULL;
2716 }
2717 }
2718 return VK_SUCCESS;
2719 }
2720
2721 VkResult radv_BindBufferMemory(
2722 VkDevice device,
2723 VkBuffer buffer,
2724 VkDeviceMemory memory,
2725 VkDeviceSize memoryOffset)
2726 {
2727 const VkBindBufferMemoryInfoKHR info = {
2728 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
2729 .buffer = buffer,
2730 .memory = memory,
2731 .memoryOffset = memoryOffset
2732 };
2733
2734 return radv_BindBufferMemory2(device, 1, &info);
2735 }
2736
2737 VkResult radv_BindImageMemory2(VkDevice device,
2738 uint32_t bindInfoCount,
2739 const VkBindImageMemoryInfoKHR *pBindInfos)
2740 {
2741 for (uint32_t i = 0; i < bindInfoCount; ++i) {
2742 RADV_FROM_HANDLE(radv_device_memory, mem, pBindInfos[i].memory);
2743 RADV_FROM_HANDLE(radv_image, image, pBindInfos[i].image);
2744
2745 if (mem) {
2746 image->bo = mem->bo;
2747 image->offset = pBindInfos[i].memoryOffset;
2748 } else {
2749 image->bo = NULL;
2750 image->offset = 0;
2751 }
2752 }
2753 return VK_SUCCESS;
2754 }
2755
2756
2757 VkResult radv_BindImageMemory(
2758 VkDevice device,
2759 VkImage image,
2760 VkDeviceMemory memory,
2761 VkDeviceSize memoryOffset)
2762 {
2763 const VkBindImageMemoryInfoKHR info = {
2764 .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR,
2765 .image = image,
2766 .memory = memory,
2767 .memoryOffset = memoryOffset
2768 };
2769
2770 return radv_BindImageMemory2(device, 1, &info);
2771 }
2772
2773
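/*
 * Sparse resources are created as virtual BOs (an address reservation
 * with no storage).  Each bind points a range of that address space at
 * pages of a physical BO, or unmaps it again when memory is
 * VK_NULL_HANDLE.
 */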
2774 static void
2775 radv_sparse_buffer_bind_memory(struct radv_device *device,
2776 const VkSparseBufferMemoryBindInfo *bind)
2777 {
2778 RADV_FROM_HANDLE(radv_buffer, buffer, bind->buffer);
2779
2780 for (uint32_t i = 0; i < bind->bindCount; ++i) {
2781 struct radv_device_memory *mem = NULL;
2782
2783 if (bind->pBinds[i].memory != VK_NULL_HANDLE)
2784 mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
2785
2786 device->ws->buffer_virtual_bind(buffer->bo,
2787 bind->pBinds[i].resourceOffset,
2788 bind->pBinds[i].size,
2789 mem ? mem->bo : NULL,
2790 bind->pBinds[i].memoryOffset);
2791 }
2792 }
2793
2794 static void
2795 radv_sparse_image_opaque_bind_memory(struct radv_device *device,
2796 const VkSparseImageOpaqueMemoryBindInfo *bind)
2797 {
2798 RADV_FROM_HANDLE(radv_image, image, bind->image);
2799
2800 for (uint32_t i = 0; i < bind->bindCount; ++i) {
2801 struct radv_device_memory *mem = NULL;
2802
2803 if (bind->pBinds[i].memory != VK_NULL_HANDLE)
2804 mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
2805
2806 device->ws->buffer_virtual_bind(image->bo,
2807 bind->pBinds[i].resourceOffset,
2808 bind->pBinds[i].size,
2809 mem ? mem->bo : NULL,
2810 bind->pBinds[i].memoryOffset);
2811 }
2812 }
2813
2814 VkResult radv_QueueBindSparse(
2815 VkQueue _queue,
2816 uint32_t bindInfoCount,
2817 const VkBindSparseInfo* pBindInfo,
2818 VkFence _fence)
2819 {
2820 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2821 RADV_FROM_HANDLE(radv_queue, queue, _queue);
2822 struct radeon_winsys_fence *base_fence = fence ? fence->fence : NULL;
2823 bool fence_emitted = false;
2824
2825 for (uint32_t i = 0; i < bindInfoCount; ++i) {
2826 struct radv_winsys_sem_info sem_info;
2827 for (uint32_t j = 0; j < pBindInfo[i].bufferBindCount; ++j) {
2828 radv_sparse_buffer_bind_memory(queue->device,
2829 pBindInfo[i].pBufferBinds + j);
2830 }
2831
2832 for (uint32_t j = 0; j < pBindInfo[i].imageOpaqueBindCount; ++j) {
2833 radv_sparse_image_opaque_bind_memory(queue->device,
2834 pBindInfo[i].pImageOpaqueBinds + j);
2835 }
2836
2837 VkResult result;
2838 result = radv_alloc_sem_info(&sem_info,
2839 pBindInfo[i].waitSemaphoreCount,
2840 pBindInfo[i].pWaitSemaphores,
2841 pBindInfo[i].signalSemaphoreCount,
2842 pBindInfo[i].pSignalSemaphores,
2843 _fence);
2844 if (result != VK_SUCCESS)
2845 return result;
2846
2847 if (pBindInfo[i].waitSemaphoreCount || pBindInfo[i].signalSemaphoreCount) {
2848 queue->device->ws->cs_submit(queue->hw_ctx, queue->queue_idx,
2849 &queue->device->empty_cs[queue->queue_family_index],
2850 1, NULL, NULL,
2851 &sem_info,
2852 false, base_fence);
2853 fence_emitted = true;
2854 if (fence)
2855 fence->submitted = true;
2856 }
2857
2858 radv_free_sem_info(&sem_info);
2859
2860 }
2861
2862 if (fence) {
2863 if (!fence_emitted) {
2864 radv_signal_fence(queue, fence);
2865 }
2866 fence->submitted = true;
2867 }
2868
2869 return VK_SUCCESS;
2870 }
2871
2872 VkResult radv_CreateFence(
2873 VkDevice _device,
2874 const VkFenceCreateInfo* pCreateInfo,
2875 const VkAllocationCallbacks* pAllocator,
2876 VkFence* pFence)
2877 {
2878 RADV_FROM_HANDLE(radv_device, device, _device);
2879 const VkExportFenceCreateInfoKHR *export =
2880 vk_find_struct_const(pCreateInfo->pNext, EXPORT_FENCE_CREATE_INFO_KHR);
2881 VkExternalFenceHandleTypeFlagsKHR handleTypes =
2882 export ? export->handleTypes : 0;
2883
2884 struct radv_fence *fence = vk_alloc2(&device->alloc, pAllocator,
2885 sizeof(*fence), 8,
2886 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2887
2888 if (!fence)
2889 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2890
2891 fence->submitted = false;
2892 fence->signalled = !!(pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT);
2893 fence->temp_syncobj = 0;
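/* Back the fence with a kernel DRM syncobj when it may be exported, or
 * when the winsys prefers syncobjs for everything; otherwise a plain
 * winsys fence is cheaper. */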
2894 if (device->always_use_syncobj || handleTypes) {
2895 int ret = device->ws->create_syncobj(device->ws, &fence->syncobj);
2896 if (ret) {
2897 vk_free2(&device->alloc, pAllocator, fence);
2898 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2899 }
2900 if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
2901 device->ws->signal_syncobj(device->ws, fence->syncobj);
2902 }
2903 fence->fence = NULL;
2904 } else {
2905 fence->fence = device->ws->create_fence();
2906 if (!fence->fence) {
2907 vk_free2(&device->alloc, pAllocator, fence);
2908 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2909 }
2910 fence->syncobj = 0;
2911 }
2912
2913 *pFence = radv_fence_to_handle(fence);
2914
2915 return VK_SUCCESS;
2916 }
2917
2918 void radv_DestroyFence(
2919 VkDevice _device,
2920 VkFence _fence,
2921 const VkAllocationCallbacks* pAllocator)
2922 {
2923 RADV_FROM_HANDLE(radv_device, device, _device);
2924 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2925
2926 if (!fence)
2927 return;
2928
2929 if (fence->temp_syncobj)
2930 device->ws->destroy_syncobj(device->ws, fence->temp_syncobj);
2931 if (fence->syncobj)
2932 device->ws->destroy_syncobj(device->ws, fence->syncobj);
2933 if (fence->fence)
2934 device->ws->destroy_fence(fence->fence);
2935 vk_free2(&device->alloc, pAllocator, fence);
2936 }
2937
2938
2939 static uint64_t radv_get_current_time(void)
2940 {
2941 struct timespec tv;
2942 clock_gettime(CLOCK_MONOTONIC, &tv);
2943 return tv.tv_nsec + tv.tv_sec*1000000000ull;
2944 }
2945
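/* Convert a relative timeout to an absolute deadline, clamping so the
 * addition cannot wrap: e.g. timeout == UINT64_MAX ("wait forever")
 * becomes UINT64_MAX - now and the sum stays at UINT64_MAX. */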
2946 static uint64_t radv_get_absolute_timeout(uint64_t timeout)
2947 {
2948 uint64_t current_time = radv_get_current_time();
2949
2950 timeout = MIN2(UINT64_MAX - current_time, timeout);
2951
2952 return current_time + timeout;
2953 }
2954
2955
2956 static bool radv_all_fences_plain_and_submitted(uint32_t fenceCount, const VkFence *pFences)
2957 {
2958 for (uint32_t i = 0; i < fenceCount; ++i) {
2959 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
2960 if (fence->syncobj || fence->temp_syncobj || (!fence->signalled && !fence->submitted))
2961 return false;
2962 }
2963 return true;
2964 }
2965
2966 VkResult radv_WaitForFences(
2967 VkDevice _device,
2968 uint32_t fenceCount,
2969 const VkFence* pFences,
2970 VkBool32 waitAll,
2971 uint64_t timeout)
2972 {
2973 RADV_FROM_HANDLE(radv_device, device, _device);
2974 timeout = radv_get_absolute_timeout(timeout);
2975
2976 if (device->always_use_syncobj) {
2977 uint32_t *handles = malloc(sizeof(uint32_t) * fenceCount);
2978 if (!handles)
2979 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2980
2981 for (uint32_t i = 0; i < fenceCount; ++i) {
2982 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
2983 handles[i] = fence->temp_syncobj ? fence->temp_syncobj : fence->syncobj;
2984 }
2985
2986 bool success = device->ws->wait_syncobj(device->ws, handles, fenceCount, waitAll, timeout);
2987
2988 free(handles);
2989 return success ? VK_SUCCESS : VK_TIMEOUT;
2990 }
2991
2992 if (!waitAll && fenceCount > 1) {
2993 /* Not doing this by default for waitAll, due to needing to allocate twice. */
2994 if (device->physical_device->rad_info.drm_minor >= 10 && radv_all_fences_plain_and_submitted(fenceCount, pFences)) {
2995 uint32_t wait_count = 0;
2996 struct radeon_winsys_fence **fences = malloc(sizeof(struct radeon_winsys_fence *) * fenceCount);
2997 if (!fences)
2998 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2999
3000 for (uint32_t i = 0; i < fenceCount; ++i) {
3001 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
3002
3003 if (fence->signalled) {
3004 free(fences);
3005 return VK_SUCCESS;
3006 }
3007
3008 fences[wait_count++] = fence->fence;
3009 }
3010
3011 bool success = device->ws->fences_wait(device->ws, fences, wait_count,
3012 waitAll, timeout - radv_get_current_time());
3013
3014 free(fences);
3015 return success ? VK_SUCCESS : VK_TIMEOUT;
3016 }
3017
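/* Fallback for older kernels or fences that are not yet submitted:
 * poll each fence's status until one signals or the deadline passes. */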
3018 while(radv_get_current_time() <= timeout) {
3019 for (uint32_t i = 0; i < fenceCount; ++i) {
3020 if (radv_GetFenceStatus(_device, pFences[i]) == VK_SUCCESS)
3021 return VK_SUCCESS;
3022 }
3023 }
3024 return VK_TIMEOUT;
3025 }
3026
3027 for (uint32_t i = 0; i < fenceCount; ++i) {
3028 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
3029 bool expired = false;
3030
3031 if (fence->temp_syncobj) {
3032 if (!device->ws->wait_syncobj(device->ws, &fence->temp_syncobj, 1, true, timeout))
3033 return VK_TIMEOUT;
3034 continue;
3035 }
3036
3037 if (fence->syncobj) {
3038 if (!device->ws->wait_syncobj(device->ws, &fence->syncobj, 1, true, timeout))
3039 return VK_TIMEOUT;
3040 continue;
3041 }
3042
3043 if (fence->signalled)
3044 continue;
3045
3046 if (!fence->submitted) {
3047 while(radv_get_current_time() <= timeout && !fence->submitted)
3048 /* Do nothing */;
3049
3050 if (!fence->submitted)
3051 return VK_TIMEOUT;
3052
3053 /* Recheck as it may have been set by submitting operations. */
3054 if (fence->signalled)
3055 continue;
3056 }
3057
3058 expired = device->ws->fence_wait(device->ws, fence->fence, true, timeout);
3059 if (!expired)
3060 return VK_TIMEOUT;
3061
3062 fence->signalled = true;
3063 }
3064
3065 return VK_SUCCESS;
3066 }
3067
3068 VkResult radv_ResetFences(VkDevice _device,
3069 uint32_t fenceCount,
3070 const VkFence *pFences)
3071 {
3072 RADV_FROM_HANDLE(radv_device, device, _device);
3073
3074 for (unsigned i = 0; i < fenceCount; ++i) {
3075 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
3076 fence->submitted = fence->signalled = false;
3077
3078 /* Per spec, we first restore the permanent payload, and then reset, so
3079 * having a temp syncobj should not skip resetting the permanent syncobj. */
3080 if (fence->temp_syncobj) {
3081 device->ws->destroy_syncobj(device->ws, fence->temp_syncobj);
3082 fence->temp_syncobj = 0;
3083 }
3084
3085 if (fence->syncobj) {
3086 device->ws->reset_syncobj(device->ws, fence->syncobj);
3087 }
3088 }
3089
3090 return VK_SUCCESS;
3091 }
3092
3093 VkResult radv_GetFenceStatus(VkDevice _device, VkFence _fence)
3094 {
3095 RADV_FROM_HANDLE(radv_device, device, _device);
3096 RADV_FROM_HANDLE(radv_fence, fence, _fence);
3097
3098 if (fence->temp_syncobj) {
3099 bool success = device->ws->wait_syncobj(device->ws, &fence->temp_syncobj, 1, true, 0);
3100 return success ? VK_SUCCESS : VK_NOT_READY;
3101 }
3102
3103 if (fence->syncobj) {
3104 bool success = device->ws->wait_syncobj(device->ws, &fence->syncobj, 1, true, 0);
3105 return success ? VK_SUCCESS : VK_NOT_READY;
3106 }
3107
3108 if (fence->signalled)
3109 return VK_SUCCESS;
3110 if (!fence->submitted)
3111 return VK_NOT_READY;
3112 if (!device->ws->fence_wait(device->ws, fence->fence, false, 0))
3113 return VK_NOT_READY;
3114
3115 return VK_SUCCESS;
3116 }
3117
3118
3119 // Queue semaphore functions
3120
3121 VkResult radv_CreateSemaphore(
3122 VkDevice _device,
3123 const VkSemaphoreCreateInfo* pCreateInfo,
3124 const VkAllocationCallbacks* pAllocator,
3125 VkSemaphore* pSemaphore)
3126 {
3127 RADV_FROM_HANDLE(radv_device, device, _device);
3128 const VkExportSemaphoreCreateInfoKHR *export =
3129 vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO_KHR);
3130 VkExternalSemaphoreHandleTypeFlagsKHR handleTypes =
3131 export ? export->handleTypes : 0;
3132
3133 struct radv_semaphore *sem = vk_alloc2(&device->alloc, pAllocator,
3134 sizeof(*sem), 8,
3135 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3136 if (!sem)
3137 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3138
3139 sem->temp_syncobj = 0;
3140 /* Create a syncobj if the semaphore may be exported, or if the winsys always uses syncobjs. */
3141 if (device->always_use_syncobj || handleTypes) {
3142 assert (device->physical_device->rad_info.has_syncobj);
3143 int ret = device->ws->create_syncobj(device->ws, &sem->syncobj);
3144 if (ret) {
3145 vk_free2(&device->alloc, pAllocator, sem);
3146 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3147 }
3148 sem->sem = NULL;
3149 } else {
3150 sem->sem = device->ws->create_sem(device->ws);
3151 if (!sem->sem) {
3152 vk_free2(&device->alloc, pAllocator, sem);
3153 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3154 }
3155 sem->syncobj = 0;
3156 }
3157
3158 *pSemaphore = radv_semaphore_to_handle(sem);
3159 return VK_SUCCESS;
3160 }
3161
3162 void radv_DestroySemaphore(
3163 VkDevice _device,
3164 VkSemaphore _semaphore,
3165 const VkAllocationCallbacks* pAllocator)
3166 {
3167 RADV_FROM_HANDLE(radv_device, device, _device);
3168 RADV_FROM_HANDLE(radv_semaphore, sem, _semaphore);
3169 if (!_semaphore)
3170 return;
3171
3172 if (sem->syncobj)
3173 device->ws->destroy_syncobj(device->ws, sem->syncobj);
3174 else
3175 device->ws->destroy_sem(sem->sem);
3176 vk_free2(&device->alloc, pAllocator, sem);
3177 }
3178
3179 VkResult radv_CreateEvent(
3180 VkDevice _device,
3181 const VkEventCreateInfo* pCreateInfo,
3182 const VkAllocationCallbacks* pAllocator,
3183 VkEvent* pEvent)
3184 {
3185 RADV_FROM_HANDLE(radv_device, device, _device);
3186 struct radv_event *event = vk_alloc2(&device->alloc, pAllocator,
3187 sizeof(*event), 8,
3188 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3189
3190 if (!event)
3191 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3192
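/* An event is a single 64-bit word in GTT memory: the CPU reads and
 * writes it through the persistent map below, the GPU through CP
 * packets. */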
3193 event->bo = device->ws->buffer_create(device->ws, 8, 8,
3194 RADEON_DOMAIN_GTT,
3195 RADEON_FLAG_VA_UNCACHED | RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING);
3196 if (!event->bo) {
3197 vk_free2(&device->alloc, pAllocator, event);
3198 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
3199 }
3200
3201 event->map = (uint64_t*)device->ws->buffer_map(event->bo);
3202
3203 *pEvent = radv_event_to_handle(event);
3204
3205 return VK_SUCCESS;
3206 }
3207
3208 void radv_DestroyEvent(
3209 VkDevice _device,
3210 VkEvent _event,
3211 const VkAllocationCallbacks* pAllocator)
3212 {
3213 RADV_FROM_HANDLE(radv_device, device, _device);
3214 RADV_FROM_HANDLE(radv_event, event, _event);
3215
3216 if (!event)
3217 return;
3218 device->ws->buffer_destroy(event->bo);
3219 vk_free2(&device->alloc, pAllocator, event);
3220 }
3221
3222 VkResult radv_GetEventStatus(
3223 VkDevice _device,
3224 VkEvent _event)
3225 {
3226 RADV_FROM_HANDLE(radv_event, event, _event);
3227
3228 if (*event->map == 1)
3229 return VK_EVENT_SET;
3230 return VK_EVENT_RESET;
3231 }
3232
3233 VkResult radv_SetEvent(
3234 VkDevice _device,
3235 VkEvent _event)
3236 {
3237 RADV_FROM_HANDLE(radv_event, event, _event);
3238 *event->map = 1;
3239
3240 return VK_SUCCESS;
3241 }
3242
3243 VkResult radv_ResetEvent(
3244 VkDevice _device,
3245 VkEvent _event)
3246 {
3247 RADV_FROM_HANDLE(radv_event, event, _event);
3248 *event->map = 0;
3249
3250 return VK_SUCCESS;
3251 }
3252
3253 VkResult radv_CreateBuffer(
3254 VkDevice _device,
3255 const VkBufferCreateInfo* pCreateInfo,
3256 const VkAllocationCallbacks* pAllocator,
3257 VkBuffer* pBuffer)
3258 {
3259 RADV_FROM_HANDLE(radv_device, device, _device);
3260 struct radv_buffer *buffer;
3261
3262 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
3263
3264 buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
3265 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3266 if (buffer == NULL)
3267 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3268
3269 buffer->size = pCreateInfo->size;
3270 buffer->usage = pCreateInfo->usage;
3271 buffer->bo = NULL;
3272 buffer->offset = 0;
3273 buffer->flags = pCreateInfo->flags;
3274
3275 buffer->shareable = vk_find_struct_const(pCreateInfo->pNext,
3276 EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR) != NULL;
3277
3278 if (pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) {
3279 buffer->bo = device->ws->buffer_create(device->ws,
3280 align64(buffer->size, 4096),
3281 4096, 0, RADEON_FLAG_VIRTUAL);
3282 if (!buffer->bo) {
3283 vk_free2(&device->alloc, pAllocator, buffer);
3284 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
3285 }
3286 }
3287
3288 *pBuffer = radv_buffer_to_handle(buffer);
3289
3290 return VK_SUCCESS;
3291 }
3292
3293 void radv_DestroyBuffer(
3294 VkDevice _device,
3295 VkBuffer _buffer,
3296 const VkAllocationCallbacks* pAllocator)
3297 {
3298 RADV_FROM_HANDLE(radv_device, device, _device);
3299 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3300
3301 if (!buffer)
3302 return;
3303
3304 if (buffer->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)
3305 device->ws->buffer_destroy(buffer->bo);
3306
3307 vk_free2(&device->alloc, pAllocator, buffer);
3308 }
3309
3310 static inline unsigned
3311 si_tile_mode_index(const struct radv_image *image, unsigned level, bool stencil)
3312 {
3313 if (stencil)
3314 return image->surface.u.legacy.stencil_tiling_index[level];
3315 else
3316 return image->surface.u.legacy.tiling_index[level];
3317 }
3318
3319 static uint32_t radv_surface_max_layer_count(struct radv_image_view *iview)
3320 {
3321 return iview->type == VK_IMAGE_VIEW_TYPE_3D ? iview->extent.depth : (iview->base_layer + iview->layer_count);
3322 }
3323
3324 static void
3325 radv_initialise_color_surface(struct radv_device *device,
3326 struct radv_color_buffer_info *cb,
3327 struct radv_image_view *iview)
3328 {
3329 const struct vk_format_description *desc;
3330 unsigned ntype, format, swap, endian;
3331 unsigned blend_clamp = 0, blend_bypass = 0;
3332 uint64_t va;
3333 const struct radeon_surf *surf = &iview->image->surface;
3334
3335 desc = vk_format_description(iview->vk_format);
3336
3337 memset(cb, 0, sizeof(*cb));
3338
3339 /* Intensity is implemented as Red, so treat it that way. */
3340 cb->cb_color_attrib = S_028C74_FORCE_DST_ALPHA_1(desc->swizzle[3] == VK_SWIZZLE_1);
3341
3342 va = radv_buffer_get_va(iview->bo) + iview->image->offset;
3343
3344 cb->cb_color_base = va >> 8;
3345
3346 if (device->physical_device->rad_info.chip_class >= GFX9) {
3347 struct gfx9_surf_meta_flags meta;
3348 if (iview->image->dcc_offset)
3349 meta = iview->image->surface.u.gfx9.dcc;
3350 else
3351 meta = iview->image->surface.u.gfx9.cmask;
3352
3353 cb->cb_color_attrib |= S_028C74_COLOR_SW_MODE(iview->image->surface.u.gfx9.surf.swizzle_mode) |
3354 S_028C74_FMASK_SW_MODE(iview->image->surface.u.gfx9.fmask.swizzle_mode) |
3355 S_028C74_RB_ALIGNED(meta.rb_aligned) |
3356 S_028C74_PIPE_ALIGNED(meta.pipe_aligned);
3357
3358 cb->cb_color_base += iview->image->surface.u.gfx9.surf_offset >> 8;
3359 cb->cb_color_base |= iview->image->surface.tile_swizzle;
3360 } else {
3361 const struct legacy_surf_level *level_info = &surf->u.legacy.level[iview->base_mip];
3362 unsigned pitch_tile_max, slice_tile_max, tile_mode_index;
3363
3364 cb->cb_color_base += level_info->offset >> 8;
3365 if (level_info->mode == RADEON_SURF_MODE_2D)
3366 cb->cb_color_base |= iview->image->surface.tile_swizzle;
3367
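/* Legacy (pre-GFX9) addressing: a tile covers 8x8 pixels and these
 * registers take maximum indices, hence the divisions and the -1. */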
3368 pitch_tile_max = level_info->nblk_x / 8 - 1;
3369 slice_tile_max = (level_info->nblk_x * level_info->nblk_y) / 64 - 1;
3370 tile_mode_index = si_tile_mode_index(iview->image, iview->base_mip, false);
3371
3372 cb->cb_color_pitch = S_028C64_TILE_MAX(pitch_tile_max);
3373 cb->cb_color_slice = S_028C68_TILE_MAX(slice_tile_max);
3374 cb->cb_color_cmask_slice = iview->image->cmask.slice_tile_max;
3375
3376 cb->cb_color_attrib |= S_028C74_TILE_MODE_INDEX(tile_mode_index);
3377
3378 if (iview->image->fmask.size) {
3379 if (device->physical_device->rad_info.chip_class >= CIK)
3380 cb->cb_color_pitch |= S_028C64_FMASK_TILE_MAX(iview->image->fmask.pitch_in_pixels / 8 - 1);
3381 cb->cb_color_attrib |= S_028C74_FMASK_TILE_MODE_INDEX(iview->image->fmask.tile_mode_index);
3382 cb->cb_color_fmask_slice = S_028C88_TILE_MAX(iview->image->fmask.slice_tile_max);
3383 } else {
3384 /* This must be set for fast clear to work without FMASK. */
3385 if (device->physical_device->rad_info.chip_class >= CIK)
3386 cb->cb_color_pitch |= S_028C64_FMASK_TILE_MAX(pitch_tile_max);
3387 cb->cb_color_attrib |= S_028C74_FMASK_TILE_MODE_INDEX(tile_mode_index);
3388 cb->cb_color_fmask_slice = S_028C88_TILE_MAX(slice_tile_max);
3389 }
3390 }
3391
3392 /* CMASK variables */
3393 va = radv_buffer_get_va(iview->bo) + iview->image->offset;
3394 va += iview->image->cmask.offset;
3395 cb->cb_color_cmask = va >> 8;
3396
3397 va = radv_buffer_get_va(iview->bo) + iview->image->offset;
3398 va += iview->image->dcc_offset;
3399 cb->cb_dcc_base = va >> 8;
3400 cb->cb_dcc_base |= iview->image->surface.tile_swizzle;
3401
3402 uint32_t max_slice = radv_surface_max_layer_count(iview) - 1;
3403 cb->cb_color_view = S_028C6C_SLICE_START(iview->base_layer) |
3404 S_028C6C_SLICE_MAX(max_slice);
3405
3406 if (iview->image->info.samples > 1) {
3407 unsigned log_samples = util_logbase2(iview->image->info.samples);
3408
3409 cb->cb_color_attrib |= S_028C74_NUM_SAMPLES(log_samples) |
3410 S_028C74_NUM_FRAGMENTS(log_samples);
3411 }
3412
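	/* CB_COLOR_FMASK must always point at valid memory; without an FMASK
	 * surface it conventionally aliases the color base. */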
3413 if (iview->image->fmask.size) {
3414 va = radv_buffer_get_va(iview->bo) + iview->image->offset + iview->image->fmask.offset;
3415 cb->cb_color_fmask = va >> 8;
3416 cb->cb_color_fmask |= iview->image->fmask.tile_swizzle;
3417 } else {
3418 cb->cb_color_fmask = cb->cb_color_base;
3419 }
3420
3421 ntype = radv_translate_color_numformat(iview->vk_format,
3422 desc,
3423 vk_format_get_first_non_void_channel(iview->vk_format));
3424 format = radv_translate_colorformat(iview->vk_format);
3425 if (format == V_028C70_COLOR_INVALID || ntype == ~0u)
3426 		radv_finishme("Illegal color");
3427 	swap = radv_translate_colorswap(iview->vk_format, false);
3428 endian = radv_colorformat_endian_swap(format);
3429
3430 /* blend clamp should be set for all NORM/SRGB types */
3431 if (ntype == V_028C70_NUMBER_UNORM ||
3432 ntype == V_028C70_NUMBER_SNORM ||
3433 ntype == V_028C70_NUMBER_SRGB)
3434 blend_clamp = 1;
3435
3436 	/* Set blend bypass according to the docs for SINT/UINT and the
3437 	 * 8/24 COLOR variants. */
3438 if (ntype == V_028C70_NUMBER_UINT || ntype == V_028C70_NUMBER_SINT ||
3439 format == V_028C70_COLOR_8_24 || format == V_028C70_COLOR_24_8 ||
3440 format == V_028C70_COLOR_X24_8_32_FLOAT) {
3441 blend_clamp = 0;
3442 blend_bypass = 1;
3443 }
3444 #if 0
3445 if ((ntype == V_028C70_NUMBER_UINT || ntype == V_028C70_NUMBER_SINT) &&
3446 (format == V_028C70_COLOR_8 ||
3447 format == V_028C70_COLOR_8_8 ||
3448 format == V_028C70_COLOR_8_8_8_8))
3449 ->color_is_int8 = true;
3450 #endif
3451 cb->cb_color_info = S_028C70_FORMAT(format) |
3452 S_028C70_COMP_SWAP(swap) |
3453 S_028C70_BLEND_CLAMP(blend_clamp) |
3454 S_028C70_BLEND_BYPASS(blend_bypass) |
3455 S_028C70_SIMPLE_FLOAT(1) |
3456 S_028C70_ROUND_MODE(ntype != V_028C70_NUMBER_UNORM &&
3457 ntype != V_028C70_NUMBER_SNORM &&
3458 ntype != V_028C70_NUMBER_SRGB &&
3459 format != V_028C70_COLOR_8_24 &&
3460 format != V_028C70_COLOR_24_8) |
3461 S_028C70_NUMBER_TYPE(ntype) |
3462 S_028C70_ENDIAN(endian);
3463 if ((iview->image->info.samples > 1) && iview->image->fmask.size) {
3464 cb->cb_color_info |= S_028C70_COMPRESSION(1);
3465 if (device->physical_device->rad_info.chip_class == SI) {
3466 unsigned fmask_bankh = util_logbase2(iview->image->fmask.bank_height);
3467 cb->cb_color_attrib |= S_028C74_FMASK_BANK_HEIGHT(fmask_bankh);
3468 }
3469 }
3470
3471 if (iview->image->cmask.size &&
3472 !(device->instance->debug_flags & RADV_DEBUG_NO_FAST_CLEARS))
3473 cb->cb_color_info |= S_028C70_FAST_CLEAR(1);
3474
3475 if (radv_vi_dcc_enabled(iview->image, iview->base_mip))
3476 cb->cb_color_info |= S_028C70_DCC_ENABLE(1);
3477
3478 if (device->physical_device->rad_info.chip_class >= VI) {
3479 unsigned max_uncompressed_block_size = V_028C78_MAX_BLOCK_SIZE_256B;
3480 unsigned min_compressed_block_size = V_028C78_MIN_BLOCK_SIZE_32B;
3481 unsigned independent_64b_blocks = 0;
3482 unsigned max_compressed_block_size;
3483
3484 /* amdvlk: [min-compressed-block-size] should be set to 32 for dGPU and
3485 64 for APU because all of our APUs to date use DIMMs which have
3486 a request granularity size of 64B while all other chips have a
3487 32B request size */
3488 if (!device->physical_device->rad_info.has_dedicated_vram)
3489 min_compressed_block_size = V_028C78_MIN_BLOCK_SIZE_64B;
3490
3491 if (iview->image->info.samples > 1) {
3492 if (iview->image->surface.bpe == 1)
3493 max_uncompressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
3494 else if (iview->image->surface.bpe == 2)
3495 max_uncompressed_block_size = V_028C78_MAX_BLOCK_SIZE_128B;
3496 }
3497
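		/* When the image can be read through the texture unit (sampled,
		 * transfer source or input attachment), DCC has to stay
		 * decodable by the TC: independent 64B blocks with a 64B
		 * maximum compressed size. Otherwise larger compressed blocks
		 * are allowed. */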
3498 if (iview->image->usage & (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
3499 VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)) {
3500 independent_64b_blocks = 1;
3501 max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
3502 } else
3503 max_compressed_block_size = max_uncompressed_block_size;
3504
3505 cb->cb_dcc_control = S_028C78_MAX_UNCOMPRESSED_BLOCK_SIZE(max_uncompressed_block_size) |
3506 S_028C78_MAX_COMPRESSED_BLOCK_SIZE(max_compressed_block_size) |
3507 S_028C78_MIN_COMPRESSED_BLOCK_SIZE(min_compressed_block_size) |
3508 S_028C78_INDEPENDENT_64B_BLOCKS(independent_64b_blocks);
3509 }
3510
3511 /* This must be set for fast clear to work without FMASK. */
3512 if (!iview->image->fmask.size &&
3513 device->physical_device->rad_info.chip_class == SI) {
3514 unsigned bankh = util_logbase2(iview->image->surface.u.legacy.bankh);
3515 cb->cb_color_attrib |= S_028C74_FMASK_BANK_HEIGHT(bankh);
3516 }
3517
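	/* On GFX9 a single descriptor covers the whole mip chain, so the view
	 * selects the base mip here while the attrib registers describe the
	 * level-0 dimensions and the last level. MIP0_DEPTH holds the depth
	 * for 3D images and the array size otherwise. */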
3518 if (device->physical_device->rad_info.chip_class >= GFX9) {
3519 unsigned mip0_depth = iview->image->type == VK_IMAGE_TYPE_3D ?
3520 (iview->extent.depth - 1) : (iview->image->info.array_size - 1);
3521
3522 cb->cb_color_view |= S_028C6C_MIP_LEVEL(iview->base_mip);
3523 cb->cb_color_attrib |= S_028C74_MIP0_DEPTH(mip0_depth) |
3524 S_028C74_RESOURCE_TYPE(iview->image->surface.u.gfx9.resource_type);
3525 cb->cb_color_attrib2 = S_028C68_MIP0_WIDTH(iview->extent.width - 1) |
3526 S_028C68_MIP0_HEIGHT(iview->extent.height - 1) |
3527 S_028C68_MAX_MIP(iview->image->info.levels - 1);
3528 }
3529 }
3530
3531 static void
3532 radv_initialise_ds_surface(struct radv_device *device,
3533 struct radv_ds_buffer_info *ds,
3534 struct radv_image_view *iview)
3535 {
3536 unsigned level = iview->base_mip;
3537 unsigned format, stencil_format;
3538 uint64_t va, s_offs, z_offs;
3539 bool stencil_only = false;
3540 memset(ds, 0, sizeof(*ds));
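	/* POLY_OFFSET_NEG_NUM_DB_BITS tells the hardware how many bits of
	 * depth precision the format provides, so depth-bias units can be
	 * converted into depth values; offset_scale is the matching factor the
	 * driver later applies to depthBiasConstantFactor. */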
3541 switch (iview->image->vk_format) {
3542 case VK_FORMAT_D24_UNORM_S8_UINT:
3543 case VK_FORMAT_X8_D24_UNORM_PACK32:
3544 ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-24);
3545 ds->offset_scale = 2.0f;
3546 break;
3547 case VK_FORMAT_D16_UNORM:
3548 case VK_FORMAT_D16_UNORM_S8_UINT:
3549 ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-16);
3550 ds->offset_scale = 4.0f;
3551 break;
3552 case VK_FORMAT_D32_SFLOAT:
3553 case VK_FORMAT_D32_SFLOAT_S8_UINT:
3554 ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-23) |
3555 S_028B78_POLY_OFFSET_DB_IS_FLOAT_FMT(1);
3556 ds->offset_scale = 1.0f;
3557 break;
3558 case VK_FORMAT_S8_UINT:
3559 stencil_only = true;
3560 break;
3561 default:
3562 break;
3563 }
3564
3565 format = radv_translate_dbformat(iview->image->vk_format);
3566 stencil_format = iview->image->surface.has_stencil ?
3567 V_028044_STENCIL_8 : V_028044_STENCIL_INVALID;
3568
3569 uint32_t max_slice = radv_surface_max_layer_count(iview) - 1;
3570 ds->db_depth_view = S_028008_SLICE_START(iview->base_layer) |
3571 S_028008_SLICE_MAX(max_slice);
3572
3573 ds->db_htile_data_base = 0;
3574 ds->db_htile_surface = 0;
3575
3576 va = radv_buffer_get_va(iview->bo) + iview->image->offset;
3577 s_offs = z_offs = va;
3578
3579 if (device->physical_device->rad_info.chip_class >= GFX9) {
3580 assert(iview->image->surface.u.gfx9.surf_offset == 0);
3581 s_offs += iview->image->surface.u.gfx9.stencil_offset;
3582
3583 ds->db_z_info = S_028038_FORMAT(format) |
3584 S_028038_NUM_SAMPLES(util_logbase2(iview->image->info.samples)) |
3585 S_028038_SW_MODE(iview->image->surface.u.gfx9.surf.swizzle_mode) |
3586 S_028038_MAXMIP(iview->image->info.levels - 1);
3587 ds->db_stencil_info = S_02803C_FORMAT(stencil_format) |
3588 S_02803C_SW_MODE(iview->image->surface.u.gfx9.stencil.swizzle_mode);
3589
3590 ds->db_z_info2 = S_028068_EPITCH(iview->image->surface.u.gfx9.surf.epitch);
3591 ds->db_stencil_info2 = S_02806C_EPITCH(iview->image->surface.u.gfx9.stencil.epitch);
3592 ds->db_depth_view |= S_028008_MIPID(level);
3593
3594 ds->db_depth_size = S_02801C_X_MAX(iview->image->info.width - 1) |
3595 S_02801C_Y_MAX(iview->image->info.height - 1);
3596
3597 if (radv_htile_enabled(iview->image, level)) {
3598 ds->db_z_info |= S_028038_TILE_SURFACE_ENABLE(1);
3599
3600 if (iview->image->tc_compatible_htile) {
3601 unsigned max_zplanes = 4;
3602
3603 if (iview->vk_format == VK_FORMAT_D16_UNORM &&
3604 iview->image->info.samples > 1)
3605 max_zplanes = 2;
3606
3607 ds->db_z_info |= S_028038_DECOMPRESS_ON_N_ZPLANES(max_zplanes + 1) |
3608 S_028038_ITERATE_FLUSH(1);
3609 ds->db_stencil_info |= S_02803C_ITERATE_FLUSH(1);
3610 }
3611
3612 if (!iview->image->surface.has_stencil)
3613 /* Use all of the htile_buffer for depth if there's no stencil. */
3614 ds->db_stencil_info |= S_02803C_TILE_STENCIL_DISABLE(1);
3615 va = radv_buffer_get_va(iview->bo) + iview->image->offset +
3616 iview->image->htile_offset;
3617 ds->db_htile_data_base = va >> 8;
3618 ds->db_htile_surface = S_028ABC_FULL_CACHE(1) |
3619 S_028ABC_PIPE_ALIGNED(iview->image->surface.u.gfx9.htile.pipe_aligned) |
3620 S_028ABC_RB_ALIGNED(iview->image->surface.u.gfx9.htile.rb_aligned);
3621 }
3622 } else {
3623 const struct legacy_surf_level *level_info = &iview->image->surface.u.legacy.level[level];
3624
3625 if (stencil_only)
3626 level_info = &iview->image->surface.u.legacy.stencil_level[level];
3627
3628 z_offs += iview->image->surface.u.legacy.level[level].offset;
3629 s_offs += iview->image->surface.u.legacy.stencil_level[level].offset;
3630
3631 ds->db_depth_info = S_02803C_ADDR5_SWIZZLE_MASK(!iview->image->tc_compatible_htile);
3632 ds->db_z_info = S_028040_FORMAT(format) | S_028040_ZRANGE_PRECISION(1);
3633 ds->db_stencil_info = S_028044_FORMAT(stencil_format);
3634
3635 if (iview->image->info.samples > 1)
3636 ds->db_z_info |= S_028040_NUM_SAMPLES(util_logbase2(iview->image->info.samples));
3637
3638 if (device->physical_device->rad_info.chip_class >= CIK) {
3639 struct radeon_info *info = &device->physical_device->rad_info;
3640 unsigned tiling_index = iview->image->surface.u.legacy.tiling_index[level];
3641 unsigned stencil_index = iview->image->surface.u.legacy.stencil_tiling_index[level];
3642 unsigned macro_index = iview->image->surface.u.legacy.macro_tile_index;
3643 unsigned tile_mode = info->si_tile_mode_array[tiling_index];
3644 unsigned stencil_tile_mode = info->si_tile_mode_array[stencil_index];
3645 unsigned macro_mode = info->cik_macrotile_mode_array[macro_index];
3646
3647 if (stencil_only)
3648 tile_mode = stencil_tile_mode;
3649
3650 ds->db_depth_info |=
3651 S_02803C_ARRAY_MODE(G_009910_ARRAY_MODE(tile_mode)) |
3652 S_02803C_PIPE_CONFIG(G_009910_PIPE_CONFIG(tile_mode)) |
3653 S_02803C_BANK_WIDTH(G_009990_BANK_WIDTH(macro_mode)) |
3654 S_02803C_BANK_HEIGHT(G_009990_BANK_HEIGHT(macro_mode)) |
3655 S_02803C_MACRO_TILE_ASPECT(G_009990_MACRO_TILE_ASPECT(macro_mode)) |
3656 S_02803C_NUM_BANKS(G_009990_NUM_BANKS(macro_mode));
3657 ds->db_z_info |= S_028040_TILE_SPLIT(G_009910_TILE_SPLIT(tile_mode));
3658 ds->db_stencil_info |= S_028044_TILE_SPLIT(G_009910_TILE_SPLIT(stencil_tile_mode));
3659 } else {
3660 unsigned tile_mode_index = si_tile_mode_index(iview->image, level, false);
3661 ds->db_z_info |= S_028040_TILE_MODE_INDEX(tile_mode_index);
3662 tile_mode_index = si_tile_mode_index(iview->image, level, true);
3663 ds->db_stencil_info |= S_028044_TILE_MODE_INDEX(tile_mode_index);
3664 if (stencil_only)
3665 ds->db_z_info |= S_028040_TILE_MODE_INDEX(tile_mode_index);
3666 }
3667
3668 ds->db_depth_size = S_028058_PITCH_TILE_MAX((level_info->nblk_x / 8) - 1) |
3669 S_028058_HEIGHT_TILE_MAX((level_info->nblk_y / 8) - 1);
3670 ds->db_depth_slice = S_02805C_SLICE_TILE_MAX((level_info->nblk_x * level_info->nblk_y) / 64 - 1);
3671
3672 if (radv_htile_enabled(iview->image, level)) {
3673 ds->db_z_info |= S_028040_TILE_SURFACE_ENABLE(1);
3674
3675 if (!iview->image->surface.has_stencil &&
3676 !iview->image->tc_compatible_htile)
3677 /* Use all of the htile_buffer for depth if there's no stencil. */
3678 ds->db_stencil_info |= S_028044_TILE_STENCIL_DISABLE(1);
3679
3680 va = radv_buffer_get_va(iview->bo) + iview->image->offset +
3681 iview->image->htile_offset;
3682 ds->db_htile_data_base = va >> 8;
3683 ds->db_htile_surface = S_028ABC_FULL_CACHE(1);
3684
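			/* With TC-compatible HTILE the DB can keep tiles in
			 * plane-compressed form; DECOMPRESS_ON_N_ZPLANES bounds
			 * how many Z planes a tile may accumulate before being
			 * decompressed, and the budget shrinks as the sample
			 * count grows. */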
3685 if (iview->image->tc_compatible_htile) {
3686 ds->db_htile_surface |= S_028ABC_TC_COMPATIBLE(1);
3687
3688 if (iview->image->info.samples <= 1)
3689 ds->db_z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(5);
3690 else if (iview->image->info.samples <= 4)
3691 ds->db_z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(3);
3692 else
3693 				ds->db_z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(2);
3694 }
3695 }
3696 }
3697
3698 ds->db_z_read_base = ds->db_z_write_base = z_offs >> 8;
3699 ds->db_stencil_read_base = ds->db_stencil_write_base = s_offs >> 8;
3700 }
3701
3702 VkResult radv_CreateFramebuffer(
3703 VkDevice _device,
3704 const VkFramebufferCreateInfo* pCreateInfo,
3705 const VkAllocationCallbacks* pAllocator,
3706 VkFramebuffer* pFramebuffer)
3707 {
3708 RADV_FROM_HANDLE(radv_device, device, _device);
3709 struct radv_framebuffer *framebuffer;
3710
3711 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
3712
3713 size_t size = sizeof(*framebuffer) +
3714 sizeof(struct radv_attachment_info) * pCreateInfo->attachmentCount;
3715 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
3716 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3717 if (framebuffer == NULL)
3718 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3719
3720 framebuffer->attachment_count = pCreateInfo->attachmentCount;
3721 framebuffer->width = pCreateInfo->width;
3722 framebuffer->height = pCreateInfo->height;
3723 framebuffer->layers = pCreateInfo->layers;
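	/* Precompute the CB/DB register state for every attachment, and
	 * defensively clamp the framebuffer dimensions to the smallest
	 * attachment so rendering never addresses pixels outside any of
	 * them. */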
3724 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
3725 VkImageView _iview = pCreateInfo->pAttachments[i];
3726 struct radv_image_view *iview = radv_image_view_from_handle(_iview);
3727 framebuffer->attachments[i].attachment = iview;
3728 if (iview->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) {
3729 radv_initialise_color_surface(device, &framebuffer->attachments[i].cb, iview);
3730 } else if (iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
3731 radv_initialise_ds_surface(device, &framebuffer->attachments[i].ds, iview);
3732 }
3733 framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
3734 framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
3735 framebuffer->layers = MIN2(framebuffer->layers, radv_surface_max_layer_count(iview));
3736 }
3737
3738 *pFramebuffer = radv_framebuffer_to_handle(framebuffer);
3739 return VK_SUCCESS;
3740 }
3741
3742 void radv_DestroyFramebuffer(
3743 VkDevice _device,
3744 VkFramebuffer _fb,
3745 const VkAllocationCallbacks* pAllocator)
3746 {
3747 RADV_FROM_HANDLE(radv_device, device, _device);
3748 RADV_FROM_HANDLE(radv_framebuffer, fb, _fb);
3749
3750 if (!fb)
3751 return;
3752 vk_free2(&device->alloc, pAllocator, fb);
3753 }
3754
3755 static unsigned radv_tex_wrap(VkSamplerAddressMode address_mode)
3756 {
3757 switch (address_mode) {
3758 case VK_SAMPLER_ADDRESS_MODE_REPEAT:
3759 return V_008F30_SQ_TEX_WRAP;
3760 case VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT:
3761 return V_008F30_SQ_TEX_MIRROR;
3762 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE:
3763 return V_008F30_SQ_TEX_CLAMP_LAST_TEXEL;
3764 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER:
3765 return V_008F30_SQ_TEX_CLAMP_BORDER;
3766 case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE:
3767 return V_008F30_SQ_TEX_MIRROR_ONCE_LAST_TEXEL;
3768 default:
3769 unreachable("illegal tex wrap mode");
3770 break;
3771 }
3772 }
3773
3774 static unsigned
3775 radv_tex_compare(VkCompareOp op)
3776 {
3777 switch (op) {
3778 case VK_COMPARE_OP_NEVER:
3779 return V_008F30_SQ_TEX_DEPTH_COMPARE_NEVER;
3780 case VK_COMPARE_OP_LESS:
3781 return V_008F30_SQ_TEX_DEPTH_COMPARE_LESS;
3782 case VK_COMPARE_OP_EQUAL:
3783 return V_008F30_SQ_TEX_DEPTH_COMPARE_EQUAL;
3784 case VK_COMPARE_OP_LESS_OR_EQUAL:
3785 return V_008F30_SQ_TEX_DEPTH_COMPARE_LESSEQUAL;
3786 case VK_COMPARE_OP_GREATER:
3787 return V_008F30_SQ_TEX_DEPTH_COMPARE_GREATER;
3788 case VK_COMPARE_OP_NOT_EQUAL:
3789 return V_008F30_SQ_TEX_DEPTH_COMPARE_NOTEQUAL;
3790 case VK_COMPARE_OP_GREATER_OR_EQUAL:
3791 return V_008F30_SQ_TEX_DEPTH_COMPARE_GREATEREQUAL;
3792 case VK_COMPARE_OP_ALWAYS:
3793 return V_008F30_SQ_TEX_DEPTH_COMPARE_ALWAYS;
3794 default:
3795 unreachable("illegal compare mode");
3796 break;
3797 }
3798 }
3799
3800 static unsigned
3801 radv_tex_filter(VkFilter filter, unsigned max_aniso)
3802 {
3803 	switch (filter) {
3804 	case VK_FILTER_NEAREST:
3805 		return (max_aniso > 1 ? V_008F38_SQ_TEX_XY_FILTER_ANISO_POINT :
3806 			V_008F38_SQ_TEX_XY_FILTER_POINT);
3807 	case VK_FILTER_LINEAR:
3808 		return (max_aniso > 1 ? V_008F38_SQ_TEX_XY_FILTER_ANISO_BILINEAR :
3809 			V_008F38_SQ_TEX_XY_FILTER_BILINEAR);
3810 	case VK_FILTER_CUBIC_IMG:
3811 	default:
3812 		fprintf(stderr, "illegal texture filter\n");
3813 		return 0;
3814 }
3815 }
3816
3817 static unsigned
3818 radv_tex_mipfilter(VkSamplerMipmapMode mode)
3819 {
3820 switch (mode) {
3821 case VK_SAMPLER_MIPMAP_MODE_NEAREST:
3822 return V_008F38_SQ_TEX_Z_FILTER_POINT;
3823 case VK_SAMPLER_MIPMAP_MODE_LINEAR:
3824 return V_008F38_SQ_TEX_Z_FILTER_LINEAR;
3825 default:
3826 return V_008F38_SQ_TEX_Z_FILTER_NONE;
3827 }
3828 }
3829
3830 static unsigned
3831 radv_tex_bordercolor(VkBorderColor bcolor)
3832 {
3833 switch (bcolor) {
3834 case VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK:
3835 case VK_BORDER_COLOR_INT_TRANSPARENT_BLACK:
3836 return V_008F3C_SQ_TEX_BORDER_COLOR_TRANS_BLACK;
3837 case VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK:
3838 case VK_BORDER_COLOR_INT_OPAQUE_BLACK:
3839 return V_008F3C_SQ_TEX_BORDER_COLOR_OPAQUE_BLACK;
3840 case VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE:
3841 case VK_BORDER_COLOR_INT_OPAQUE_WHITE:
3842 return V_008F3C_SQ_TEX_BORDER_COLOR_OPAQUE_WHITE;
3843 default:
3844 break;
3845 }
3846 return 0;
3847 }
3848
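/* Map maxAnisotropy onto the log2-encoded MAX_ANISO_RATIO field:
 * 1x -> 0, 2x -> 1, 4x -> 2, 8x -> 3, 16x -> 4.
 */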
3849 static unsigned
3850 radv_tex_aniso_filter(unsigned filter)
3851 {
3852 if (filter < 2)
3853 return 0;
3854 if (filter < 4)
3855 return 1;
3856 if (filter < 8)
3857 return 2;
3858 if (filter < 16)
3859 return 3;
3860 return 4;
3861 }
3862
3863 static void
3864 radv_init_sampler(struct radv_device *device,
3865 struct radv_sampler *sampler,
3866 const VkSamplerCreateInfo *pCreateInfo)
3867 {
3868 uint32_t max_aniso = pCreateInfo->anisotropyEnable && pCreateInfo->maxAnisotropy > 1.0 ?
3869 (uint32_t) pCreateInfo->maxAnisotropy : 0;
3870 uint32_t max_aniso_ratio = radv_tex_aniso_filter(max_aniso);
3871 bool is_vi = (device->physical_device->rad_info.chip_class >= VI);
3872
3873 sampler->state[0] = (S_008F30_CLAMP_X(radv_tex_wrap(pCreateInfo->addressModeU)) |
3874 S_008F30_CLAMP_Y(radv_tex_wrap(pCreateInfo->addressModeV)) |
3875 S_008F30_CLAMP_Z(radv_tex_wrap(pCreateInfo->addressModeW)) |
3876 S_008F30_MAX_ANISO_RATIO(max_aniso_ratio) |
3877 S_008F30_DEPTH_COMPARE_FUNC(radv_tex_compare(pCreateInfo->compareOp)) |
3878 S_008F30_FORCE_UNNORMALIZED(pCreateInfo->unnormalizedCoordinates ? 1 : 0) |
3879 S_008F30_ANISO_THRESHOLD(max_aniso_ratio >> 1) |
3880 S_008F30_ANISO_BIAS(max_aniso_ratio) |
3881 S_008F30_DISABLE_CUBE_WRAP(0) |
3882 S_008F30_COMPAT_MODE(is_vi));
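	/* The LOD fields are fixed point with 8 fractional bits: S_FIXED(x, 8)
	 * is just x * 256, e.g. S_FIXED(1.5, 8) == 0x180. Min/max LOD are
	 * clamped to the [0, 15] range the hardware supports, the LOD bias
	 * below to [-16, 16]. */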
3883 sampler->state[1] = (S_008F34_MIN_LOD(S_FIXED(CLAMP(pCreateInfo->minLod, 0, 15), 8)) |
3884 S_008F34_MAX_LOD(S_FIXED(CLAMP(pCreateInfo->maxLod, 0, 15), 8)) |
3885 S_008F34_PERF_MIP(max_aniso_ratio ? max_aniso_ratio + 6 : 0));
3886 sampler->state[2] = (S_008F38_LOD_BIAS(S_FIXED(CLAMP(pCreateInfo->mipLodBias, -16, 16), 8)) |
3887 S_008F38_XY_MAG_FILTER(radv_tex_filter(pCreateInfo->magFilter, max_aniso)) |
3888 S_008F38_XY_MIN_FILTER(radv_tex_filter(pCreateInfo->minFilter, max_aniso)) |
3889 S_008F38_MIP_FILTER(radv_tex_mipfilter(pCreateInfo->mipmapMode)) |
3890 S_008F38_MIP_POINT_PRECLAMP(0) |
3891 S_008F38_DISABLE_LSB_CEIL(device->physical_device->rad_info.chip_class <= VI) |
3892 S_008F38_FILTER_PREC_FIX(1) |
3893 S_008F38_ANISO_OVERRIDE(is_vi));
3894 sampler->state[3] = (S_008F3C_BORDER_COLOR_PTR(0) |
3895 S_008F3C_BORDER_COLOR_TYPE(radv_tex_bordercolor(pCreateInfo->borderColor)));
3896 }
3897
3898 VkResult radv_CreateSampler(
3899 VkDevice _device,
3900 const VkSamplerCreateInfo* pCreateInfo,
3901 const VkAllocationCallbacks* pAllocator,
3902 VkSampler* pSampler)
3903 {
3904 RADV_FROM_HANDLE(radv_device, device, _device);
3905 struct radv_sampler *sampler;
3906
3907 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
3908
3909 sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
3910 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3911 if (!sampler)
3912 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3913
3914 radv_init_sampler(device, sampler, pCreateInfo);
3915 *pSampler = radv_sampler_to_handle(sampler);
3916
3917 return VK_SUCCESS;
3918 }
3919
3920 void radv_DestroySampler(
3921 VkDevice _device,
3922 VkSampler _sampler,
3923 const VkAllocationCallbacks* pAllocator)
3924 {
3925 RADV_FROM_HANDLE(radv_device, device, _device);
3926 RADV_FROM_HANDLE(radv_sampler, sampler, _sampler);
3927
3928 if (!sampler)
3929 return;
3930 vk_free2(&device->alloc, pAllocator, sampler);
3931 }
3932
3933 /* vk_icd.h does not declare this function, so we declare it here to
3934  * suppress -Wmissing-prototypes.
3935 */
3936 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
3937 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
3938
3939 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
3940 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
3941 {
3942 /* For the full details on loader interface versioning, see
3943 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
3944 * What follows is a condensed summary, to help you navigate the large and
3945 * confusing official doc.
3946 *
3947 * - Loader interface v0 is incompatible with later versions. We don't
3948 * support it.
3949 *
3950 * - In loader interface v1:
3951 * - The first ICD entrypoint called by the loader is
3952 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
3953 * entrypoint.
3954 * - The ICD must statically expose no other Vulkan symbol unless it is
3955 * linked with -Bsymbolic.
3956 * - Each dispatchable Vulkan handle created by the ICD must be
3957 * a pointer to a struct whose first member is VK_LOADER_DATA. The
3958 * ICD must initialize VK_LOADER_DATA.loadMagic to ICD_LOADER_MAGIC.
3959 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
3960 * vkDestroySurfaceKHR(). The ICD must be capable of working with
3961 * such loader-managed surfaces.
3962 *
3963 * - Loader interface v2 differs from v1 in:
3964 * - The first ICD entrypoint called by the loader is
3965 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
3966 * statically expose this entrypoint.
3967 *
3968 * - Loader interface v3 differs from v2 in:
3969 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
3970  *    vkDestroySurfaceKHR(), and other API that uses VkSurfaceKHR,
3971 * because the loader no longer does so.
3972 */
3973 *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
3974 return VK_SUCCESS;
3975 }
3976
3977 VkResult radv_GetMemoryFdKHR(VkDevice _device,
3978 const VkMemoryGetFdInfoKHR *pGetFdInfo,
3979 int *pFD)
3980 {
3981 RADV_FROM_HANDLE(radv_device, device, _device);
3982 RADV_FROM_HANDLE(radv_device_memory, memory, pGetFdInfo->memory);
3983
3984 assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
3985
3986 /* At the moment, we support only the below handle types. */
3987 assert(pGetFdInfo->handleType ==
3988 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR ||
3989 pGetFdInfo->handleType ==
3990 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
3991
3992 bool ret = radv_get_memory_fd(device, memory, pFD);
3993 	if (!ret)
3994 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
3995 return VK_SUCCESS;
3996 }
3997
3998 VkResult radv_GetMemoryFdPropertiesKHR(VkDevice _device,
3999 VkExternalMemoryHandleTypeFlagBitsKHR handleType,
4000 int fd,
4001 VkMemoryFdPropertiesKHR *pMemoryFdProperties)
4002 {
4003 switch (handleType) {
4004 case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
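		/* A dma-buf can be imported into any of our memory types, so
		 * advertise them all. */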
4005 pMemoryFdProperties->memoryTypeBits = (1 << RADV_MEM_TYPE_COUNT) - 1;
4006 return VK_SUCCESS;
4007
4008 default:
4009 /* The valid usage section for this function says:
4010 *
4011 * "handleType must not be one of the handle types defined as
4012 * opaque."
4013 *
4014 * So opaque handle types fall into the default "unsupported" case.
4015 */
4016 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
4017 }
4018 }
4019
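/* Import a DRM syncobj from an opaque fd. On success this takes ownership of
 * the fd (closing it) and replaces, destroying if necessary, any syncobj the
 * semaphore or fence previously held.
 */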
4020 static VkResult radv_import_opaque_fd(struct radv_device *device,
4021 int fd,
4022 uint32_t *syncobj)
4023 {
4024 uint32_t syncobj_handle = 0;
4025 int ret = device->ws->import_syncobj(device->ws, fd, &syncobj_handle);
4026 if (ret != 0)
4027 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
4028
4029 if (*syncobj)
4030 device->ws->destroy_syncobj(device->ws, *syncobj);
4031
4032 *syncobj = syncobj_handle;
4033 close(fd);
4034
4035 return VK_SUCCESS;
4036 }
4037
4038 static VkResult radv_import_sync_fd(struct radv_device *device,
4039 int fd,
4040 uint32_t *syncobj)
4041 {
4042 	/* If we need to create a syncobj, create it locally first, so that on
4043 	 * error we don't leave the semaphore or fence holding one in an undetermined state. */
4044 uint32_t syncobj_handle = *syncobj;
4045 if (!syncobj_handle) {
4046 int ret = device->ws->create_syncobj(device->ws, &syncobj_handle);
4047 if (ret) {
4048 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
4049 }
4050 }
4051
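	/* Per VK_KHR_external_semaphore_fd and VK_KHR_external_fence_fd, a
	 * sync FD of -1 stands in for an already-signaled payload, so just
	 * signal the syncobj instead of importing anything. */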
4052 if (fd == -1) {
4053 device->ws->signal_syncobj(device->ws, syncobj_handle);
4054 } else {
4055 int ret = device->ws->import_syncobj_from_sync_file(device->ws, syncobj_handle, fd);
4056 if (ret != 0)
4057 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
4058 }
4059
4060 *syncobj = syncobj_handle;
4061 if (fd != -1)
4062 close(fd);
4063
4064 return VK_SUCCESS;
4065 }
4066
4067 VkResult radv_ImportSemaphoreFdKHR(VkDevice _device,
4068 const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
4069 {
4070 RADV_FROM_HANDLE(radv_device, device, _device);
4071 RADV_FROM_HANDLE(radv_semaphore, sem, pImportSemaphoreFdInfo->semaphore);
4072 uint32_t *syncobj_dst = NULL;
4073
4074 if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR) {
4075 syncobj_dst = &sem->temp_syncobj;
4076 } else {
4077 syncobj_dst = &sem->syncobj;
4078 }
4079
4080 switch(pImportSemaphoreFdInfo->handleType) {
4081 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
4082 return radv_import_opaque_fd(device, pImportSemaphoreFdInfo->fd, syncobj_dst);
4083 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR:
4084 return radv_import_sync_fd(device, pImportSemaphoreFdInfo->fd, syncobj_dst);
4085 default:
4086 unreachable("Unhandled semaphore handle type");
4087 }
4088 }
4089
4090 VkResult radv_GetSemaphoreFdKHR(VkDevice _device,
4091 const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
4092 int *pFd)
4093 {
4094 RADV_FROM_HANDLE(radv_device, device, _device);
4095 RADV_FROM_HANDLE(radv_semaphore, sem, pGetFdInfo->semaphore);
4096 int ret;
4097 uint32_t syncobj_handle;
4098
4099 if (sem->temp_syncobj)
4100 syncobj_handle = sem->temp_syncobj;
4101 else
4102 syncobj_handle = sem->syncobj;
4103
4104 switch(pGetFdInfo->handleType) {
4105 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
4106 ret = device->ws->export_syncobj(device->ws, syncobj_handle, pFd);
4107 break;
4108 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR:
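		/* Exporting a sync FD consumes the payload: a temporary
		 * syncobj is destroyed outright, a permanent one is reset, per
		 * the export-and-reset semantics the spec requires for sync
		 * FDs. */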
4109 ret = device->ws->export_syncobj_to_sync_file(device->ws, syncobj_handle, pFd);
4110 if (!ret) {
4111 if (sem->temp_syncobj) {
4112 				device->ws->destroy_syncobj(device->ws, sem->temp_syncobj);
4113 sem->temp_syncobj = 0;
4114 } else {
4115 device->ws->reset_syncobj(device->ws, syncobj_handle);
4116 }
4117 }
4118 break;
4119 default:
4120 unreachable("Unhandled semaphore handle type");
4121 }
4122
4123 if (ret)
4124 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
4125 return VK_SUCCESS;
4126 }
4127
4128 void radv_GetPhysicalDeviceExternalSemaphoreProperties(
4129 VkPhysicalDevice physicalDevice,
4130 const VkPhysicalDeviceExternalSemaphoreInfoKHR* pExternalSemaphoreInfo,
4131 VkExternalSemaphorePropertiesKHR* pExternalSemaphoreProperties)
4132 {
4133 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
4134
4135 /* Require has_syncobj_wait_for_submit for the syncobj signal ioctl introduced at virtually the same time */
4136 if (pdevice->rad_info.has_syncobj_wait_for_submit &&
4137 (pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR ||
4138 pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR)) {
4139 pExternalSemaphoreProperties->exportFromImportedHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR;
4140 pExternalSemaphoreProperties->compatibleHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR;
4141 pExternalSemaphoreProperties->externalSemaphoreFeatures = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
4142 VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
4143 } else if (pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR) {
4144 pExternalSemaphoreProperties->exportFromImportedHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
4145 pExternalSemaphoreProperties->compatibleHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
4146 pExternalSemaphoreProperties->externalSemaphoreFeatures = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
4147 VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
4148 } else {
4149 pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
4150 pExternalSemaphoreProperties->compatibleHandleTypes = 0;
4151 pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
4152 }
4153 }
4154
4155 VkResult radv_ImportFenceFdKHR(VkDevice _device,
4156 const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
4157 {
4158 RADV_FROM_HANDLE(radv_device, device, _device);
4159 RADV_FROM_HANDLE(radv_fence, fence, pImportFenceFdInfo->fence);
4160 uint32_t *syncobj_dst = NULL;
4161
4162
4163 if (pImportFenceFdInfo->flags & VK_FENCE_IMPORT_TEMPORARY_BIT_KHR) {
4164 syncobj_dst = &fence->temp_syncobj;
4165 } else {
4166 syncobj_dst = &fence->syncobj;
4167 }
4168
4169 switch(pImportFenceFdInfo->handleType) {
4170 case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
4171 return radv_import_opaque_fd(device, pImportFenceFdInfo->fd, syncobj_dst);
4172 case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR:
4173 return radv_import_sync_fd(device, pImportFenceFdInfo->fd, syncobj_dst);
4174 default:
4175 unreachable("Unhandled fence handle type");
4176 }
4177 }
4178
4179 VkResult radv_GetFenceFdKHR(VkDevice _device,
4180 const VkFenceGetFdInfoKHR *pGetFdInfo,
4181 int *pFd)
4182 {
4183 RADV_FROM_HANDLE(radv_device, device, _device);
4184 RADV_FROM_HANDLE(radv_fence, fence, pGetFdInfo->fence);
4185 int ret;
4186 uint32_t syncobj_handle;
4187
4188 if (fence->temp_syncobj)
4189 syncobj_handle = fence->temp_syncobj;
4190 else
4191 syncobj_handle = fence->syncobj;
4192
4193 switch(pGetFdInfo->handleType) {
4194 case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
4195 ret = device->ws->export_syncobj(device->ws, syncobj_handle, pFd);
4196 break;
4197 case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR:
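		/* Same export-and-reset semantics as the semaphore sync-FD
		 * path above. */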
4198 ret = device->ws->export_syncobj_to_sync_file(device->ws, syncobj_handle, pFd);
4199 if (!ret) {
4200 if (fence->temp_syncobj) {
4201 				device->ws->destroy_syncobj(device->ws, fence->temp_syncobj);
4202 fence->temp_syncobj = 0;
4203 } else {
4204 device->ws->reset_syncobj(device->ws, syncobj_handle);
4205 }
4206 }
4207 break;
4208 default:
4209 unreachable("Unhandled fence handle type");
4210 }
4211
4212 if (ret)
4213 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
4214 return VK_SUCCESS;
4215 }
4216
4217 void radv_GetPhysicalDeviceExternalFenceProperties(
4218 VkPhysicalDevice physicalDevice,
4219 const VkPhysicalDeviceExternalFenceInfoKHR* pExternalFenceInfo,
4220 VkExternalFencePropertiesKHR* pExternalFenceProperties)
4221 {
4222 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
4223
4224 if (pdevice->rad_info.has_syncobj_wait_for_submit &&
4225 (pExternalFenceInfo->handleType == VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR ||
4226 pExternalFenceInfo->handleType == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR)) {
4227 pExternalFenceProperties->exportFromImportedHandleTypes = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR | VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR;
4228 pExternalFenceProperties->compatibleHandleTypes = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR | VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR;
4229 pExternalFenceProperties->externalFenceFeatures = VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT_KHR |
4230 							VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT_KHR;
4231 } else {
4232 pExternalFenceProperties->exportFromImportedHandleTypes = 0;
4233 pExternalFenceProperties->compatibleHandleTypes = 0;
4234 pExternalFenceProperties->externalFenceFeatures = 0;
4235 }
4236 }
4237
4238 VkResult
4239 radv_CreateDebugReportCallbackEXT(VkInstance _instance,
4240 const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
4241 const VkAllocationCallbacks* pAllocator,
4242 VkDebugReportCallbackEXT* pCallback)
4243 {
4244 RADV_FROM_HANDLE(radv_instance, instance, _instance);
4245 return vk_create_debug_report_callback(&instance->debug_report_callbacks,
4246 pCreateInfo, pAllocator, &instance->alloc,
4247 pCallback);
4248 }
4249
4250 void
4251 radv_DestroyDebugReportCallbackEXT(VkInstance _instance,
4252 VkDebugReportCallbackEXT _callback,
4253 const VkAllocationCallbacks* pAllocator)
4254 {
4255 RADV_FROM_HANDLE(radv_instance, instance, _instance);
4256 vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
4257 _callback, pAllocator, &instance->alloc);
4258 }
4259
4260 void
4261 radv_DebugReportMessageEXT(VkInstance _instance,
4262 VkDebugReportFlagsEXT flags,
4263 VkDebugReportObjectTypeEXT objectType,
4264 uint64_t object,
4265 size_t location,
4266 int32_t messageCode,
4267 const char* pLayerPrefix,
4268 const char* pMessage)
4269 {
4270 RADV_FROM_HANDLE(radv_instance, instance, _instance);
4271 vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
4272 object, location, messageCode, pLayerPrefix, pMessage);
4273 }