radv: add radv_emit_shader_pointer() helper
[mesa.git] / src / amd / vulkan / radv_device.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 */
27
28 #include <stdbool.h>
29 #include <string.h>
30 #include <unistd.h>
31 #include <fcntl.h>
32 #include "radv_debug.h"
33 #include "radv_private.h"
34 #include "radv_shader.h"
35 #include "radv_cs.h"
36 #include "util/disk_cache.h"
37 #include "util/strtod.h"
38 #include "vk_util.h"
39 #include <xf86drm.h>
40 #include <amdgpu.h>
41 #include <amdgpu_drm.h>
42 #include "winsys/amdgpu/radv_amdgpu_winsys_public.h"
43 #include "ac_llvm_util.h"
44 #include "vk_format.h"
45 #include "sid.h"
46 #include "gfx9d.h"
47 #include "addrlib/gfx9/chip/gfx9_enum.h"
48 #include "util/debug.h"
49
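/* Build the pipeline/disk cache UUID from the Mesa and LLVM binary timestamps
 * plus the chip family, so that cached shaders are invalidated whenever the
 * driver, the compiler or the target GPU changes.
 */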
50 static int
51 radv_device_get_cache_uuid(enum radeon_family family, void *uuid)
52 {
53 uint32_t mesa_timestamp, llvm_timestamp;
54 uint16_t f = family;
55 memset(uuid, 0, VK_UUID_SIZE);
56 if (!disk_cache_get_function_timestamp(radv_device_get_cache_uuid, &mesa_timestamp) ||
57 !disk_cache_get_function_timestamp(LLVMInitializeAMDGPUTargetInfo, &llvm_timestamp))
58 return -1;
59
60 memcpy(uuid, &mesa_timestamp, 4);
61 memcpy((char*)uuid + 4, &llvm_timestamp, 4);
62 memcpy((char*)uuid + 8, &f, 2);
63 snprintf((char*)uuid + 10, VK_UUID_SIZE - 10, "radv");
64 return 0;
65 }
66
67 static void
68 radv_get_driver_uuid(void *uuid)
69 {
70 ac_compute_driver_uuid(uuid, VK_UUID_SIZE);
71 }
72
73 static void
74 radv_get_device_uuid(struct radeon_info *info, void *uuid)
75 {
76 ac_compute_device_uuid(info, uuid, VK_UUID_SIZE);
77 }
78
79 static void
80 radv_get_device_name(enum radeon_family family, char *name, size_t name_len)
81 {
82 const char *chip_string;
83 char llvm_string[32] = {};
84
85 switch (family) {
86 case CHIP_TAHITI: chip_string = "AMD RADV TAHITI"; break;
87 case CHIP_PITCAIRN: chip_string = "AMD RADV PITCAIRN"; break;
88 case CHIP_VERDE: chip_string = "AMD RADV CAPE VERDE"; break;
89 case CHIP_OLAND: chip_string = "AMD RADV OLAND"; break;
90 case CHIP_HAINAN: chip_string = "AMD RADV HAINAN"; break;
91 case CHIP_BONAIRE: chip_string = "AMD RADV BONAIRE"; break;
92 case CHIP_KAVERI: chip_string = "AMD RADV KAVERI"; break;
93 case CHIP_KABINI: chip_string = "AMD RADV KABINI"; break;
94 case CHIP_HAWAII: chip_string = "AMD RADV HAWAII"; break;
95 case CHIP_MULLINS: chip_string = "AMD RADV MULLINS"; break;
96 case CHIP_TONGA: chip_string = "AMD RADV TONGA"; break;
97 case CHIP_ICELAND: chip_string = "AMD RADV ICELAND"; break;
98 case CHIP_CARRIZO: chip_string = "AMD RADV CARRIZO"; break;
99 case CHIP_FIJI: chip_string = "AMD RADV FIJI"; break;
100 case CHIP_POLARIS10: chip_string = "AMD RADV POLARIS10"; break;
101 case CHIP_POLARIS11: chip_string = "AMD RADV POLARIS11"; break;
102 case CHIP_POLARIS12: chip_string = "AMD RADV POLARIS12"; break;
103 case CHIP_STONEY: chip_string = "AMD RADV STONEY"; break;
104 case CHIP_VEGAM: chip_string = "AMD RADV VEGA M"; break;
105 case CHIP_VEGA10: chip_string = "AMD RADV VEGA10"; break;
106 case CHIP_VEGA12: chip_string = "AMD RADV VEGA12"; break;
107 case CHIP_RAVEN: chip_string = "AMD RADV RAVEN"; break;
108 default: chip_string = "AMD RADV unknown"; break;
109 }
110
111 snprintf(llvm_string, sizeof(llvm_string),
112 " (LLVM %i.%i.%i)", (HAVE_LLVM >> 8) & 0xff,
113 HAVE_LLVM & 0xff, MESA_LLVM_VERSION_PATCH);
114 snprintf(name, name_len, "%s%s", chip_string, llvm_string);
115 }
116
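/* Expose up to three heaps (CPU-invisible VRAM, CPU-visible VRAM, GTT) and up
 * to four memory types on top of them. On APUs without dedicated VRAM the GTT
 * heap and types are advertised as device-local as well.
 */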
117 static void
118 radv_physical_device_init_mem_types(struct radv_physical_device *device)
119 {
120 STATIC_ASSERT(RADV_MEM_HEAP_COUNT <= VK_MAX_MEMORY_HEAPS);
121 uint64_t visible_vram_size = MIN2(device->rad_info.vram_size,
122 device->rad_info.vram_vis_size);
123
124 int vram_index = -1, visible_vram_index = -1, gart_index = -1;
125 device->memory_properties.memoryHeapCount = 0;
126 if (device->rad_info.vram_size - visible_vram_size > 0) {
127 vram_index = device->memory_properties.memoryHeapCount++;
128 device->memory_properties.memoryHeaps[vram_index] = (VkMemoryHeap) {
129 .size = device->rad_info.vram_size - visible_vram_size,
130 .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
131 };
132 }
133 if (visible_vram_size) {
134 visible_vram_index = device->memory_properties.memoryHeapCount++;
135 device->memory_properties.memoryHeaps[visible_vram_index] = (VkMemoryHeap) {
136 .size = visible_vram_size,
137 .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
138 };
139 }
140 if (device->rad_info.gart_size > 0) {
141 gart_index = device->memory_properties.memoryHeapCount++;
142 device->memory_properties.memoryHeaps[gart_index] = (VkMemoryHeap) {
143 .size = device->rad_info.gart_size,
144 .flags = device->rad_info.has_dedicated_vram ? 0 : VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
145 };
146 }
147
148 STATIC_ASSERT(RADV_MEM_TYPE_COUNT <= VK_MAX_MEMORY_TYPES);
149 unsigned type_count = 0;
150 if (vram_index >= 0) {
151 device->mem_type_indices[type_count] = RADV_MEM_TYPE_VRAM;
152 device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
153 .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
154 .heapIndex = vram_index,
155 };
156 }
157 if (gart_index >= 0) {
158 device->mem_type_indices[type_count] = RADV_MEM_TYPE_GTT_WRITE_COMBINE;
159 device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
160 .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
161 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
162 (device->rad_info.has_dedicated_vram ? 0 : VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT),
163 .heapIndex = gart_index,
164 };
165 }
166 if (visible_vram_index >= 0) {
167 device->mem_type_indices[type_count] = RADV_MEM_TYPE_VRAM_CPU_ACCESS;
168 device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
169 .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
170 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
171 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
172 .heapIndex = visible_vram_index,
173 };
174 }
175 if (gart_index >= 0) {
176 device->mem_type_indices[type_count] = RADV_MEM_TYPE_GTT_CACHED;
177 device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
178 .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
179 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
180 VK_MEMORY_PROPERTY_HOST_CACHED_BIT |
181 (device->rad_info.has_dedicated_vram ? 0 : VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT),
182 .heapIndex = gart_index,
183 };
184 }
185 device->memory_properties.memoryTypeCount = type_count;
186 }
187
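/* RADV_FORCE_FAMILY overrides the reported GPU family (and the chip_class
 * derived from it) with any LLVM processor name; unknown names abort. Mainly
 * useful for compiler testing without the actual hardware.
 */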
188 static void
189 radv_handle_env_var_force_family(struct radv_physical_device *device)
190 {
191 const char *family = getenv("RADV_FORCE_FAMILY");
192 unsigned i;
193
194 if (!family)
195 return;
196
197 for (i = CHIP_TAHITI; i < CHIP_LAST; i++) {
198 if (!strcmp(family, ac_get_llvm_processor_name(i))) {
199 /* Override family and chip_class. */
200 device->rad_info.family = i;
201
202 if (i >= CHIP_VEGA10)
203 device->rad_info.chip_class = GFX9;
204 else if (i >= CHIP_TONGA)
205 device->rad_info.chip_class = VI;
206 else if (i >= CHIP_BONAIRE)
207 device->rad_info.chip_class = CIK;
208 else
209 device->rad_info.chip_class = SI;
210
211 return;
212 }
213 }
214
215 fprintf(stderr, "radv: Unknown family: %s\n", family);
216 exit(1);
217 }
218
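/* Open the render node, verify it is driven by amdgpu, create the winsys and
 * fill in the static per-GPU state: name, UUIDs, memory types, disk cache,
 * workarounds, supported extensions and WSI.
 */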
219 static VkResult
220 radv_physical_device_init(struct radv_physical_device *device,
221 struct radv_instance *instance,
222 drmDevicePtr drm_device)
223 {
224 const char *path = drm_device->nodes[DRM_NODE_RENDER];
225 VkResult result;
226 drmVersionPtr version;
227 int fd;
228
229 fd = open(path, O_RDWR | O_CLOEXEC);
230 if (fd < 0)
231 return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
232
233 version = drmGetVersion(fd);
234 if (!version) {
235 close(fd);
236 return vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
237 "failed to get version %s: %m", path);
238 }
239
240 if (strcmp(version->name, "amdgpu")) {
241 drmFreeVersion(version);
242 close(fd);
243 return VK_ERROR_INCOMPATIBLE_DRIVER;
244 }
245 drmFreeVersion(version);
246
247 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
248 device->instance = instance;
249 assert(strlen(path) < ARRAY_SIZE(device->path));
250 strncpy(device->path, path, ARRAY_SIZE(device->path));
251
252 device->ws = radv_amdgpu_winsys_create(fd, instance->debug_flags,
253 instance->perftest_flags);
254 if (!device->ws) {
255 result = VK_ERROR_INCOMPATIBLE_DRIVER;
256 goto fail;
257 }
258
259 device->local_fd = fd;
260 device->ws->query_info(device->ws, &device->rad_info);
261
262 radv_handle_env_var_force_family(device);
263
264 radv_get_device_name(device->rad_info.family, device->name, sizeof(device->name));
265
266 if (radv_device_get_cache_uuid(device->rad_info.family, device->cache_uuid)) {
267 device->ws->destroy(device->ws);
268 result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
269 "cannot generate UUID");
270 goto fail;
271 }
272
273 /* These flags affect shader compilation. */
274 uint64_t shader_env_flags =
275 (device->instance->perftest_flags & RADV_PERFTEST_SISCHED ? 0x1 : 0) |
276 (device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH ? 0x2 : 0);
277
278 /* The gpu id is already embedded in the uuid so we just pass "radv"
279 * when creating the cache.
280 */
281 char buf[VK_UUID_SIZE * 2 + 1];
282 disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
283 device->disk_cache = disk_cache_create(device->name, buf, shader_env_flags);
284
285 if (device->rad_info.chip_class < VI ||
286 device->rad_info.chip_class > GFX9)
287 fprintf(stderr, "WARNING: radv is not a conformant vulkan implementation, testing use only.\n");
288
289 radv_get_driver_uuid(&device->driver_uuid);

290 radv_get_device_uuid(&device->rad_info, &device->device_uuid);
291
292 if (device->rad_info.family == CHIP_STONEY ||
293 device->rad_info.chip_class >= GFX9) {
294 device->has_rbplus = true;
295 device->rbplus_allowed = device->rad_info.family == CHIP_STONEY ||
296 device->rad_info.family == CHIP_VEGA12 ||
297 device->rad_info.family == CHIP_RAVEN;
298 }
299
300 /* The mere presence of CLEAR_STATE in the IB causes random GPU hangs
301 * on SI.
302 */
303 device->has_clear_state = device->rad_info.chip_class >= CIK;
304
305 device->cpdma_prefetch_writes_memory = device->rad_info.chip_class <= VI;
306
307 /* Vega10/Raven need a special workaround for a hardware bug. */
308 device->has_scissor_bug = device->rad_info.family == CHIP_VEGA10 ||
309 device->rad_info.family == CHIP_RAVEN;
310
311 /* Out-of-order primitive rasterization. */
312 device->has_out_of_order_rast = device->rad_info.chip_class >= VI &&
313 device->rad_info.max_se >= 2;
314 device->out_of_order_rast_allowed = device->has_out_of_order_rast &&
315 !(device->instance->debug_flags & RADV_DEBUG_NO_OUT_OF_ORDER);
316
317 device->dcc_msaa_allowed = device->rad_info.chip_class == VI &&
318 (device->instance->perftest_flags & RADV_PERFTEST_DCC_MSAA);
319
320 radv_physical_device_init_mem_types(device);
321 radv_fill_device_extension_table(device, &device->supported_extensions);
322
323 result = radv_init_wsi(device);
324 if (result != VK_SUCCESS) {
325 device->ws->destroy(device->ws);
326 goto fail;
327 }
328
329 if ((device->instance->debug_flags & RADV_DEBUG_INFO))
330 ac_print_gpu_info(&device->rad_info);
331
332 return VK_SUCCESS;
333
334 fail:
335 close(fd);
336 return result;
337 }
338
339 static void
340 radv_physical_device_finish(struct radv_physical_device *device)
341 {
342 radv_finish_wsi(device);
343 device->ws->destroy(device->ws);
344 disk_cache_destroy(device->disk_cache);
345 close(device->local_fd);
346 }
347
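/* Fallback VkAllocationCallbacks used when the application does not provide
 * its own allocator. The align parameter is ignored on the assumption that
 * malloc's default alignment is sufficient for the driver's internal
 * allocations.
 */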
348 static void *
349 default_alloc_func(void *pUserData, size_t size, size_t align,
350 VkSystemAllocationScope allocationScope)
351 {
352 return malloc(size);
353 }
354
355 static void *
356 default_realloc_func(void *pUserData, void *pOriginal, size_t size,
357 size_t align, VkSystemAllocationScope allocationScope)
358 {
359 return realloc(pOriginal, size);
360 }
361
362 static void
363 default_free_func(void *pUserData, void *pMemory)
364 {
365 free(pMemory);
366 }
367
368 static const VkAllocationCallbacks default_alloc = {
369 .pUserData = NULL,
370 .pfnAllocation = default_alloc_func,
371 .pfnReallocation = default_realloc_func,
372 .pfnFree = default_free_func,
373 };
374
375 static const struct debug_control radv_debug_options[] = {
376 {"nofastclears", RADV_DEBUG_NO_FAST_CLEARS},
377 {"nodcc", RADV_DEBUG_NO_DCC},
378 {"shaders", RADV_DEBUG_DUMP_SHADERS},
379 {"nocache", RADV_DEBUG_NO_CACHE},
380 {"shaderstats", RADV_DEBUG_DUMP_SHADER_STATS},
381 {"nohiz", RADV_DEBUG_NO_HIZ},
382 {"nocompute", RADV_DEBUG_NO_COMPUTE_QUEUE},
383 {"unsafemath", RADV_DEBUG_UNSAFE_MATH},
384 {"allbos", RADV_DEBUG_ALL_BOS},
385 {"noibs", RADV_DEBUG_NO_IBS},
386 {"spirv", RADV_DEBUG_DUMP_SPIRV},
387 {"vmfaults", RADV_DEBUG_VM_FAULTS},
388 {"zerovram", RADV_DEBUG_ZERO_VRAM},
389 {"syncshaders", RADV_DEBUG_SYNC_SHADERS},
390 {"nosisched", RADV_DEBUG_NO_SISCHED},
391 {"preoptir", RADV_DEBUG_PREOPTIR},
392 {"nodynamicbounds", RADV_DEBUG_NO_DYNAMIC_BOUNDS},
393 {"nooutoforder", RADV_DEBUG_NO_OUT_OF_ORDER},
394 {"info", RADV_DEBUG_INFO},
395 {NULL, 0}
396 };
397
398 const char *
399 radv_get_debug_option_name(int id)
400 {
401 assert(id < ARRAY_SIZE(radv_debug_options) - 1);
402 return radv_debug_options[id].string;
403 }
404
405 static const struct debug_control radv_perftest_options[] = {
406 {"nobatchchain", RADV_PERFTEST_NO_BATCHCHAIN},
407 {"sisched", RADV_PERFTEST_SISCHED},
408 {"localbos", RADV_PERFTEST_LOCAL_BOS},
409 {"binning", RADV_PERFTEST_BINNING},
410 {"dccmsaa", RADV_PERFTEST_DCC_MSAA},
411 {NULL, 0}
412 };
413
414 const char *
415 radv_get_perftest_option_name(int id)
416 {
417 assert(id < ARRAY_SIZE(radv_perftest_options) - 1);
418 return radv_perftest_options[id].string;
419 }
420
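/* Apply per-application defaults keyed on VkApplicationInfo::pApplicationName;
 * currently this only force-enables sisched for Talos.
 */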
421 static void
422 radv_handle_per_app_options(struct radv_instance *instance,
423 const VkApplicationInfo *info)
424 {
425 const char *name = info ? info->pApplicationName : NULL;
426
427 if (!name)
428 return;
429
430 if (!strcmp(name, "Talos - Linux - 32bit") ||
431 !strcmp(name, "Talos - Linux - 64bit")) {
432 if (!(instance->debug_flags & RADV_DEBUG_NO_SISCHED)) {
433 /* Force enable LLVM sisched for Talos because it looks
434 * safe and it gives a few more FPS.
435 */
436 instance->perftest_flags |= RADV_PERFTEST_SISCHED;
437 }
438 }
439 }
440
441 static int radv_get_instance_extension_index(const char *name)
442 {
443 for (unsigned i = 0; i < RADV_INSTANCE_EXTENSION_COUNT; ++i) {
444 if (strcmp(name, radv_instance_extensions[i].extensionName) == 0)
445 return i;
446 }
447 return -1;
448 }
449
450
451 VkResult radv_CreateInstance(
452 const VkInstanceCreateInfo* pCreateInfo,
453 const VkAllocationCallbacks* pAllocator,
454 VkInstance* pInstance)
455 {
456 struct radv_instance *instance;
457 VkResult result;
458
459 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
460
461 uint32_t client_version;
462 if (pCreateInfo->pApplicationInfo &&
463 pCreateInfo->pApplicationInfo->apiVersion != 0) {
464 client_version = pCreateInfo->pApplicationInfo->apiVersion;
465 } else {
466 radv_EnumerateInstanceVersion(&client_version);
467 }
468
469 instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
470 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
471 if (!instance)
472 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
473
474 instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
475
476 if (pAllocator)
477 instance->alloc = *pAllocator;
478 else
479 instance->alloc = default_alloc;
480
481 instance->apiVersion = client_version;
482 instance->physicalDeviceCount = -1;
483
484 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
485 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
486 int index = radv_get_instance_extension_index(ext_name);
487
488 if (index < 0 || !radv_supported_instance_extensions.extensions[index]) {
489 vk_free2(&default_alloc, pAllocator, instance);
490 return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
491 }
492
493 instance->enabled_extensions.extensions[index] = true;
494 }
495
496 result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
497 if (result != VK_SUCCESS) {
498 vk_free2(&default_alloc, pAllocator, instance);
499 return vk_error(result);
500 }
501
502 _mesa_locale_init();
503
504 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
505
506 instance->debug_flags = parse_debug_string(getenv("RADV_DEBUG"),
507 radv_debug_options);
508
509 instance->perftest_flags = parse_debug_string(getenv("RADV_PERFTEST"),
510 radv_perftest_options);
511
512 radv_handle_per_app_options(instance, pCreateInfo->pApplicationInfo);
513
514 *pInstance = radv_instance_to_handle(instance);
515
516 return VK_SUCCESS;
517 }
518
519 void radv_DestroyInstance(
520 VkInstance _instance,
521 const VkAllocationCallbacks* pAllocator)
522 {
523 RADV_FROM_HANDLE(radv_instance, instance, _instance);
524
525 if (!instance)
526 return;
527
528 for (int i = 0; i < instance->physicalDeviceCount; ++i) {
529 radv_physical_device_finish(instance->physicalDevices + i);
530 }
531
532 VG(VALGRIND_DESTROY_MEMPOOL(instance));
533
534 _mesa_locale_fini();
535
536 vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
537
538 vk_free(&instance->alloc, instance);
539 }
540
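/* Walk the DRM device list and create a radv_physical_device for every PCI
 * render node with the AMD/ATI vendor id.
 */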
541 static VkResult
542 radv_enumerate_devices(struct radv_instance *instance)
543 {
544 /* TODO: Check for more devices ? */
545 drmDevicePtr devices[8];
546 VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
547 int max_devices;
548
549 instance->physicalDeviceCount = 0;
550
551 max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
552 if (max_devices < 1)
553 return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
554
555 for (unsigned i = 0; i < (unsigned)max_devices; i++) {
556 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
557 devices[i]->bustype == DRM_BUS_PCI &&
558 devices[i]->deviceinfo.pci->vendor_id == ATI_VENDOR_ID) {
559
560 result = radv_physical_device_init(instance->physicalDevices +
561 instance->physicalDeviceCount,
562 instance,
563 devices[i]);
564 if (result == VK_SUCCESS)
565 ++instance->physicalDeviceCount;
566 else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
567 break;
568 }
569 }
570 drmFreeDevices(devices, max_devices);
571
572 return result;
573 }
574
575 VkResult radv_EnumeratePhysicalDevices(
576 VkInstance _instance,
577 uint32_t* pPhysicalDeviceCount,
578 VkPhysicalDevice* pPhysicalDevices)
579 {
580 RADV_FROM_HANDLE(radv_instance, instance, _instance);
581 VkResult result;
582
583 if (instance->physicalDeviceCount < 0) {
584 result = radv_enumerate_devices(instance);
585 if (result != VK_SUCCESS &&
586 result != VK_ERROR_INCOMPATIBLE_DRIVER)
587 return result;
588 }
589
590 if (!pPhysicalDevices) {
591 *pPhysicalDeviceCount = instance->physicalDeviceCount;
592 } else {
593 *pPhysicalDeviceCount = MIN2(*pPhysicalDeviceCount, instance->physicalDeviceCount);
594 for (unsigned i = 0; i < *pPhysicalDeviceCount; ++i)
595 pPhysicalDevices[i] = radv_physical_device_to_handle(instance->physicalDevices + i);
596 }
597
598 return *pPhysicalDeviceCount < instance->physicalDeviceCount ? VK_INCOMPLETE
599 : VK_SUCCESS;
600 }
601
602 VkResult radv_EnumeratePhysicalDeviceGroups(
603 VkInstance _instance,
604 uint32_t* pPhysicalDeviceGroupCount,
605 VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties)
606 {
607 RADV_FROM_HANDLE(radv_instance, instance, _instance);
608 VkResult result;
609
610 if (instance->physicalDeviceCount < 0) {
611 result = radv_enumerate_devices(instance);
612 if (result != VK_SUCCESS &&
613 result != VK_ERROR_INCOMPATIBLE_DRIVER)
614 return result;
615 }
616
617 if (!pPhysicalDeviceGroupProperties) {
618 *pPhysicalDeviceGroupCount = instance->physicalDeviceCount;
619 } else {
620 *pPhysicalDeviceGroupCount = MIN2(*pPhysicalDeviceGroupCount, instance->physicalDeviceCount);
621 for (unsigned i = 0; i < *pPhysicalDeviceGroupCount; ++i) {
622 pPhysicalDeviceGroupProperties[i].physicalDeviceCount = 1;
623 pPhysicalDeviceGroupProperties[i].physicalDevices[0] = radv_physical_device_to_handle(instance->physicalDevices + i);
624 pPhysicalDeviceGroupProperties[i].subsetAllocation = false;
625 }
626 }
627 return *pPhysicalDeviceGroupCount < instance->physicalDeviceCount ? VK_INCOMPLETE
628 : VK_SUCCESS;
629 }
630
631 void radv_GetPhysicalDeviceFeatures(
632 VkPhysicalDevice physicalDevice,
633 VkPhysicalDeviceFeatures* pFeatures)
634 {
635 memset(pFeatures, 0, sizeof(*pFeatures));
636
637 *pFeatures = (VkPhysicalDeviceFeatures) {
638 .robustBufferAccess = true,
639 .fullDrawIndexUint32 = true,
640 .imageCubeArray = true,
641 .independentBlend = true,
642 .geometryShader = true,
643 .tessellationShader = true,
644 .sampleRateShading = true,
645 .dualSrcBlend = true,
646 .logicOp = true,
647 .multiDrawIndirect = true,
648 .drawIndirectFirstInstance = true,
649 .depthClamp = true,
650 .depthBiasClamp = true,
651 .fillModeNonSolid = true,
652 .depthBounds = true,
653 .wideLines = true,
654 .largePoints = true,
655 .alphaToOne = true,
656 .multiViewport = true,
657 .samplerAnisotropy = true,
658 .textureCompressionETC2 = false,
659 .textureCompressionASTC_LDR = false,
660 .textureCompressionBC = true,
661 .occlusionQueryPrecise = true,
662 .pipelineStatisticsQuery = true,
663 .vertexPipelineStoresAndAtomics = true,
664 .fragmentStoresAndAtomics = true,
665 .shaderTessellationAndGeometryPointSize = true,
666 .shaderImageGatherExtended = true,
667 .shaderStorageImageExtendedFormats = true,
668 .shaderStorageImageMultisample = false,
669 .shaderUniformBufferArrayDynamicIndexing = true,
670 .shaderSampledImageArrayDynamicIndexing = true,
671 .shaderStorageBufferArrayDynamicIndexing = true,
672 .shaderStorageImageArrayDynamicIndexing = true,
673 .shaderStorageImageReadWithoutFormat = true,
674 .shaderStorageImageWriteWithoutFormat = true,
675 .shaderClipDistance = true,
676 .shaderCullDistance = true,
677 .shaderFloat64 = true,
678 .shaderInt64 = true,
679 .shaderInt16 = false,
680 .sparseBinding = true,
681 .variableMultisampleRate = true,
682 .inheritedQueries = true,
683 };
684 }
685
686 void radv_GetPhysicalDeviceFeatures2(
687 VkPhysicalDevice physicalDevice,
688 VkPhysicalDeviceFeatures2KHR *pFeatures)
689 {
690 vk_foreach_struct(ext, pFeatures->pNext) {
691 switch (ext->sType) {
692 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
693 VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *)ext;
694 features->variablePointersStorageBuffer = true;
695 features->variablePointers = false;
696 break;
697 }
698 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: {
699 VkPhysicalDeviceMultiviewFeaturesKHR *features = (VkPhysicalDeviceMultiviewFeaturesKHR*)ext;
700 features->multiview = true;
701 features->multiviewGeometryShader = true;
702 features->multiviewTessellationShader = true;
703 break;
704 }
705 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: {
706 VkPhysicalDeviceShaderDrawParameterFeatures *features =
707 (VkPhysicalDeviceShaderDrawParameterFeatures*)ext;
708 features->shaderDrawParameters = true;
709 break;
710 }
711 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
712 VkPhysicalDeviceProtectedMemoryFeatures *features =
713 (VkPhysicalDeviceProtectedMemoryFeatures*)ext;
714 features->protectedMemory = false;
715 break;
716 }
717 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
718 VkPhysicalDevice16BitStorageFeatures *features =
719 (VkPhysicalDevice16BitStorageFeatures*)ext;
720 features->storageBuffer16BitAccess = false;
721 features->uniformAndStorageBuffer16BitAccess = false;
722 features->storagePushConstant16 = false;
723 features->storageInputOutput16 = false;
724 break;
725 }
726 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
727 VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
728 (VkPhysicalDeviceSamplerYcbcrConversionFeatures*)ext;
729 features->samplerYcbcrConversion = false;
730 break;
731 }
732 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
733 VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
734 (VkPhysicalDeviceDescriptorIndexingFeaturesEXT*)ext;
735 features->shaderInputAttachmentArrayDynamicIndexing = true;
736 features->shaderUniformTexelBufferArrayDynamicIndexing = true;
737 features->shaderStorageTexelBufferArrayDynamicIndexing = true;
738 features->shaderUniformBufferArrayNonUniformIndexing = false;
739 features->shaderSampledImageArrayNonUniformIndexing = false;
740 features->shaderStorageBufferArrayNonUniformIndexing = false;
741 features->shaderStorageImageArrayNonUniformIndexing = false;
742 features->shaderInputAttachmentArrayNonUniformIndexing = false;
743 features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
744 features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
745 features->descriptorBindingUniformBufferUpdateAfterBind = true;
746 features->descriptorBindingSampledImageUpdateAfterBind = true;
747 features->descriptorBindingStorageImageUpdateAfterBind = true;
748 features->descriptorBindingStorageBufferUpdateAfterBind = true;
749 features->descriptorBindingUniformTexelBufferUpdateAfterBind = true;
750 features->descriptorBindingStorageTexelBufferUpdateAfterBind = true;
751 features->descriptorBindingUpdateUnusedWhilePending = true;
752 features->descriptorBindingPartiallyBound = true;
753 features->descriptorBindingVariableDescriptorCount = true;
754 features->runtimeDescriptorArray = true;
755 break;
756 }
757 default:
758 break;
759 }
760 }
761 radv_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
762 }
763
764 void radv_GetPhysicalDeviceProperties(
765 VkPhysicalDevice physicalDevice,
766 VkPhysicalDeviceProperties* pProperties)
767 {
768 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
769 VkSampleCountFlags sample_counts = 0xf;
770
771 /* Make sure that the entire descriptor set is addressable with a signed
772 * 32-bit int. So the sum of all limits scaled by descriptor size has to
773 * be at most 2 GiB. A combined image & sampler object counts as one of
774 * both. This limit is for the pipeline layout, not for the set layout, but
775 * there is no set limit, so we just set a pipeline limit. I don't think
776 * any app is going to hit this soon. */
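/* Worst case per resource: 32 (uniform buffer) + 32 (storage buffer) +
 * 32 (sampler) + 64 (sampled image) + 64 (storage image) = 224 bytes. */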
777 size_t max_descriptor_set_size = ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
778 (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
779 32 /* storage buffer, 32 due to potential space wasted on alignment */ +
780 32 /* sampler, largest when combined with image */ +
781 64 /* sampled image */ +
782 64 /* storage image */);
783
784 VkPhysicalDeviceLimits limits = {
785 .maxImageDimension1D = (1 << 14),
786 .maxImageDimension2D = (1 << 14),
787 .maxImageDimension3D = (1 << 11),
788 .maxImageDimensionCube = (1 << 14),
789 .maxImageArrayLayers = (1 << 11),
790 .maxTexelBufferElements = 128 * 1024 * 1024,
791 .maxUniformBufferRange = UINT32_MAX,
792 .maxStorageBufferRange = UINT32_MAX,
793 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
794 .maxMemoryAllocationCount = UINT32_MAX,
795 .maxSamplerAllocationCount = 64 * 1024,
796 .bufferImageGranularity = 64, /* A cache line */
797 .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
798 .maxBoundDescriptorSets = MAX_SETS,
799 .maxPerStageDescriptorSamplers = max_descriptor_set_size,
800 .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
801 .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
802 .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
803 .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
804 .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
805 .maxPerStageResources = max_descriptor_set_size,
806 .maxDescriptorSetSamplers = max_descriptor_set_size,
807 .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
808 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
809 .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
810 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
811 .maxDescriptorSetSampledImages = max_descriptor_set_size,
812 .maxDescriptorSetStorageImages = max_descriptor_set_size,
813 .maxDescriptorSetInputAttachments = max_descriptor_set_size,
814 .maxVertexInputAttributes = 32,
815 .maxVertexInputBindings = 32,
816 .maxVertexInputAttributeOffset = 2047,
817 .maxVertexInputBindingStride = 2048,
818 .maxVertexOutputComponents = 128,
819 .maxTessellationGenerationLevel = 64,
820 .maxTessellationPatchSize = 32,
821 .maxTessellationControlPerVertexInputComponents = 128,
822 .maxTessellationControlPerVertexOutputComponents = 128,
823 .maxTessellationControlPerPatchOutputComponents = 120,
824 .maxTessellationControlTotalOutputComponents = 4096,
825 .maxTessellationEvaluationInputComponents = 128,
826 .maxTessellationEvaluationOutputComponents = 128,
827 .maxGeometryShaderInvocations = 127,
828 .maxGeometryInputComponents = 64,
829 .maxGeometryOutputComponents = 128,
830 .maxGeometryOutputVertices = 256,
831 .maxGeometryTotalOutputComponents = 1024,
832 .maxFragmentInputComponents = 128,
833 .maxFragmentOutputAttachments = 8,
834 .maxFragmentDualSrcAttachments = 1,
835 .maxFragmentCombinedOutputResources = 8,
836 .maxComputeSharedMemorySize = 32768,
837 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
838 .maxComputeWorkGroupInvocations = 2048,
839 .maxComputeWorkGroupSize = {
840 2048,
841 2048,
842 2048
843 },
844 .subPixelPrecisionBits = 4 /* FIXME */,
845 .subTexelPrecisionBits = 4 /* FIXME */,
846 .mipmapPrecisionBits = 4 /* FIXME */,
847 .maxDrawIndexedIndexValue = UINT32_MAX,
848 .maxDrawIndirectCount = UINT32_MAX,
849 .maxSamplerLodBias = 16,
850 .maxSamplerAnisotropy = 16,
851 .maxViewports = MAX_VIEWPORTS,
852 .maxViewportDimensions = { (1 << 14), (1 << 14) },
853 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
854 .viewportSubPixelBits = 8,
855 .minMemoryMapAlignment = 4096, /* A page */
856 .minTexelBufferOffsetAlignment = 1,
857 .minUniformBufferOffsetAlignment = 4,
858 .minStorageBufferOffsetAlignment = 4,
859 .minTexelOffset = -32,
860 .maxTexelOffset = 31,
861 .minTexelGatherOffset = -32,
862 .maxTexelGatherOffset = 31,
863 .minInterpolationOffset = -2,
864 .maxInterpolationOffset = 2,
865 .subPixelInterpolationOffsetBits = 8,
866 .maxFramebufferWidth = (1 << 14),
867 .maxFramebufferHeight = (1 << 14),
868 .maxFramebufferLayers = (1 << 10),
869 .framebufferColorSampleCounts = sample_counts,
870 .framebufferDepthSampleCounts = sample_counts,
871 .framebufferStencilSampleCounts = sample_counts,
872 .framebufferNoAttachmentsSampleCounts = sample_counts,
873 .maxColorAttachments = MAX_RTS,
874 .sampledImageColorSampleCounts = sample_counts,
875 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
876 .sampledImageDepthSampleCounts = sample_counts,
877 .sampledImageStencilSampleCounts = sample_counts,
878 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
879 .maxSampleMaskWords = 1,
880 .timestampComputeAndGraphics = true,
881 .timestampPeriod = 1000000.0 / pdevice->rad_info.clock_crystal_freq,
882 .maxClipDistances = 8,
883 .maxCullDistances = 8,
884 .maxCombinedClipAndCullDistances = 8,
885 .discreteQueuePriorities = 1,
886 .pointSizeRange = { 0.125, 255.875 },
887 .lineWidthRange = { 0.0, 7.9921875 },
888 .pointSizeGranularity = (1.0 / 8.0),
889 .lineWidthGranularity = (1.0 / 128.0),
890 .strictLines = false, /* FINISHME */
891 .standardSampleLocations = true,
892 .optimalBufferCopyOffsetAlignment = 128,
893 .optimalBufferCopyRowPitchAlignment = 128,
894 .nonCoherentAtomSize = 64,
895 };
896
897 *pProperties = (VkPhysicalDeviceProperties) {
898 .apiVersion = radv_physical_device_api_version(pdevice),
899 .driverVersion = vk_get_driver_version(),
900 .vendorID = ATI_VENDOR_ID,
901 .deviceID = pdevice->rad_info.pci_id,
902 .deviceType = pdevice->rad_info.has_dedicated_vram ? VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU : VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
903 .limits = limits,
904 .sparseProperties = {0},
905 };
906
907 strcpy(pProperties->deviceName, pdevice->name);
908 memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
909 }
910
911 void radv_GetPhysicalDeviceProperties2(
912 VkPhysicalDevice physicalDevice,
913 VkPhysicalDeviceProperties2KHR *pProperties)
914 {
915 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
916 radv_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
917
918 vk_foreach_struct(ext, pProperties->pNext) {
919 switch (ext->sType) {
920 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
921 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
922 (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
923 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
924 break;
925 }
926 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
927 VkPhysicalDeviceIDPropertiesKHR *properties = (VkPhysicalDeviceIDPropertiesKHR*)ext;
928 memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
929 memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
930 properties->deviceLUIDValid = false;
931 break;
932 }
933 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: {
934 VkPhysicalDeviceMultiviewPropertiesKHR *properties = (VkPhysicalDeviceMultiviewPropertiesKHR*)ext;
935 properties->maxMultiviewViewCount = MAX_VIEWS;
936 properties->maxMultiviewInstanceIndex = INT_MAX;
937 break;
938 }
939 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
940 VkPhysicalDevicePointClippingPropertiesKHR *properties =
941 (VkPhysicalDevicePointClippingPropertiesKHR*)ext;
942 properties->pointClippingBehavior = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
943 break;
944 }
945 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT: {
946 VkPhysicalDeviceDiscardRectanglePropertiesEXT *properties =
947 (VkPhysicalDeviceDiscardRectanglePropertiesEXT*)ext;
948 properties->maxDiscardRectangles = MAX_DISCARD_RECTANGLES;
949 break;
950 }
951 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT: {
952 VkPhysicalDeviceExternalMemoryHostPropertiesEXT *properties =
953 (VkPhysicalDeviceExternalMemoryHostPropertiesEXT *) ext;
954 properties->minImportedHostPointerAlignment = 4096;
955 break;
956 }
957 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES: {
958 VkPhysicalDeviceSubgroupProperties *properties =
959 (VkPhysicalDeviceSubgroupProperties*)ext;
960 properties->subgroupSize = 64;
961 properties->supportedStages = VK_SHADER_STAGE_ALL;
962 properties->supportedOperations =
963 VK_SUBGROUP_FEATURE_BASIC_BIT |
964 VK_SUBGROUP_FEATURE_BALLOT_BIT |
965 VK_SUBGROUP_FEATURE_QUAD_BIT |
966 VK_SUBGROUP_FEATURE_SHUFFLE_BIT |
967 VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT |
968 VK_SUBGROUP_FEATURE_VOTE_BIT;
969 properties->quadOperationsInAllStages = true;
970 break;
971 }
972 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
973 VkPhysicalDeviceMaintenance3Properties *properties =
974 (VkPhysicalDeviceMaintenance3Properties*)ext;
975 /* Make sure everything is addressable by a signed 32-bit int, and
976 * our largest descriptors are 96 bytes. */
977 properties->maxPerSetDescriptors = (1ull << 31) / 96;
978 /* Our buffer size fields allow only this much */
979 properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
980 break;
981 }
982 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT: {
983 VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT *properties =
984 (VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT *)ext;
985 /* GFX6-8 only support single channel min/max filter. */
986 properties->filterMinmaxImageComponentMapping = pdevice->rad_info.chip_class >= GFX9;
987 properties->filterMinmaxSingleComponentFormats = true;
988 break;
989 }
990 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD: {
991 VkPhysicalDeviceShaderCorePropertiesAMD *properties =
992 (VkPhysicalDeviceShaderCorePropertiesAMD *)ext;
993
994 /* Shader engines. */
995 properties->shaderEngineCount =
996 pdevice->rad_info.max_se;
997 properties->shaderArraysPerEngineCount =
998 pdevice->rad_info.max_sh_per_se;
999 properties->computeUnitsPerShaderArray =
1000 pdevice->rad_info.num_good_compute_units /
1001 (pdevice->rad_info.max_se *
1002 pdevice->rad_info.max_sh_per_se);
1003 properties->simdPerComputeUnit = 4;
1004 properties->wavefrontsPerSimd =
1005 pdevice->rad_info.family == CHIP_TONGA ||
1006 pdevice->rad_info.family == CHIP_ICELAND ||
1007 pdevice->rad_info.family == CHIP_POLARIS10 ||
1008 pdevice->rad_info.family == CHIP_POLARIS11 ||
1009 pdevice->rad_info.family == CHIP_POLARIS12 ||
1010 pdevice->rad_info.family == CHIP_VEGAM ? 8 : 10;
1011 properties->wavefrontSize = 64;
1012
1013 /* SGPR. */
1014 properties->sgprsPerSimd =
1015 radv_get_num_physical_sgprs(pdevice);
1016 properties->minSgprAllocation =
1017 pdevice->rad_info.chip_class >= VI ? 16 : 8;
1018 properties->maxSgprAllocation =
1019 pdevice->rad_info.family == CHIP_TONGA ||
1020 pdevice->rad_info.family == CHIP_ICELAND ? 96 : 104;
1021 properties->sgprAllocationGranularity =
1022 pdevice->rad_info.chip_class >= VI ? 16 : 8;
1023
1024 /* VGPR. */
1025 properties->vgprsPerSimd = RADV_NUM_PHYSICAL_VGPRS;
1026 properties->minVgprAllocation = 4;
1027 properties->maxVgprAllocation = 256;
1028 properties->vgprAllocationGranularity = 4;
1029 break;
1030 }
1031 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT: {
1032 VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *properties =
1033 (VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *)ext;
1034 properties->maxVertexAttribDivisor = UINT32_MAX;
1035 break;
1036 }
1037 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT: {
1038 VkPhysicalDeviceDescriptorIndexingPropertiesEXT *properties =
1039 (VkPhysicalDeviceDescriptorIndexingPropertiesEXT*)ext;
1040 properties->maxUpdateAfterBindDescriptorsInAllPools = UINT32_MAX / 64;
1041 properties->shaderUniformBufferArrayNonUniformIndexingNative = false;
1042 properties->shaderSampledImageArrayNonUniformIndexingNative = false;
1043 properties->shaderStorageBufferArrayNonUniformIndexingNative = false;
1044 properties->shaderStorageImageArrayNonUniformIndexingNative = false;
1045 properties->shaderInputAttachmentArrayNonUniformIndexingNative = false;
1046 properties->robustBufferAccessUpdateAfterBind = false;
1047 properties->quadDivergentImplicitLod = false;
1048
1049 size_t max_descriptor_set_size = ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
1050 (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
1051 32 /* storage buffer, 32 due to potential space wasted on alignment */ +
1052 32 /* sampler, largest when combined with image */ +
1053 64 /* sampled image */ +
1054 64 /* storage image */);
1055 properties->maxPerStageDescriptorUpdateAfterBindSamplers = max_descriptor_set_size;
1056 properties->maxPerStageDescriptorUpdateAfterBindUniformBuffers = max_descriptor_set_size;
1057 properties->maxPerStageDescriptorUpdateAfterBindStorageBuffers = max_descriptor_set_size;
1058 properties->maxPerStageDescriptorUpdateAfterBindSampledImages = max_descriptor_set_size;
1059 properties->maxPerStageDescriptorUpdateAfterBindStorageImages = max_descriptor_set_size;
1060 properties->maxPerStageDescriptorUpdateAfterBindInputAttachments = max_descriptor_set_size;
1061 properties->maxPerStageUpdateAfterBindResources = max_descriptor_set_size;
1062 properties->maxDescriptorSetUpdateAfterBindSamplers = max_descriptor_set_size;
1063 properties->maxDescriptorSetUpdateAfterBindUniformBuffers = max_descriptor_set_size;
1064 properties->maxDescriptorSetUpdateAfterBindUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS;
1065 properties->maxDescriptorSetUpdateAfterBindStorageBuffers = max_descriptor_set_size;
1066 properties->maxDescriptorSetUpdateAfterBindStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS;
1067 properties->maxDescriptorSetUpdateAfterBindSampledImages = max_descriptor_set_size;
1068 properties->maxDescriptorSetUpdateAfterBindStorageImages = max_descriptor_set_size;
1069 properties->maxDescriptorSetUpdateAfterBindInputAttachments = max_descriptor_set_size;
1070 break;
1071 }
1072 default:
1073 break;
1074 }
1075 }
1076 }
1077
1078 static void radv_get_physical_device_queue_family_properties(
1079 struct radv_physical_device* pdevice,
1080 uint32_t* pCount,
1081 VkQueueFamilyProperties** pQueueFamilyProperties)
1082 {
1083 int num_queue_families = 1;
1084 int idx;
1085 if (pdevice->rad_info.num_compute_rings > 0 &&
1086 !(pdevice->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE))
1087 num_queue_families++;
1088
1089 if (pQueueFamilyProperties == NULL) {
1090 *pCount = num_queue_families;
1091 return;
1092 }
1093
1094 if (!*pCount)
1095 return;
1096
1097 idx = 0;
1098 if (*pCount >= 1) {
1099 *pQueueFamilyProperties[idx] = (VkQueueFamilyProperties) {
1100 .queueFlags = VK_QUEUE_GRAPHICS_BIT |
1101 VK_QUEUE_COMPUTE_BIT |
1102 VK_QUEUE_TRANSFER_BIT |
1103 VK_QUEUE_SPARSE_BINDING_BIT,
1104 .queueCount = 1,
1105 .timestampValidBits = 64,
1106 .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
1107 };
1108 idx++;
1109 }
1110
1111 if (pdevice->rad_info.num_compute_rings > 0 &&
1112 !(pdevice->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE)) {
1113 if (*pCount > idx) {
1114 *pQueueFamilyProperties[idx] = (VkQueueFamilyProperties) {
1115 .queueFlags = VK_QUEUE_COMPUTE_BIT |
1116 VK_QUEUE_TRANSFER_BIT |
1117 VK_QUEUE_SPARSE_BINDING_BIT,
1118 .queueCount = pdevice->rad_info.num_compute_rings,
1119 .timestampValidBits = 64,
1120 .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
1121 };
1122 idx++;
1123 }
1124 }
1125 *pCount = idx;
1126 }
1127
1128 void radv_GetPhysicalDeviceQueueFamilyProperties(
1129 VkPhysicalDevice physicalDevice,
1130 uint32_t* pCount,
1131 VkQueueFamilyProperties* pQueueFamilyProperties)
1132 {
1133 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
1134 if (!pQueueFamilyProperties) {
1135 radv_get_physical_device_queue_family_properties(pdevice, pCount, NULL);
1136 return;
1137 }
1138 VkQueueFamilyProperties *properties[] = {
1139 pQueueFamilyProperties + 0,
1140 pQueueFamilyProperties + 1,
1141 pQueueFamilyProperties + 2,
1142 };
1143 radv_get_physical_device_queue_family_properties(pdevice, pCount, properties);
1144 assert(*pCount <= 3);
1145 }
1146
1147 void radv_GetPhysicalDeviceQueueFamilyProperties2(
1148 VkPhysicalDevice physicalDevice,
1149 uint32_t* pCount,
1150 VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
1151 {
1152 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
1153 if (!pQueueFamilyProperties) {
1154 radv_get_physical_device_queue_family_properties(pdevice, pCount, NULL);
1155 return;
1156 }
1157 VkQueueFamilyProperties *properties[] = {
1158 &pQueueFamilyProperties[0].queueFamilyProperties,
1159 &pQueueFamilyProperties[1].queueFamilyProperties,
1160 &pQueueFamilyProperties[2].queueFamilyProperties,
1161 };
1162 radv_get_physical_device_queue_family_properties(pdevice, pCount, properties);
1163 assert(*pCount <= 3);
1164 }
1165
1166 void radv_GetPhysicalDeviceMemoryProperties(
1167 VkPhysicalDevice physicalDevice,
1168 VkPhysicalDeviceMemoryProperties *pMemoryProperties)
1169 {
1170 RADV_FROM_HANDLE(radv_physical_device, physical_device, physicalDevice);
1171
1172 *pMemoryProperties = physical_device->memory_properties;
1173 }
1174
1175 void radv_GetPhysicalDeviceMemoryProperties2(
1176 VkPhysicalDevice physicalDevice,
1177 VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
1178 {
1179 radv_GetPhysicalDeviceMemoryProperties(physicalDevice,
1180 &pMemoryProperties->memoryProperties);
1181 }
1182
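/* Imported host pointers can only be placed in cacheable GTT, so report just
 * the RADV_MEM_TYPE_GTT_CACHED memory type for them.
 */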
1183 VkResult radv_GetMemoryHostPointerPropertiesEXT(
1184 VkDevice _device,
1185 VkExternalMemoryHandleTypeFlagBitsKHR handleType,
1186 const void *pHostPointer,
1187 VkMemoryHostPointerPropertiesEXT *pMemoryHostPointerProperties)
1188 {
1189 RADV_FROM_HANDLE(radv_device, device, _device);
1190
1191 switch (handleType)
1192 {
1193 case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT: {
1194 const struct radv_physical_device *physical_device = device->physical_device;
1195 uint32_t memoryTypeBits = 0;
1196 for (int i = 0; i < physical_device->memory_properties.memoryTypeCount; i++) {
1197 if (physical_device->mem_type_indices[i] == RADV_MEM_TYPE_GTT_CACHED) {
1198 memoryTypeBits = (1 << i);
1199 break;
1200 }
1201 }
1202 pMemoryHostPointerProperties->memoryTypeBits = memoryTypeBits;
1203 return VK_SUCCESS;
1204 }
1205 default:
1206 return VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR;
1207 }
1208 }
1209
1210 static enum radeon_ctx_priority
1211 radv_get_queue_global_priority(const VkDeviceQueueGlobalPriorityCreateInfoEXT *pObj)
1212 {
1213 /* Default to MEDIUM when a specific global priority isn't requested */
1214 if (!pObj)
1215 return RADEON_CTX_PRIORITY_MEDIUM;
1216
1217 switch(pObj->globalPriority) {
1218 case VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT:
1219 return RADEON_CTX_PRIORITY_REALTIME;
1220 case VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT:
1221 return RADEON_CTX_PRIORITY_HIGH;
1222 case VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT:
1223 return RADEON_CTX_PRIORITY_MEDIUM;
1224 case VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT:
1225 return RADEON_CTX_PRIORITY_LOW;
1226 default:
1227 unreachable("Illegal global priority value");
1228 return RADEON_CTX_PRIORITY_INVALID;
1229 }
1230 }
1231
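/* Each VkQueue owns its own hardware context, created with the global priority
 * requested at device creation (MEDIUM if none was specified).
 */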
1232 static int
1233 radv_queue_init(struct radv_device *device, struct radv_queue *queue,
1234 uint32_t queue_family_index, int idx,
1235 VkDeviceQueueCreateFlags flags,
1236 const VkDeviceQueueGlobalPriorityCreateInfoEXT *global_priority)
1237 {
1238 queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1239 queue->device = device;
1240 queue->queue_family_index = queue_family_index;
1241 queue->queue_idx = idx;
1242 queue->priority = radv_get_queue_global_priority(global_priority);
1243 queue->flags = flags;
1244
1245 queue->hw_ctx = device->ws->ctx_create(device->ws, queue->priority);
1246 if (!queue->hw_ctx)
1247 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1248
1249 return VK_SUCCESS;
1250 }
1251
1252 static void
1253 radv_queue_finish(struct radv_queue *queue)
1254 {
1255 if (queue->hw_ctx)
1256 queue->device->ws->ctx_destroy(queue->hw_ctx);
1257
1258 if (queue->initial_full_flush_preamble_cs)
1259 queue->device->ws->cs_destroy(queue->initial_full_flush_preamble_cs);
1260 if (queue->initial_preamble_cs)
1261 queue->device->ws->cs_destroy(queue->initial_preamble_cs);
1262 if (queue->continue_preamble_cs)
1263 queue->device->ws->cs_destroy(queue->continue_preamble_cs);
1264 if (queue->descriptor_bo)
1265 queue->device->ws->buffer_destroy(queue->descriptor_bo);
1266 if (queue->scratch_bo)
1267 queue->device->ws->buffer_destroy(queue->scratch_bo);
1268 if (queue->esgs_ring_bo)
1269 queue->device->ws->buffer_destroy(queue->esgs_ring_bo);
1270 if (queue->gsvs_ring_bo)
1271 queue->device->ws->buffer_destroy(queue->gsvs_ring_bo);
1272 if (queue->tess_rings_bo)
1273 queue->device->ws->buffer_destroy(queue->tess_rings_bo);
1274 if (queue->compute_scratch_bo)
1275 queue->device->ws->buffer_destroy(queue->compute_scratch_bo);
1276 }
1277
1278 static void
1279 radv_bo_list_init(struct radv_bo_list *bo_list)
1280 {
1281 pthread_mutex_init(&bo_list->mutex, NULL);
1282 bo_list->list.count = bo_list->capacity = 0;
1283 bo_list->list.bos = NULL;
1284 }
1285
1286 static void
1287 radv_bo_list_finish(struct radv_bo_list *bo_list)
1288 {
1289 free(bo_list->list.bos);
1290 pthread_mutex_destroy(&bo_list->mutex);
1291 }
1292
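/* The global BO list is only maintained when the device was created with
 * VK_EXT_descriptor_indexing enabled; in that case every live buffer object is
 * tracked here and handed to the kernel with each submission.
 */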
1293 static VkResult radv_bo_list_add(struct radv_device *device,
1294 struct radeon_winsys_bo *bo)
1295 {
1296 struct radv_bo_list *bo_list = &device->bo_list;
1297
1298 if (unlikely(!device->use_global_bo_list))
1299 return VK_SUCCESS;
1300
1301 pthread_mutex_lock(&bo_list->mutex);
1302 if (bo_list->list.count == bo_list->capacity) {
1303 unsigned capacity = MAX2(4, bo_list->capacity * 2);
1304 void *data = realloc(bo_list->list.bos, capacity * sizeof(struct radeon_winsys_bo*));
1305
1306 if (!data) {
1307 pthread_mutex_unlock(&bo_list->mutex);
1308 return VK_ERROR_OUT_OF_HOST_MEMORY;
1309 }
1310
1311 bo_list->list.bos = (struct radeon_winsys_bo**)data;
1312 bo_list->capacity = capacity;
1313 }
1314
1315 bo_list->list.bos[bo_list->list.count++] = bo;
1316 pthread_mutex_unlock(&bo_list->mutex);
1317 return VK_SUCCESS;
1318 }
1319
1320 static void radv_bo_list_remove(struct radv_device *device,
1321 struct radeon_winsys_bo *bo)
1322 {
1323 struct radv_bo_list *bo_list = &device->bo_list;
1324
1325 if (unlikely(!device->use_global_bo_list))
1326 return;
1327
1328 pthread_mutex_lock(&bo_list->mutex);
1329 for(unsigned i = 0; i < bo_list->list.count; ++i) {
1330 if (bo_list->list.bos[i] == bo) {
1331 bo_list->list.bos[i] = bo_list->list.bos[bo_list->list.count - 1];
1332 --bo_list->list.count;
1333 break;
1334 }
1335 }
1336 pthread_mutex_unlock(&bo_list->mutex);
1337 }
1338
1339 static void
1340 radv_device_init_gs_info(struct radv_device *device)
1341 {
1342 device->gs_table_depth = ac_get_gs_table_depth(device->physical_device->rad_info.chip_class,
1343 device->physical_device->rad_info.family);
1344 }
1345
1346 static int radv_get_device_extension_index(const char *name)
1347 {
1348 for (unsigned i = 0; i < RADV_DEVICE_EXTENSION_COUNT; ++i) {
1349 if (strcmp(name, radv_device_extensions[i].extensionName) == 0)
1350 return i;
1351 }
1352 return -1;
1353 }
1354
1355 VkResult radv_CreateDevice(
1356 VkPhysicalDevice physicalDevice,
1357 const VkDeviceCreateInfo* pCreateInfo,
1358 const VkAllocationCallbacks* pAllocator,
1359 VkDevice* pDevice)
1360 {
1361 RADV_FROM_HANDLE(radv_physical_device, physical_device, physicalDevice);
1362 VkResult result;
1363 struct radv_device *device;
1364
1365 bool keep_shader_info = false;
1366
1367 /* Check enabled features */
1368 if (pCreateInfo->pEnabledFeatures) {
1369 VkPhysicalDeviceFeatures supported_features;
1370 radv_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
1371 VkBool32 *supported_feature = (VkBool32 *)&supported_features;
1372 VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
1373 unsigned num_features = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
1374 for (uint32_t i = 0; i < num_features; i++) {
1375 if (enabled_feature[i] && !supported_feature[i])
1376 return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
1377 }
1378 }
1379
1380 device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
1381 sizeof(*device), 8,
1382 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1383 if (!device)
1384 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1385
1386 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1387 device->instance = physical_device->instance;
1388 device->physical_device = physical_device;
1389
1390 device->ws = physical_device->ws;
1391 if (pAllocator)
1392 device->alloc = *pAllocator;
1393 else
1394 device->alloc = physical_device->instance->alloc;
1395
1396 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
1397 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
1398 int index = radv_get_device_extension_index(ext_name);
1399 if (index < 0 || !physical_device->supported_extensions.extensions[index]) {
1400 vk_free(&device->alloc, device);
1401 return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
1402 }
1403
1404 device->enabled_extensions.extensions[index] = true;
1405 }
1406
1407 keep_shader_info = device->enabled_extensions.AMD_shader_info;
1408
1409 /* With update after bind we can't attach bo's to the command buffer
1410 * from the descriptor set anymore, so we have to use a global BO list.
1411 */
1412 device->use_global_bo_list =
1413 device->enabled_extensions.EXT_descriptor_indexing;
1414
1415 mtx_init(&device->shader_slab_mutex, mtx_plain);
1416 list_inithead(&device->shader_slabs);
1417
1418 radv_bo_list_init(&device->bo_list);
1419
1420 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
1421 const VkDeviceQueueCreateInfo *queue_create = &pCreateInfo->pQueueCreateInfos[i];
1422 uint32_t qfi = queue_create->queueFamilyIndex;
1423 const VkDeviceQueueGlobalPriorityCreateInfoEXT *global_priority =
1424 vk_find_struct_const(queue_create->pNext, DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT);
1425
1426 assert(!global_priority || device->physical_device->rad_info.has_ctx_priority);
1427
1428 device->queues[qfi] = vk_alloc(&device->alloc,
1429 queue_create->queueCount * sizeof(struct radv_queue), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1430 if (!device->queues[qfi]) {
1431 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1432 goto fail;
1433 }
1434
1435 memset(device->queues[qfi], 0, queue_create->queueCount * sizeof(struct radv_queue));
1436
1437 device->queue_count[qfi] = queue_create->queueCount;
1438
1439 for (unsigned q = 0; q < queue_create->queueCount; q++) {
1440 result = radv_queue_init(device, &device->queues[qfi][q],
1441 qfi, q, queue_create->flags,
1442 global_priority);
1443 if (result != VK_SUCCESS)
1444 goto fail;
1445 }
1446 }
1447
1448 device->pbb_allowed = device->physical_device->rad_info.chip_class >= GFX9 &&
1449 (device->instance->perftest_flags & RADV_PERFTEST_BINNING);
1450
1451 /* Disabled and not implemented for now. */
1452 device->dfsm_allowed = device->pbb_allowed && false;
1453
1454 #ifdef ANDROID
1455 device->always_use_syncobj = device->physical_device->rad_info.has_syncobj_wait_for_submit;
1456 #endif
1457
1458 /* The maximum number of scratch waves. Scratch space isn't divided
1459 * evenly between CUs. The number is only a function of the number of CUs.
1460 * We can decrease the constant to decrease the scratch buffer size.
1461 *
1462 * device->scratch_waves must be >= the maximum possible size of
1463 * 1 threadgroup, so that the hw doesn't hang from being unable
1464 * to start any.
1465 *
1466 * The recommended value is 4 per CU at most. Higher numbers don't
1467 * bring much benefit, but they still occupy chip resources (think
1468 * async compute). I've seen ~2% performance difference between 4 and 32.
1469 */
1470 uint32_t max_threads_per_block = 2048;
1471 device->scratch_waves = MAX2(32 * physical_device->rad_info.num_good_compute_units,
1472 max_threads_per_block / 64);
1473
1474 device->dispatch_initiator = S_00B800_COMPUTE_SHADER_EN(1);
1475
1476 if (device->physical_device->rad_info.chip_class >= CIK) {
1477 /* If the KMD allows it (there is a KMD hw register for it),
1478 * allow launching waves out-of-order.
1479 */
1480 device->dispatch_initiator |= S_00B800_ORDER_MODE(1);
1481 }
1482
1483 radv_device_init_gs_info(device);
1484
1485 device->tess_offchip_block_dw_size =
1486 device->physical_device->rad_info.family == CHIP_HAWAII ? 4096 : 8192;
1487 device->has_distributed_tess =
1488 device->physical_device->rad_info.chip_class >= VI &&
1489 device->physical_device->rad_info.max_se >= 2;
1490
1491 if (getenv("RADV_TRACE_FILE")) {
1492 const char *filename = getenv("RADV_TRACE_FILE");
1493
1494 keep_shader_info = true;
1495
1496 if (!radv_init_trace(device))
1497 goto fail;
1498
1499 fprintf(stderr, "Trace file will be dumped to %s\n", filename);
1500 radv_dump_enabled_options(device, stderr);
1501 }
1502
1503 device->keep_shader_info = keep_shader_info;
1504
1505 result = radv_device_init_meta(device);
1506 if (result != VK_SUCCESS)
1507 goto fail;
1508
1509 radv_device_init_msaa(device);
1510
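/* Pre-build a trivial CS per queue family: the GFX one enables context
 * register loading/shadowing via CONTEXT_CONTROL, the compute one is a single
 * NOP. These are used for submissions that carry no command buffers.
 */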
1511 for (int family = 0; family < RADV_MAX_QUEUE_FAMILIES; ++family) {
1512 device->empty_cs[family] = device->ws->cs_create(device->ws, family);
1513 switch (family) {
1514 case RADV_QUEUE_GENERAL:
1515 radeon_emit(device->empty_cs[family], PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
1516 radeon_emit(device->empty_cs[family], CONTEXT_CONTROL_LOAD_ENABLE(1));
1517 radeon_emit(device->empty_cs[family], CONTEXT_CONTROL_SHADOW_ENABLE(1));
1518 break;
1519 case RADV_QUEUE_COMPUTE:
1520 radeon_emit(device->empty_cs[family], PKT3(PKT3_NOP, 0, 0));
1521 radeon_emit(device->empty_cs[family], 0);
1522 break;
1523 }
1524 device->ws->cs_finalize(device->empty_cs[family]);
1525 }
1526
1527 if (device->physical_device->rad_info.chip_class >= CIK)
1528 cik_create_gfx_config(device);
1529
1530 VkPipelineCacheCreateInfo ci;
1531 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
1532 ci.pNext = NULL;
1533 ci.flags = 0;
1534 ci.pInitialData = NULL;
1535 ci.initialDataSize = 0;
1536 VkPipelineCache pc;
1537 result = radv_CreatePipelineCache(radv_device_to_handle(device),
1538 &ci, NULL, &pc);
1539 if (result != VK_SUCCESS)
1540 goto fail_meta;
1541
1542 device->mem_cache = radv_pipeline_cache_from_handle(pc);
1543
1544 *pDevice = radv_device_to_handle(device);
1545 return VK_SUCCESS;
1546
1547 fail_meta:
1548 radv_device_finish_meta(device);
1549 fail:
1550 radv_bo_list_finish(&device->bo_list);
1551
1552 if (device->trace_bo)
1553 device->ws->buffer_destroy(device->trace_bo);
1554
1555 if (device->gfx_init)
1556 device->ws->buffer_destroy(device->gfx_init);
1557
1558 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
1559 for (unsigned q = 0; q < device->queue_count[i]; q++)
1560 radv_queue_finish(&device->queues[i][q]);
1561 if (device->queue_count[i])
1562 vk_free(&device->alloc, device->queues[i]);
1563 }
1564
1565 vk_free(&device->alloc, device);
1566 return result;
1567 }
1568
1569 void radv_DestroyDevice(
1570 VkDevice _device,
1571 const VkAllocationCallbacks* pAllocator)
1572 {
1573 RADV_FROM_HANDLE(radv_device, device, _device);
1574
1575 if (!device)
1576 return;
1577
1578 if (device->trace_bo)
1579 device->ws->buffer_destroy(device->trace_bo);
1580
1581 if (device->gfx_init)
1582 device->ws->buffer_destroy(device->gfx_init);
1583
1584 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
1585 for (unsigned q = 0; q < device->queue_count[i]; q++)
1586 radv_queue_finish(&device->queues[i][q]);
1587 if (device->queue_count[i])
1588 vk_free(&device->alloc, device->queues[i]);
1589 if (device->empty_cs[i])
1590 device->ws->cs_destroy(device->empty_cs[i]);
1591 }
1592 radv_device_finish_meta(device);
1593
1594 VkPipelineCache pc = radv_pipeline_cache_to_handle(device->mem_cache);
1595 radv_DestroyPipelineCache(radv_device_to_handle(device), pc, NULL);
1596
1597 radv_destroy_shader_slabs(device);
1598
1599 radv_bo_list_finish(&device->bo_list);
1600 vk_free(&device->alloc, device);
1601 }
1602
1603 VkResult radv_EnumerateInstanceLayerProperties(
1604 uint32_t* pPropertyCount,
1605 VkLayerProperties* pProperties)
1606 {
1607 if (pProperties == NULL) {
1608 *pPropertyCount = 0;
1609 return VK_SUCCESS;
1610 }
1611
1612 /* None supported at this time */
1613 return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
1614 }
1615
1616 VkResult radv_EnumerateDeviceLayerProperties(
1617 VkPhysicalDevice physicalDevice,
1618 uint32_t* pPropertyCount,
1619 VkLayerProperties* pProperties)
1620 {
1621 if (pProperties == NULL) {
1622 *pPropertyCount = 0;
1623 return VK_SUCCESS;
1624 }
1625
1626 /* None supported at this time */
1627 return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
1628 }
1629
1630 void radv_GetDeviceQueue2(
1631 VkDevice _device,
1632 const VkDeviceQueueInfo2* pQueueInfo,
1633 VkQueue* pQueue)
1634 {
1635 RADV_FROM_HANDLE(radv_device, device, _device);
1636 struct radv_queue *queue;
1637
1638 queue = &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
1639 if (pQueueInfo->flags != queue->flags) {
1640 /* From the Vulkan 1.1.70 spec:
1641 *
1642 * "The queue returned by vkGetDeviceQueue2 must have the same
1643 * flags value from this structure as that used at device
1644 * creation time in a VkDeviceQueueCreateInfo instance. If no
1645 * matching flags were specified at device creation time then
1646 * pQueue will return VK_NULL_HANDLE."
1647 */
1648 *pQueue = VK_NULL_HANDLE;
1649 return;
1650 }
1651
1652 *pQueue = radv_queue_to_handle(queue);
1653 }
1654
1655 void radv_GetDeviceQueue(
1656 VkDevice _device,
1657 uint32_t queueFamilyIndex,
1658 uint32_t queueIndex,
1659 VkQueue* pQueue)
1660 {
1661 const VkDeviceQueueInfo2 info = (VkDeviceQueueInfo2) {
1662 .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
1663 .queueFamilyIndex = queueFamilyIndex,
1664 .queueIndex = queueIndex
1665 };
1666
1667 radv_GetDeviceQueue2(_device, &info, pQueue);
1668 }
1669
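/* Fill the descriptor BO (starting after the scratch rsrc at map[0..3]) with
 * buffer descriptors for the ES->GS ring (ES and GS views), the GS->VS ring
 * (VS and GS views), the tess factor ring and the off-chip tess ring,
 * optionally followed by the 1x-16x sample position tables.
 */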
1670 static void
1671 fill_geom_tess_rings(struct radv_queue *queue,
1672 uint32_t *map,
1673 bool add_sample_positions,
1674 uint32_t esgs_ring_size,
1675 struct radeon_winsys_bo *esgs_ring_bo,
1676 uint32_t gsvs_ring_size,
1677 struct radeon_winsys_bo *gsvs_ring_bo,
1678 uint32_t tess_factor_ring_size,
1679 uint32_t tess_offchip_ring_offset,
1680 uint32_t tess_offchip_ring_size,
1681 struct radeon_winsys_bo *tess_rings_bo)
1682 {
1683 uint64_t esgs_va = 0, gsvs_va = 0;
1684 uint64_t tess_va = 0, tess_offchip_va = 0;
1685 uint32_t *desc = &map[4];
1686
1687 if (esgs_ring_bo)
1688 esgs_va = radv_buffer_get_va(esgs_ring_bo);
1689 if (gsvs_ring_bo)
1690 gsvs_va = radv_buffer_get_va(gsvs_ring_bo);
1691 if (tess_rings_bo) {
1692 tess_va = radv_buffer_get_va(tess_rings_bo);
1693 tess_offchip_va = tess_va + tess_offchip_ring_offset;
1694 }
1695
1696 /* stride 0, num records - size, add tid, swizzle, elsize4,
1697 index stride 64 */
1698 desc[0] = esgs_va;
1699 desc[1] = S_008F04_BASE_ADDRESS_HI(esgs_va >> 32) |
1700 S_008F04_STRIDE(0) |
1701 S_008F04_SWIZZLE_ENABLE(true);
1702 desc[2] = esgs_ring_size;
1703 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1704 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1705 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1706 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1707 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1708 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1709 S_008F0C_ELEMENT_SIZE(1) |
1710 S_008F0C_INDEX_STRIDE(3) |
1711 S_008F0C_ADD_TID_ENABLE(true);
1712
1713 desc += 4;
1714 /* GS entry for ES->GS ring */
1715 /* stride 0, num records - size, elsize0,
1716 index stride 0 */
1717 desc[0] = esgs_va;
1718 desc[1] = S_008F04_BASE_ADDRESS_HI(esgs_va >> 32)|
1719 S_008F04_STRIDE(0) |
1720 S_008F04_SWIZZLE_ENABLE(false);
1721 desc[2] = esgs_ring_size;
1722 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1723 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1724 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1725 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1726 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1727 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1728 S_008F0C_ELEMENT_SIZE(0) |
1729 S_008F0C_INDEX_STRIDE(0) |
1730 S_008F0C_ADD_TID_ENABLE(false);
1731
1732 desc += 4;
1733 /* VS entry for GS->VS ring */
1734 /* stride 0, num records - size, elsize0,
1735 index stride 0 */
1736 desc[0] = gsvs_va;
1737 desc[1] = S_008F04_BASE_ADDRESS_HI(gsvs_va >> 32)|
1738 S_008F04_STRIDE(0) |
1739 S_008F04_SWIZZLE_ENABLE(false);
1740 desc[2] = gsvs_ring_size;
1741 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1742 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1743 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1744 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1745 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1746 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1747 S_008F0C_ELEMENT_SIZE(0) |
1748 S_008F0C_INDEX_STRIDE(0) |
1749 S_008F0C_ADD_TID_ENABLE(false);
1750 desc += 4;
1751
1752 /* stride gsvs_itemsize, num records 64
1753 elsize 4, index stride 16 */
1754 /* shader will patch stride and desc[2] */
1755 desc[0] = gsvs_va;
1756 desc[1] = S_008F04_BASE_ADDRESS_HI(gsvs_va >> 32)|
1757 S_008F04_STRIDE(0) |
1758 S_008F04_SWIZZLE_ENABLE(true);
1759 desc[2] = 0;
1760 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1761 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1762 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1763 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1764 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1765 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1766 S_008F0C_ELEMENT_SIZE(1) |
1767 S_008F0C_INDEX_STRIDE(1) |
1768 S_008F0C_ADD_TID_ENABLE(true);
1769 desc += 4;
1770
1771 desc[0] = tess_va;
1772 desc[1] = S_008F04_BASE_ADDRESS_HI(tess_va >> 32) |
1773 S_008F04_STRIDE(0) |
1774 S_008F04_SWIZZLE_ENABLE(false);
1775 desc[2] = tess_factor_ring_size;
1776 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1777 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1778 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1779 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1780 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1781 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1782 S_008F0C_ELEMENT_SIZE(0) |
1783 S_008F0C_INDEX_STRIDE(0) |
1784 S_008F0C_ADD_TID_ENABLE(false);
1785 desc += 4;
1786
1787 desc[0] = tess_offchip_va;
1788 desc[1] = S_008F04_BASE_ADDRESS_HI(tess_offchip_va >> 32) |
1789 S_008F04_STRIDE(0) |
1790 S_008F04_SWIZZLE_ENABLE(false);
1791 desc[2] = tess_offchip_ring_size;
1792 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1793 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1794 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1795 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1796 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1797 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1798 S_008F0C_ELEMENT_SIZE(0) |
1799 S_008F0C_INDEX_STRIDE(0) |
1800 S_008F0C_ADD_TID_ENABLE(false);
1801 desc += 4;
1802
1803 /* add sample positions after all rings */
1804 memcpy(desc, queue->device->sample_locations_1x, 8);
1805 desc += 2;
1806 memcpy(desc, queue->device->sample_locations_2x, 16);
1807 desc += 4;
1808 memcpy(desc, queue->device->sample_locations_4x, 32);
1809 desc += 8;
1810 memcpy(desc, queue->device->sample_locations_8x, 64);
1811 desc += 16;
1812 memcpy(desc, queue->device->sample_locations_16x, 128);
1813 }
1814
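/* Compute the VGT_HS_OFFCHIP_PARAM value: the number of off-chip tessellation
 * buffers (clamped per chip class) and their granularity in dwords. The
 * clamped buffer count is also returned through max_offchip_buffers_p so the
 * caller can size the off-chip ring.
 */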
1815 static unsigned
1816 radv_get_hs_offchip_param(struct radv_device *device, uint32_t *max_offchip_buffers_p)
1817 {
1818 bool double_offchip_buffers = device->physical_device->rad_info.chip_class >= CIK &&
1819 device->physical_device->rad_info.family != CHIP_CARRIZO &&
1820 device->physical_device->rad_info.family != CHIP_STONEY;
1821 unsigned max_offchip_buffers_per_se = double_offchip_buffers ? 128 : 64;
1822 unsigned max_offchip_buffers = max_offchip_buffers_per_se *
1823 device->physical_device->rad_info.max_se;
1824 unsigned offchip_granularity;
1825 unsigned hs_offchip_param;
1826 switch (device->tess_offchip_block_dw_size) {
1827 default:
1828 assert(0);
1829 /* fall through */
1830 case 8192:
1831 offchip_granularity = V_03093C_X_8K_DWORDS;
1832 break;
1833 case 4096:
1834 offchip_granularity = V_03093C_X_4K_DWORDS;
1835 break;
1836 }
1837
1838 switch (device->physical_device->rad_info.chip_class) {
1839 case SI:
1840 max_offchip_buffers = MIN2(max_offchip_buffers, 126);
1841 break;
1842 case CIK:
1843 case VI:
1844 case GFX9:
1845 default:
1846 max_offchip_buffers = MIN2(max_offchip_buffers, 508);
1847 break;
1848 }
1849
1850 *max_offchip_buffers_p = max_offchip_buffers;
1851 if (device->physical_device->rad_info.chip_class >= CIK) {
1852 if (device->physical_device->rad_info.chip_class >= VI)
1853 --max_offchip_buffers;
1854 hs_offchip_param =
1855 S_03093C_OFFCHIP_BUFFERING(max_offchip_buffers) |
1856 S_03093C_OFFCHIP_GRANULARITY(offchip_granularity);
1857 } else {
1858 hs_offchip_param =
1859 S_0089B0_OFFCHIP_BUFFERING(max_offchip_buffers);
1860 }
1861 return hs_offchip_param;
1862 }
1863
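/* Program the ES->GS and GS->VS ring sizes (in 256-byte units) and add the
 * ring BOs to the CS; uconfig registers are used on CIK+ and config registers
 * on SI.
 */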
1864 static void
1865 radv_emit_gs_ring_sizes(struct radv_queue *queue, struct radeon_winsys_cs *cs,
1866 struct radeon_winsys_bo *esgs_ring_bo,
1867 uint32_t esgs_ring_size,
1868 struct radeon_winsys_bo *gsvs_ring_bo,
1869 uint32_t gsvs_ring_size)
1870 {
1871 if (!esgs_ring_bo && !gsvs_ring_bo)
1872 return;
1873
1874 if (esgs_ring_bo)
1875 radv_cs_add_buffer(queue->device->ws, cs, esgs_ring_bo, 8);
1876
1877 if (gsvs_ring_bo)
1878 radv_cs_add_buffer(queue->device->ws, cs, gsvs_ring_bo, 8);
1879
1880 if (queue->device->physical_device->rad_info.chip_class >= CIK) {
1881 radeon_set_uconfig_reg_seq(cs, R_030900_VGT_ESGS_RING_SIZE, 2);
1882 radeon_emit(cs, esgs_ring_size >> 8);
1883 radeon_emit(cs, gsvs_ring_size >> 8);
1884 } else {
1885 radeon_set_config_reg_seq(cs, R_0088C8_VGT_ESGS_RING_SIZE, 2);
1886 radeon_emit(cs, esgs_ring_size >> 8);
1887 radeon_emit(cs, gsvs_ring_size >> 8);
1888 }
1889 }
1890
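/* Program the tess factor ring base/size and VGT_HS_OFFCHIP_PARAM. The size
 * is expressed in dwords and the base address in 256-byte units; GFX9 also
 * needs the high bits of the address.
 */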
1891 static void
1892 radv_emit_tess_factor_ring(struct radv_queue *queue, struct radeon_winsys_cs *cs,
1893 unsigned hs_offchip_param, unsigned tf_ring_size,
1894 struct radeon_winsys_bo *tess_rings_bo)
1895 {
1896 uint64_t tf_va;
1897
1898 if (!tess_rings_bo)
1899 return;
1900
1901 tf_va = radv_buffer_get_va(tess_rings_bo);
1902
1903 radv_cs_add_buffer(queue->device->ws, cs, tess_rings_bo, 8);
1904
1905 if (queue->device->physical_device->rad_info.chip_class >= CIK) {
1906 radeon_set_uconfig_reg(cs, R_030938_VGT_TF_RING_SIZE,
1907 S_030938_SIZE(tf_ring_size / 4));
1908 radeon_set_uconfig_reg(cs, R_030940_VGT_TF_MEMORY_BASE,
1909 tf_va >> 8);
1910 if (queue->device->physical_device->rad_info.chip_class >= GFX9) {
1911 radeon_set_uconfig_reg(cs, R_030944_VGT_TF_MEMORY_BASE_HI,
1912 S_030944_BASE_HI(tf_va >> 40));
1913 }
1914 radeon_set_uconfig_reg(cs, R_03093C_VGT_HS_OFFCHIP_PARAM,
1915 hs_offchip_param);
1916 } else {
1917 radeon_set_config_reg(cs, R_008988_VGT_TF_RING_SIZE,
1918 S_008988_SIZE(tf_ring_size / 4));
1919 radeon_set_config_reg(cs, R_0089B8_VGT_TF_MEMORY_BASE,
1920 tf_va >> 8);
1921 radeon_set_config_reg(cs, R_0089B0_VGT_HS_OFFCHIP_PARAM,
1922 hs_offchip_param);
1923 }
1924 }
1925
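/* Emit the compute scratch buffer descriptor (base address and swizzled rsrc
 * word) into COMPUTE_USER_DATA_0/1 so compute shaders can use scratch.
 */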
1926 static void
1927 radv_emit_compute_scratch(struct radv_queue *queue, struct radeon_winsys_cs *cs,
1928 struct radeon_winsys_bo *compute_scratch_bo)
1929 {
1930 uint64_t scratch_va;
1931
1932 if (!compute_scratch_bo)
1933 return;
1934
1935 scratch_va = radv_buffer_get_va(compute_scratch_bo);
1936
1937 radv_cs_add_buffer(queue->device->ws, cs, compute_scratch_bo, 8);
1938
1939 radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
1940 radeon_emit(cs, scratch_va);
1941 radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
1942 S_008F04_SWIZZLE_ENABLE(1));
1943 }
1944
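/* Write the address of the global descriptor BO (scratch rsrc, rings and
 * sample positions) into the first user data SGPR of every hardware shader
 * stage; GFX9 merges LS into HS and ES into GS, so it has fewer stage
 * registers to program.
 */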
1945 static void
1946 radv_emit_global_shader_pointers(struct radv_queue *queue,
1947 struct radeon_winsys_cs *cs,
1948 struct radeon_winsys_bo *descriptor_bo)
1949 {
1950 uint64_t va;
1951
1952 if (!descriptor_bo)
1953 return;
1954
1955 va = radv_buffer_get_va(descriptor_bo);
1956
1957 radv_cs_add_buffer(queue->device->ws, cs, descriptor_bo, 8);
1958
1959 if (queue->device->physical_device->rad_info.chip_class >= GFX9) {
1960 uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0,
1961 R_00B130_SPI_SHADER_USER_DATA_VS_0,
1962 R_00B208_SPI_SHADER_USER_DATA_ADDR_LO_GS,
1963 R_00B408_SPI_SHADER_USER_DATA_ADDR_LO_HS};
1964
1965 for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
1966 radv_emit_shader_pointer(cs, regs[i], va);
1967 }
1968 } else {
1969 uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0,
1970 R_00B130_SPI_SHADER_USER_DATA_VS_0,
1971 R_00B230_SPI_SHADER_USER_DATA_GS_0,
1972 R_00B330_SPI_SHADER_USER_DATA_ES_0,
1973 R_00B430_SPI_SHADER_USER_DATA_HS_0,
1974 R_00B530_SPI_SHADER_USER_DATA_LS_0};
1975
1976 for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
1977 radv_emit_shader_pointer(cs, regs[i], va);
1978 }
1979 }
1980 }
1981
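/* Build (or reuse) the three preamble command streams for a queue: one that
 * performs a full cache flush, one with a lighter invalidate-only flush, and
 * one used to continue a split submission. Growing scratch/ring requirements
 * cause the corresponding BOs and the descriptor BO to be reallocated.
 */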
1982 static VkResult
1983 radv_get_preamble_cs(struct radv_queue *queue,
1984 uint32_t scratch_size,
1985 uint32_t compute_scratch_size,
1986 uint32_t esgs_ring_size,
1987 uint32_t gsvs_ring_size,
1988 bool needs_tess_rings,
1989 bool needs_sample_positions,
1990 struct radeon_winsys_cs **initial_full_flush_preamble_cs,
1991 struct radeon_winsys_cs **initial_preamble_cs,
1992 struct radeon_winsys_cs **continue_preamble_cs)
1993 {
1994 struct radeon_winsys_bo *scratch_bo = NULL;
1995 struct radeon_winsys_bo *descriptor_bo = NULL;
1996 struct radeon_winsys_bo *compute_scratch_bo = NULL;
1997 struct radeon_winsys_bo *esgs_ring_bo = NULL;
1998 struct radeon_winsys_bo *gsvs_ring_bo = NULL;
1999 struct radeon_winsys_bo *tess_rings_bo = NULL;
2000 struct radeon_winsys_cs *dest_cs[3] = {0};
2001 bool add_tess_rings = false, add_sample_positions = false;
2002 unsigned tess_factor_ring_size = 0, tess_offchip_ring_size = 0;
2003 unsigned max_offchip_buffers;
2004 unsigned hs_offchip_param = 0;
2005 unsigned tess_offchip_ring_offset;
2006 uint32_t ring_bo_flags = RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING;
2007 if (!queue->has_tess_rings) {
2008 if (needs_tess_rings)
2009 add_tess_rings = true;
2010 }
2011 if (!queue->has_sample_positions) {
2012 if (needs_sample_positions)
2013 add_sample_positions = true;
2014 }
2015 tess_factor_ring_size = 32768 * queue->device->physical_device->rad_info.max_se;
2016 hs_offchip_param = radv_get_hs_offchip_param(queue->device,
2017 &max_offchip_buffers);
2018 tess_offchip_ring_offset = align(tess_factor_ring_size, 64 * 1024);
2019 tess_offchip_ring_size = max_offchip_buffers *
2020 queue->device->tess_offchip_block_dw_size * 4;
2021
2022 if (scratch_size <= queue->scratch_size &&
2023 compute_scratch_size <= queue->compute_scratch_size &&
2024 esgs_ring_size <= queue->esgs_ring_size &&
2025 gsvs_ring_size <= queue->gsvs_ring_size &&
2026 !add_tess_rings && !add_sample_positions &&
2027 queue->initial_preamble_cs) {
2028 *initial_full_flush_preamble_cs = queue->initial_full_flush_preamble_cs;
2029 *initial_preamble_cs = queue->initial_preamble_cs;
2030 *continue_preamble_cs = queue->continue_preamble_cs;
2031 if (!scratch_size && !compute_scratch_size && !esgs_ring_size && !gsvs_ring_size)
2032 *continue_preamble_cs = NULL;
2033 return VK_SUCCESS;
2034 }
2035
2036 if (scratch_size > queue->scratch_size) {
2037 scratch_bo = queue->device->ws->buffer_create(queue->device->ws,
2038 scratch_size,
2039 4096,
2040 RADEON_DOMAIN_VRAM,
2041 ring_bo_flags);
2042 if (!scratch_bo)
2043 goto fail;
2044 } else
2045 scratch_bo = queue->scratch_bo;
2046
2047 if (compute_scratch_size > queue->compute_scratch_size) {
2048 compute_scratch_bo = queue->device->ws->buffer_create(queue->device->ws,
2049 compute_scratch_size,
2050 4096,
2051 RADEON_DOMAIN_VRAM,
2052 ring_bo_flags);
2053 if (!compute_scratch_bo)
2054 goto fail;
2055
2056 } else
2057 compute_scratch_bo = queue->compute_scratch_bo;
2058
2059 if (esgs_ring_size > queue->esgs_ring_size) {
2060 esgs_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
2061 esgs_ring_size,
2062 4096,
2063 RADEON_DOMAIN_VRAM,
2064 ring_bo_flags);
2065 if (!esgs_ring_bo)
2066 goto fail;
2067 } else {
2068 esgs_ring_bo = queue->esgs_ring_bo;
2069 esgs_ring_size = queue->esgs_ring_size;
2070 }
2071
2072 if (gsvs_ring_size > queue->gsvs_ring_size) {
2073 gsvs_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
2074 gsvs_ring_size,
2075 4096,
2076 RADEON_DOMAIN_VRAM,
2077 ring_bo_flags);
2078 if (!gsvs_ring_bo)
2079 goto fail;
2080 } else {
2081 gsvs_ring_bo = queue->gsvs_ring_bo;
2082 gsvs_ring_size = queue->gsvs_ring_size;
2083 }
2084
2085 if (add_tess_rings) {
2086 tess_rings_bo = queue->device->ws->buffer_create(queue->device->ws,
2087 tess_offchip_ring_offset + tess_offchip_ring_size,
2088 256,
2089 RADEON_DOMAIN_VRAM,
2090 ring_bo_flags);
2091 if (!tess_rings_bo)
2092 goto fail;
2093 } else {
2094 tess_rings_bo = queue->tess_rings_bo;
2095 }
2096
2097 if (scratch_bo != queue->scratch_bo ||
2098 esgs_ring_bo != queue->esgs_ring_bo ||
2099 gsvs_ring_bo != queue->gsvs_ring_bo ||
2100 tess_rings_bo != queue->tess_rings_bo ||
2101 add_sample_positions) {
2102 uint32_t size = 0;
2103 if (gsvs_ring_bo || esgs_ring_bo ||
2104 tess_rings_bo || add_sample_positions) {
2105 size = 112; /* scratch rsrc (2 dwords) + 2 dwords of padding + 6 ring descriptors * 4 dwords = 112 bytes */
2106 if (add_sample_positions)
2107 size += 256; /* 1x/2x/4x/8x/16x sample positions: (1+2+4+8+16) samples * 8 bytes = 248 bytes, rounded up to 256. */
2108 }
2109 else if (scratch_bo)
2110 size = 8; /* 2 dword */
2111
2112 descriptor_bo = queue->device->ws->buffer_create(queue->device->ws,
2113 size,
2114 4096,
2115 RADEON_DOMAIN_VRAM,
2116 RADEON_FLAG_CPU_ACCESS |
2117 RADEON_FLAG_NO_INTERPROCESS_SHARING |
2118 RADEON_FLAG_READ_ONLY);
2119 if (!descriptor_bo)
2120 goto fail;
2121 } else
2122 descriptor_bo = queue->descriptor_bo;
2123
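/* Build the three preamble variants; they only differ in the cache flush
 * emitted at the end (full flush, invalidate-only, or none).
 */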
2124 for(int i = 0; i < 3; ++i) {
2125 struct radeon_winsys_cs *cs = NULL;
2126 cs = queue->device->ws->cs_create(queue->device->ws,
2127 queue->queue_family_index ? RING_COMPUTE : RING_GFX);
2128 if (!cs)
2129 goto fail;
2130
2131 dest_cs[i] = cs;
2132
2133 if (scratch_bo)
2134 radv_cs_add_buffer(queue->device->ws, cs, scratch_bo, 8);
2135
2136 if (descriptor_bo != queue->descriptor_bo) {
2137 uint32_t *map = (uint32_t*)queue->device->ws->buffer_map(descriptor_bo);
2138
2139 if (scratch_bo) {
2140 uint64_t scratch_va = radv_buffer_get_va(scratch_bo);
2141 uint32_t rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
2142 S_008F04_SWIZZLE_ENABLE(1);
2143 map[0] = scratch_va;
2144 map[1] = rsrc1;
2145 }
2146
2147 if (esgs_ring_bo || gsvs_ring_bo || tess_rings_bo ||
2148 add_sample_positions)
2149 fill_geom_tess_rings(queue, map, add_sample_positions,
2150 esgs_ring_size, esgs_ring_bo,
2151 gsvs_ring_size, gsvs_ring_bo,
2152 tess_factor_ring_size,
2153 tess_offchip_ring_offset,
2154 tess_offchip_ring_size,
2155 tess_rings_bo);
2156
2157 queue->device->ws->buffer_unmap(descriptor_bo);
2158 }
2159
2160 if (esgs_ring_bo || gsvs_ring_bo || tess_rings_bo) {
2161 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
2162 radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
2163 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
2164 radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
2165 }
2166
2167 radv_emit_gs_ring_sizes(queue, cs, esgs_ring_bo, esgs_ring_size,
2168 gsvs_ring_bo, gsvs_ring_size);
2169 radv_emit_tess_factor_ring(queue, cs, hs_offchip_param,
2170 tess_factor_ring_size, tess_rings_bo);
2171 radv_emit_global_shader_pointers(queue, cs, descriptor_bo);
2172 radv_emit_compute_scratch(queue, cs, compute_scratch_bo);
2173
2174 if (i == 0) {
2175 si_cs_emit_cache_flush(cs,
2176 queue->device->physical_device->rad_info.chip_class,
2177 NULL, 0,
2178 queue->queue_family_index == RING_COMPUTE &&
2179 queue->device->physical_device->rad_info.chip_class >= CIK,
2180 (queue->queue_family_index == RADV_QUEUE_COMPUTE ? RADV_CMD_FLAG_CS_PARTIAL_FLUSH : (RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH)) |
2181 RADV_CMD_FLAG_INV_ICACHE |
2182 RADV_CMD_FLAG_INV_SMEM_L1 |
2183 RADV_CMD_FLAG_INV_VMEM_L1 |
2184 RADV_CMD_FLAG_INV_GLOBAL_L2);
2185 } else if (i == 1) {
2186 si_cs_emit_cache_flush(cs,
2187 queue->device->physical_device->rad_info.chip_class,
2188 NULL, 0,
2189 queue->queue_family_index == RING_COMPUTE &&
2190 queue->device->physical_device->rad_info.chip_class >= CIK,
2191 RADV_CMD_FLAG_INV_ICACHE |
2192 RADV_CMD_FLAG_INV_SMEM_L1 |
2193 RADV_CMD_FLAG_INV_VMEM_L1 |
2194 RADV_CMD_FLAG_INV_GLOBAL_L2);
2195 }
2196
2197 if (!queue->device->ws->cs_finalize(cs))
2198 goto fail;
2199 }
2200
2201 if (queue->initial_full_flush_preamble_cs)
2202 queue->device->ws->cs_destroy(queue->initial_full_flush_preamble_cs);
2203
2204 if (queue->initial_preamble_cs)
2205 queue->device->ws->cs_destroy(queue->initial_preamble_cs);
2206
2207 if (queue->continue_preamble_cs)
2208 queue->device->ws->cs_destroy(queue->continue_preamble_cs);
2209
2210 queue->initial_full_flush_preamble_cs = dest_cs[0];
2211 queue->initial_preamble_cs = dest_cs[1];
2212 queue->continue_preamble_cs = dest_cs[2];
2213
2214 if (scratch_bo != queue->scratch_bo) {
2215 if (queue->scratch_bo)
2216 queue->device->ws->buffer_destroy(queue->scratch_bo);
2217 queue->scratch_bo = scratch_bo;
2218 queue->scratch_size = scratch_size;
2219 }
2220
2221 if (compute_scratch_bo != queue->compute_scratch_bo) {
2222 if (queue->compute_scratch_bo)
2223 queue->device->ws->buffer_destroy(queue->compute_scratch_bo);
2224 queue->compute_scratch_bo = compute_scratch_bo;
2225 queue->compute_scratch_size = compute_scratch_size;
2226 }
2227
2228 if (esgs_ring_bo != queue->esgs_ring_bo) {
2229 if (queue->esgs_ring_bo)
2230 queue->device->ws->buffer_destroy(queue->esgs_ring_bo);
2231 queue->esgs_ring_bo = esgs_ring_bo;
2232 queue->esgs_ring_size = esgs_ring_size;
2233 }
2234
2235 if (gsvs_ring_bo != queue->gsvs_ring_bo) {
2236 if (queue->gsvs_ring_bo)
2237 queue->device->ws->buffer_destroy(queue->gsvs_ring_bo);
2238 queue->gsvs_ring_bo = gsvs_ring_bo;
2239 queue->gsvs_ring_size = gsvs_ring_size;
2240 }
2241
2242 if (tess_rings_bo != queue->tess_rings_bo) {
2243 queue->tess_rings_bo = tess_rings_bo;
2244 queue->has_tess_rings = true;
2245 }
2246
2247 if (descriptor_bo != queue->descriptor_bo) {
2248 if (queue->descriptor_bo)
2249 queue->device->ws->buffer_destroy(queue->descriptor_bo);
2250
2251 queue->descriptor_bo = descriptor_bo;
2252 }
2253
2254 if (add_sample_positions)
2255 queue->has_sample_positions = true;
2256
2257 *initial_full_flush_preamble_cs = queue->initial_full_flush_preamble_cs;
2258 *initial_preamble_cs = queue->initial_preamble_cs;
2259 *continue_preamble_cs = queue->continue_preamble_cs;
2260 if (!scratch_size && !compute_scratch_size && !esgs_ring_size && !gsvs_ring_size)
2261 *continue_preamble_cs = NULL;
2262 return VK_SUCCESS;
2263 fail:
2264 for (int i = 0; i < ARRAY_SIZE(dest_cs); ++i)
2265 if (dest_cs[i])
2266 queue->device->ws->cs_destroy(dest_cs[i]);
2267 if (descriptor_bo && descriptor_bo != queue->descriptor_bo)
2268 queue->device->ws->buffer_destroy(descriptor_bo);
2269 if (scratch_bo && scratch_bo != queue->scratch_bo)
2270 queue->device->ws->buffer_destroy(scratch_bo);
2271 if (compute_scratch_bo && compute_scratch_bo != queue->compute_scratch_bo)
2272 queue->device->ws->buffer_destroy(compute_scratch_bo);
2273 if (esgs_ring_bo && esgs_ring_bo != queue->esgs_ring_bo)
2274 queue->device->ws->buffer_destroy(esgs_ring_bo);
2275 if (gsvs_ring_bo && gsvs_ring_bo != queue->gsvs_ring_bo)
2276 queue->device->ws->buffer_destroy(gsvs_ring_bo);
2277 if (tess_rings_bo && tess_rings_bo != queue->tess_rings_bo)
2278 queue->device->ws->buffer_destroy(tess_rings_bo);
2279 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
2280 }
2281
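/* Split the given semaphores (and optionally the fence) into syncobj handles
 * and legacy winsys semaphores, allocating the two arrays in counts. A
 * temporary syncobj takes precedence over the permanent payload.
 */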
2282 static VkResult radv_alloc_sem_counts(struct radv_winsys_sem_counts *counts,
2283 int num_sems,
2284 const VkSemaphore *sems,
2285 VkFence _fence,
2286 bool reset_temp)
2287 {
2288 int syncobj_idx = 0, sem_idx = 0;
2289
2290 if (num_sems == 0 && _fence == VK_NULL_HANDLE)
2291 return VK_SUCCESS;
2292
2293 for (uint32_t i = 0; i < num_sems; i++) {
2294 RADV_FROM_HANDLE(radv_semaphore, sem, sems[i]);
2295
2296 if (sem->temp_syncobj || sem->syncobj)
2297 counts->syncobj_count++;
2298 else
2299 counts->sem_count++;
2300 }
2301
2302 if (_fence != VK_NULL_HANDLE) {
2303 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2304 if (fence->temp_syncobj || fence->syncobj)
2305 counts->syncobj_count++;
2306 }
2307
2308 if (counts->syncobj_count) {
2309 counts->syncobj = (uint32_t *)malloc(sizeof(uint32_t) * counts->syncobj_count);
2310 if (!counts->syncobj)
2311 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2312 }
2313
2314 if (counts->sem_count) {
2315 counts->sem = (struct radeon_winsys_sem **)malloc(sizeof(struct radeon_winsys_sem *) * counts->sem_count);
2316 if (!counts->sem) {
2317 free(counts->syncobj);
2318 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2319 }
2320 }
2321
2322 for (uint32_t i = 0; i < num_sems; i++) {
2323 RADV_FROM_HANDLE(radv_semaphore, sem, sems[i]);
2324
2325 if (sem->temp_syncobj) {
2326 counts->syncobj[syncobj_idx++] = sem->temp_syncobj;
2327 }
2328 else if (sem->syncobj)
2329 counts->syncobj[syncobj_idx++] = sem->syncobj;
2330 else {
2331 assert(sem->sem);
2332 counts->sem[sem_idx++] = sem->sem;
2333 }
2334 }
2335
2336 if (_fence != VK_NULL_HANDLE) {
2337 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2338 if (fence->temp_syncobj)
2339 counts->syncobj[syncobj_idx++] = fence->temp_syncobj;
2340 else if (fence->syncobj)
2341 counts->syncobj[syncobj_idx++] = fence->syncobj;
2342 }
2343
2344 return VK_SUCCESS;
2345 }
2346
2347 void radv_free_sem_info(struct radv_winsys_sem_info *sem_info)
2348 {
2349 free(sem_info->wait.syncobj);
2350 free(sem_info->wait.sem);
2351 free(sem_info->signal.syncobj);
2352 free(sem_info->signal.sem);
2353 }
2354
2355
2356 static void radv_free_temp_syncobjs(struct radv_device *device,
2357 int num_sems,
2358 const VkSemaphore *sems)
2359 {
2360 for (uint32_t i = 0; i < num_sems; i++) {
2361 RADV_FROM_HANDLE(radv_semaphore, sem, sems[i]);
2362
2363 if (sem->temp_syncobj) {
2364 device->ws->destroy_syncobj(device->ws, sem->temp_syncobj);
2365 sem->temp_syncobj = 0;
2366 }
2367 }
2368 }
2369
2370 VkResult radv_alloc_sem_info(struct radv_winsys_sem_info *sem_info,
2371 int num_wait_sems,
2372 const VkSemaphore *wait_sems,
2373 int num_signal_sems,
2374 const VkSemaphore *signal_sems,
2375 VkFence fence)
2376 {
2377 VkResult ret;
2378 memset(sem_info, 0, sizeof(*sem_info));
2379
2380 ret = radv_alloc_sem_counts(&sem_info->wait, num_wait_sems, wait_sems, VK_NULL_HANDLE, true);
2381 if (ret)
2382 return ret;
2383 ret = radv_alloc_sem_counts(&sem_info->signal, num_signal_sems, signal_sems, fence, false);
2384 if (ret)
2385 radv_free_sem_info(sem_info);
2386
2387 /* caller can override these */
2388 sem_info->cs_emit_wait = true;
2389 sem_info->cs_emit_signal = true;
2390 return ret;
2391 }
2392
2393 /* Signals fence as soon as all the work currently put on queue is done. */
2394 static VkResult radv_signal_fence(struct radv_queue *queue,
2395 struct radv_fence *fence)
2396 {
2397 int ret;
2398 VkResult result;
2399 struct radv_winsys_sem_info sem_info;
2400
2401 result = radv_alloc_sem_info(&sem_info, 0, NULL, 0, NULL,
2402 radv_fence_to_handle(fence));
2403 if (result != VK_SUCCESS)
2404 return result;
2405
2406 ret = queue->device->ws->cs_submit(queue->hw_ctx, queue->queue_idx,
2407 &queue->device->empty_cs[queue->queue_family_index],
2408 1, NULL, NULL, &sem_info, NULL,
2409 false, fence->fence);
2410 radv_free_sem_info(&sem_info);
2411
2412 /* TODO: find a better error */
2413 if (ret)
2414 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
2415
2416 return VK_SUCCESS;
2417 }
2418
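/* Gather the worst-case scratch and ring requirements of all command buffers
 * first, build matching preambles, then submit the command streams in chunks
 * of at most max_cs_submission, emitting semaphore waits only for the first
 * chunk and signals only for the last one.
 */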
2419 VkResult radv_QueueSubmit(
2420 VkQueue _queue,
2421 uint32_t submitCount,
2422 const VkSubmitInfo* pSubmits,
2423 VkFence _fence)
2424 {
2425 RADV_FROM_HANDLE(radv_queue, queue, _queue);
2426 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2427 struct radeon_winsys_fence *base_fence = fence ? fence->fence : NULL;
2428 struct radeon_winsys_ctx *ctx = queue->hw_ctx;
2429 int ret;
2430 uint32_t max_cs_submission = queue->device->trace_bo ? 1 : UINT32_MAX;
2431 uint32_t scratch_size = 0;
2432 uint32_t compute_scratch_size = 0;
2433 uint32_t esgs_ring_size = 0, gsvs_ring_size = 0;
2434 struct radeon_winsys_cs *initial_preamble_cs = NULL, *initial_flush_preamble_cs = NULL, *continue_preamble_cs = NULL;
2435 VkResult result;
2436 bool fence_emitted = false;
2437 bool tess_rings_needed = false;
2438 bool sample_positions_needed = false;
2439
2440 /* Do this first so failing to allocate scratch buffers can't result in
2441 * partially executed submissions. */
2442 for (uint32_t i = 0; i < submitCount; i++) {
2443 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
2444 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer,
2445 pSubmits[i].pCommandBuffers[j]);
2446
2447 scratch_size = MAX2(scratch_size, cmd_buffer->scratch_size_needed);
2448 compute_scratch_size = MAX2(compute_scratch_size,
2449 cmd_buffer->compute_scratch_size_needed);
2450 esgs_ring_size = MAX2(esgs_ring_size, cmd_buffer->esgs_ring_size_needed);
2451 gsvs_ring_size = MAX2(gsvs_ring_size, cmd_buffer->gsvs_ring_size_needed);
2452 tess_rings_needed |= cmd_buffer->tess_rings_needed;
2453 sample_positions_needed |= cmd_buffer->sample_positions_needed;
2454 }
2455 }
2456
2457 result = radv_get_preamble_cs(queue, scratch_size, compute_scratch_size,
2458 esgs_ring_size, gsvs_ring_size, tess_rings_needed,
2459 sample_positions_needed, &initial_flush_preamble_cs,
2460 &initial_preamble_cs, &continue_preamble_cs);
2461 if (result != VK_SUCCESS)
2462 return result;
2463
2464 for (uint32_t i = 0; i < submitCount; i++) {
2465 struct radeon_winsys_cs **cs_array;
2466 bool do_flush = !i || pSubmits[i].pWaitDstStageMask;
2467 bool can_patch = true;
2468 uint32_t advance;
2469 struct radv_winsys_sem_info sem_info;
2470
2471 result = radv_alloc_sem_info(&sem_info,
2472 pSubmits[i].waitSemaphoreCount,
2473 pSubmits[i].pWaitSemaphores,
2474 pSubmits[i].signalSemaphoreCount,
2475 pSubmits[i].pSignalSemaphores,
2476 _fence);
2477 if (result != VK_SUCCESS)
2478 return result;
2479
2480 if (!pSubmits[i].commandBufferCount) {
2481 if (pSubmits[i].waitSemaphoreCount || pSubmits[i].signalSemaphoreCount) {
2482 ret = queue->device->ws->cs_submit(ctx, queue->queue_idx,
2483 &queue->device->empty_cs[queue->queue_family_index],
2484 1, NULL, NULL,
2485 &sem_info, NULL,
2486 false, base_fence);
2487 if (ret) {
2488 radv_loge("failed to submit CS %d\n", i);
2489 abort();
2490 }
2491 fence_emitted = true;
2492 }
2493 radv_free_sem_info(&sem_info);
2494 continue;
2495 }
2496
2497 cs_array = malloc(sizeof(struct radeon_winsys_cs *) *
2498 (pSubmits[i].commandBufferCount));
2499
2500 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
2501 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer,
2502 pSubmits[i].pCommandBuffers[j]);
2503 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
2504
2505 cs_array[j] = cmd_buffer->cs;
2506 if ((cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT))
2507 can_patch = false;
2508
2509 cmd_buffer->status = RADV_CMD_BUFFER_STATUS_PENDING;
2510 }
2511
2512 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j += advance) {
2513 struct radeon_winsys_cs *initial_preamble = (do_flush && !j) ? initial_flush_preamble_cs : initial_preamble_cs;
2514 const struct radv_winsys_bo_list *bo_list = NULL;
2515
2516 advance = MIN2(max_cs_submission,
2517 pSubmits[i].commandBufferCount - j);
2518
2519 if (queue->device->trace_bo)
2520 *queue->device->trace_id_ptr = 0;
2521
2522 sem_info.cs_emit_wait = j == 0;
2523 sem_info.cs_emit_signal = j + advance == pSubmits[i].commandBufferCount;
2524
2525 if (unlikely(queue->device->use_global_bo_list)) {
2526 pthread_mutex_lock(&queue->device->bo_list.mutex);
2527 bo_list = &queue->device->bo_list.list;
2528 }
2529
2530 ret = queue->device->ws->cs_submit(ctx, queue->queue_idx, cs_array + j,
2531 advance, initial_preamble, continue_preamble_cs,
2532 &sem_info, bo_list,
2533 can_patch, base_fence);
2534
2535 if (unlikely(queue->device->use_global_bo_list))
2536 pthread_mutex_unlock(&queue->device->bo_list.mutex);
2537
2538 if (ret) {
2539 radv_loge("failed to submit CS %d\n", i);
2540 abort();
2541 }
2542 fence_emitted = true;
2543 if (queue->device->trace_bo) {
2544 radv_check_gpu_hangs(queue, cs_array[j]);
2545 }
2546 }
2547
2548 radv_free_temp_syncobjs(queue->device,
2549 pSubmits[i].waitSemaphoreCount,
2550 pSubmits[i].pWaitSemaphores);
2551 radv_free_sem_info(&sem_info);
2552 free(cs_array);
2553 }
2554
2555 if (fence) {
2556 if (!fence_emitted) {
2557 radv_signal_fence(queue, fence);
2558 }
2559 fence->submitted = true;
2560 }
2561
2562 return VK_SUCCESS;
2563 }
2564
2565 VkResult radv_QueueWaitIdle(
2566 VkQueue _queue)
2567 {
2568 RADV_FROM_HANDLE(radv_queue, queue, _queue);
2569
2570 queue->device->ws->ctx_wait_idle(queue->hw_ctx,
2571 radv_queue_family_to_ring(queue->queue_family_index),
2572 queue->queue_idx);
2573 return VK_SUCCESS;
2574 }
2575
2576 VkResult radv_DeviceWaitIdle(
2577 VkDevice _device)
2578 {
2579 RADV_FROM_HANDLE(radv_device, device, _device);
2580
2581 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
2582 for (unsigned q = 0; q < device->queue_count[i]; q++) {
2583 radv_QueueWaitIdle(radv_queue_to_handle(&device->queues[i][q]));
2584 }
2585 }
2586 return VK_SUCCESS;
2587 }
2588
2589 VkResult radv_EnumerateInstanceExtensionProperties(
2590 const char* pLayerName,
2591 uint32_t* pPropertyCount,
2592 VkExtensionProperties* pProperties)
2593 {
2594 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
2595
2596 for (int i = 0; i < RADV_INSTANCE_EXTENSION_COUNT; i++) {
2597 if (radv_supported_instance_extensions.extensions[i]) {
2598 vk_outarray_append(&out, prop) {
2599 *prop = radv_instance_extensions[i];
2600 }
2601 }
2602 }
2603
2604 return vk_outarray_status(&out);
2605 }
2606
2607 VkResult radv_EnumerateDeviceExtensionProperties(
2608 VkPhysicalDevice physicalDevice,
2609 const char* pLayerName,
2610 uint32_t* pPropertyCount,
2611 VkExtensionProperties* pProperties)
2612 {
2613 RADV_FROM_HANDLE(radv_physical_device, device, physicalDevice);
2614 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
2615
2616 for (int i = 0; i < RADV_DEVICE_EXTENSION_COUNT; i++) {
2617 if (device->supported_extensions.extensions[i]) {
2618 vk_outarray_append(&out, prop) {
2619 *prop = radv_device_extensions[i];
2620 }
2621 }
2622 }
2623
2624 return vk_outarray_status(&out);
2625 }
2626
2627 PFN_vkVoidFunction radv_GetInstanceProcAddr(
2628 VkInstance _instance,
2629 const char* pName)
2630 {
2631 RADV_FROM_HANDLE(radv_instance, instance, _instance);
2632
2633 return radv_lookup_entrypoint_checked(pName,
2634 instance ? instance->apiVersion : 0,
2635 instance ? &instance->enabled_extensions : NULL,
2636 NULL);
2637 }
2638
2639 /* The loader wants us to expose a second GetInstanceProcAddr function
2640 * to work around certain LD_PRELOAD issues seen in apps.
2641 */
2642 PUBLIC
2643 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
2644 VkInstance instance,
2645 const char* pName);
2646
2647 PUBLIC
2648 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
2649 VkInstance instance,
2650 const char* pName)
2651 {
2652 return radv_GetInstanceProcAddr(instance, pName);
2653 }
2654
2655 PFN_vkVoidFunction radv_GetDeviceProcAddr(
2656 VkDevice _device,
2657 const char* pName)
2658 {
2659 RADV_FROM_HANDLE(radv_device, device, _device);
2660
2661 return radv_lookup_entrypoint_checked(pName,
2662 device->instance->apiVersion,
2663 &device->instance->enabled_extensions,
2664 &device->enabled_extensions);
2665 }
2666
2667 bool radv_get_memory_fd(struct radv_device *device,
2668 struct radv_device_memory *memory,
2669 int *pFD)
2670 {
2671 struct radeon_bo_metadata metadata;
2672
2673 if (memory->image) {
2674 radv_init_metadata(device, memory->image, &metadata);
2675 device->ws->buffer_set_metadata(memory->bo, &metadata);
2676 }
2677
2678 return device->ws->buffer_get_fd(device->ws, memory->bo,
2679 pFD);
2680 }
2681
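/* Common allocation path for vkAllocateMemory: handles dedicated allocations,
 * fd imports, host pointer imports and plain VRAM/GTT allocations, and
 * registers the resulting BO with the global BO list.
 */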
2682 static VkResult radv_alloc_memory(struct radv_device *device,
2683 const VkMemoryAllocateInfo* pAllocateInfo,
2684 const VkAllocationCallbacks* pAllocator,
2685 VkDeviceMemory* pMem)
2686 {
2687 struct radv_device_memory *mem;
2688 VkResult result;
2689 enum radeon_bo_domain domain;
2690 uint32_t flags = 0;
2691 enum radv_mem_type mem_type_index = device->physical_device->mem_type_indices[pAllocateInfo->memoryTypeIndex];
2692
2693 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
2694
2695 if (pAllocateInfo->allocationSize == 0) {
2696 /* Apparently, this is allowed */
2697 *pMem = VK_NULL_HANDLE;
2698 return VK_SUCCESS;
2699 }
2700
2701 const VkImportMemoryFdInfoKHR *import_info =
2702 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
2703 const VkMemoryDedicatedAllocateInfoKHR *dedicate_info =
2704 vk_find_struct_const(pAllocateInfo->pNext, MEMORY_DEDICATED_ALLOCATE_INFO_KHR);
2705 const VkExportMemoryAllocateInfoKHR *export_info =
2706 vk_find_struct_const(pAllocateInfo->pNext, EXPORT_MEMORY_ALLOCATE_INFO_KHR);
2707 const VkImportMemoryHostPointerInfoEXT *host_ptr_info =
2708 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_HOST_POINTER_INFO_EXT);
2709
2710 const struct wsi_memory_allocate_info *wsi_info =
2711 vk_find_struct_const(pAllocateInfo->pNext, WSI_MEMORY_ALLOCATE_INFO_MESA);
2712
2713 mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
2714 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2715 if (mem == NULL)
2716 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2717
2718 if (wsi_info && wsi_info->implicit_sync)
2719 flags |= RADEON_FLAG_IMPLICIT_SYNC;
2720
2721 if (dedicate_info) {
2722 mem->image = radv_image_from_handle(dedicate_info->image);
2723 mem->buffer = radv_buffer_from_handle(dedicate_info->buffer);
2724 } else {
2725 mem->image = NULL;
2726 mem->buffer = NULL;
2727 }
2728
2729 mem->user_ptr = NULL;
2730
2731 if (import_info) {
2732 assert(import_info->handleType ==
2733 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR ||
2734 import_info->handleType ==
2735 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2736 mem->bo = device->ws->buffer_from_fd(device->ws, import_info->fd,
2737 NULL, NULL);
2738 if (!mem->bo) {
2739 result = VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR;
2740 goto fail;
2741 } else {
2742 close(import_info->fd);
2743 }
2744 } else if (host_ptr_info) {
2745 assert(host_ptr_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT);
2746 assert(mem_type_index == RADV_MEM_TYPE_GTT_CACHED);
2747 mem->bo = device->ws->buffer_from_ptr(device->ws, host_ptr_info->pHostPointer,
2748 pAllocateInfo->allocationSize);
2749 if (!mem->bo) {
2750 result = VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR;
2751 goto fail;
2752 } else {
2753 mem->user_ptr = host_ptr_info->pHostPointer;
2754 }
2755 } else {
2756 uint64_t alloc_size = align_u64(pAllocateInfo->allocationSize, 4096);
2757 if (mem_type_index == RADV_MEM_TYPE_GTT_WRITE_COMBINE ||
2758 mem_type_index == RADV_MEM_TYPE_GTT_CACHED)
2759 domain = RADEON_DOMAIN_GTT;
2760 else
2761 domain = RADEON_DOMAIN_VRAM;
2762
2763 if (mem_type_index == RADV_MEM_TYPE_VRAM)
2764 flags |= RADEON_FLAG_NO_CPU_ACCESS;
2765 else
2766 flags |= RADEON_FLAG_CPU_ACCESS;
2767
2768 if (mem_type_index == RADV_MEM_TYPE_GTT_WRITE_COMBINE)
2769 flags |= RADEON_FLAG_GTT_WC;
2770
2771 if (!dedicate_info && !import_info && (!export_info || !export_info->handleTypes))
2772 flags |= RADEON_FLAG_NO_INTERPROCESS_SHARING;
2773
2774 mem->bo = device->ws->buffer_create(device->ws, alloc_size, device->physical_device->rad_info.max_alignment,
2775 domain, flags);
2776
2777 if (!mem->bo) {
2778 result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
2779 goto fail;
2780 }
2781 mem->type_index = mem_type_index;
2782 }
2783
2784 result = radv_bo_list_add(device, mem->bo);
2785 if (result != VK_SUCCESS)
2786 goto fail_bo;
2787
2788 *pMem = radv_device_memory_to_handle(mem);
2789
2790 return VK_SUCCESS;
2791
2792 fail_bo:
2793 device->ws->buffer_destroy(mem->bo);
2794 fail:
2795 vk_free2(&device->alloc, pAllocator, mem);
2796
2797 return result;
2798 }
2799
2800 VkResult radv_AllocateMemory(
2801 VkDevice _device,
2802 const VkMemoryAllocateInfo* pAllocateInfo,
2803 const VkAllocationCallbacks* pAllocator,
2804 VkDeviceMemory* pMem)
2805 {
2806 RADV_FROM_HANDLE(radv_device, device, _device);
2807 return radv_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
2808 }
2809
2810 void radv_FreeMemory(
2811 VkDevice _device,
2812 VkDeviceMemory _mem,
2813 const VkAllocationCallbacks* pAllocator)
2814 {
2815 RADV_FROM_HANDLE(radv_device, device, _device);
2816 RADV_FROM_HANDLE(radv_device_memory, mem, _mem);
2817
2818 if (mem == NULL)
2819 return;
2820
2821 radv_bo_list_remove(device, mem->bo);
2822 device->ws->buffer_destroy(mem->bo);
2823 mem->bo = NULL;
2824
2825 vk_free2(&device->alloc, pAllocator, mem);
2826 }
2827
2828 VkResult radv_MapMemory(
2829 VkDevice _device,
2830 VkDeviceMemory _memory,
2831 VkDeviceSize offset,
2832 VkDeviceSize size,
2833 VkMemoryMapFlags flags,
2834 void** ppData)
2835 {
2836 RADV_FROM_HANDLE(radv_device, device, _device);
2837 RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
2838
2839 if (mem == NULL) {
2840 *ppData = NULL;
2841 return VK_SUCCESS;
2842 }
2843
2844 if (mem->user_ptr)
2845 *ppData = mem->user_ptr;
2846 else
2847 *ppData = device->ws->buffer_map(mem->bo);
2848
2849 if (*ppData) {
2850 *ppData += offset;
2851 return VK_SUCCESS;
2852 }
2853
2854 return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
2855 }
2856
2857 void radv_UnmapMemory(
2858 VkDevice _device,
2859 VkDeviceMemory _memory)
2860 {
2861 RADV_FROM_HANDLE(radv_device, device, _device);
2862 RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
2863
2864 if (mem == NULL)
2865 return;
2866
2867 if (mem->user_ptr == NULL)
2868 device->ws->buffer_unmap(mem->bo);
2869 }
2870
2871 VkResult radv_FlushMappedMemoryRanges(
2872 VkDevice _device,
2873 uint32_t memoryRangeCount,
2874 const VkMappedMemoryRange* pMemoryRanges)
2875 {
2876 return VK_SUCCESS;
2877 }
2878
2879 VkResult radv_InvalidateMappedMemoryRanges(
2880 VkDevice _device,
2881 uint32_t memoryRangeCount,
2882 const VkMappedMemoryRange* pMemoryRanges)
2883 {
2884 return VK_SUCCESS;
2885 }
2886
2887 void radv_GetBufferMemoryRequirements(
2888 VkDevice _device,
2889 VkBuffer _buffer,
2890 VkMemoryRequirements* pMemoryRequirements)
2891 {
2892 RADV_FROM_HANDLE(radv_device, device, _device);
2893 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
2894
2895 pMemoryRequirements->memoryTypeBits = (1u << device->physical_device->memory_properties.memoryTypeCount) - 1;
2896
2897 if (buffer->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)
2898 pMemoryRequirements->alignment = 4096;
2899 else
2900 pMemoryRequirements->alignment = 16;
2901
2902 pMemoryRequirements->size = align64(buffer->size, pMemoryRequirements->alignment);
2903 }
2904
2905 void radv_GetBufferMemoryRequirements2(
2906 VkDevice device,
2907 const VkBufferMemoryRequirementsInfo2KHR* pInfo,
2908 VkMemoryRequirements2KHR* pMemoryRequirements)
2909 {
2910 radv_GetBufferMemoryRequirements(device, pInfo->buffer,
2911 &pMemoryRequirements->memoryRequirements);
2912 RADV_FROM_HANDLE(radv_buffer, buffer, pInfo->buffer);
2913 vk_foreach_struct(ext, pMemoryRequirements->pNext) {
2914 switch (ext->sType) {
2915 case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR: {
2916 VkMemoryDedicatedRequirementsKHR *req =
2917 (VkMemoryDedicatedRequirementsKHR *) ext;
2918 req->requiresDedicatedAllocation = buffer->shareable;
2919 req->prefersDedicatedAllocation = req->requiresDedicatedAllocation;
2920 break;
2921 }
2922 default:
2923 break;
2924 }
2925 }
2926 }
2927
2928 void radv_GetImageMemoryRequirements(
2929 VkDevice _device,
2930 VkImage _image,
2931 VkMemoryRequirements* pMemoryRequirements)
2932 {
2933 RADV_FROM_HANDLE(radv_device, device, _device);
2934 RADV_FROM_HANDLE(radv_image, image, _image);
2935
2936 pMemoryRequirements->memoryTypeBits = (1u << device->physical_device->memory_properties.memoryTypeCount) - 1;
2937
2938 pMemoryRequirements->size = image->size;
2939 pMemoryRequirements->alignment = image->alignment;
2940 }
2941
2942 void radv_GetImageMemoryRequirements2(
2943 VkDevice device,
2944 const VkImageMemoryRequirementsInfo2KHR* pInfo,
2945 VkMemoryRequirements2KHR* pMemoryRequirements)
2946 {
2947 radv_GetImageMemoryRequirements(device, pInfo->image,
2948 &pMemoryRequirements->memoryRequirements);
2949
2950 RADV_FROM_HANDLE(radv_image, image, pInfo->image);
2951
2952 vk_foreach_struct(ext, pMemoryRequirements->pNext) {
2953 switch (ext->sType) {
2954 case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR: {
2955 VkMemoryDedicatedRequirementsKHR *req =
2956 (VkMemoryDedicatedRequirementsKHR *) ext;
2957 req->requiresDedicatedAllocation = image->shareable;
2958 req->prefersDedicatedAllocation = req->requiresDedicatedAllocation;
2959 break;
2960 }
2961 default:
2962 break;
2963 }
2964 }
2965 }
2966
2967 void radv_GetImageSparseMemoryRequirements(
2968 VkDevice device,
2969 VkImage image,
2970 uint32_t* pSparseMemoryRequirementCount,
2971 VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
2972 {
2973 stub();
2974 }
2975
2976 void radv_GetImageSparseMemoryRequirements2(
2977 VkDevice device,
2978 const VkImageSparseMemoryRequirementsInfo2KHR* pInfo,
2979 uint32_t* pSparseMemoryRequirementCount,
2980 VkSparseImageMemoryRequirements2KHR* pSparseMemoryRequirements)
2981 {
2982 stub();
2983 }
2984
2985 void radv_GetDeviceMemoryCommitment(
2986 VkDevice device,
2987 VkDeviceMemory memory,
2988 VkDeviceSize* pCommittedMemoryInBytes)
2989 {
2990 *pCommittedMemoryInBytes = 0;
2991 }
2992
2993 VkResult radv_BindBufferMemory2(VkDevice device,
2994 uint32_t bindInfoCount,
2995 const VkBindBufferMemoryInfoKHR *pBindInfos)
2996 {
2997 for (uint32_t i = 0; i < bindInfoCount; ++i) {
2998 RADV_FROM_HANDLE(radv_device_memory, mem, pBindInfos[i].memory);
2999 RADV_FROM_HANDLE(radv_buffer, buffer, pBindInfos[i].buffer);
3000
3001 if (mem) {
3002 buffer->bo = mem->bo;
3003 buffer->offset = pBindInfos[i].memoryOffset;
3004 } else {
3005 buffer->bo = NULL;
3006 }
3007 }
3008 return VK_SUCCESS;
3009 }
3010
3011 VkResult radv_BindBufferMemory(
3012 VkDevice device,
3013 VkBuffer buffer,
3014 VkDeviceMemory memory,
3015 VkDeviceSize memoryOffset)
3016 {
3017 const VkBindBufferMemoryInfoKHR info = {
3018 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
3019 .buffer = buffer,
3020 .memory = memory,
3021 .memoryOffset = memoryOffset
3022 };
3023
3024 return radv_BindBufferMemory2(device, 1, &info);
3025 }
3026
3027 VkResult radv_BindImageMemory2(VkDevice device,
3028 uint32_t bindInfoCount,
3029 const VkBindImageMemoryInfoKHR *pBindInfos)
3030 {
3031 for (uint32_t i = 0; i < bindInfoCount; ++i) {
3032 RADV_FROM_HANDLE(radv_device_memory, mem, pBindInfos[i].memory);
3033 RADV_FROM_HANDLE(radv_image, image, pBindInfos[i].image);
3034
3035 if (mem) {
3036 image->bo = mem->bo;
3037 image->offset = pBindInfos[i].memoryOffset;
3038 } else {
3039 image->bo = NULL;
3040 image->offset = 0;
3041 }
3042 }
3043 return VK_SUCCESS;
3044 }
3045
3046
3047 VkResult radv_BindImageMemory(
3048 VkDevice device,
3049 VkImage image,
3050 VkDeviceMemory memory,
3051 VkDeviceSize memoryOffset)
3052 {
3053 const VkBindImageMemoryInfoKHR info = {
3054 .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR,
3055 .image = image,
3056 .memory = memory,
3057 .memoryOffset = memoryOffset
3058 };
3059
3060 return radv_BindImageMemory2(device, 1, &info);
3061 }
3062
3063
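/* Sparse binding maps ranges of the resource's virtual BO to the backing
 * memory, or unmaps them when memory is VK_NULL_HANDLE.
 */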
3064 static void
3065 radv_sparse_buffer_bind_memory(struct radv_device *device,
3066 const VkSparseBufferMemoryBindInfo *bind)
3067 {
3068 RADV_FROM_HANDLE(radv_buffer, buffer, bind->buffer);
3069
3070 for (uint32_t i = 0; i < bind->bindCount; ++i) {
3071 struct radv_device_memory *mem = NULL;
3072
3073 if (bind->pBinds[i].memory != VK_NULL_HANDLE)
3074 mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
3075
3076 device->ws->buffer_virtual_bind(buffer->bo,
3077 bind->pBinds[i].resourceOffset,
3078 bind->pBinds[i].size,
3079 mem ? mem->bo : NULL,
3080 bind->pBinds[i].memoryOffset);
3081 }
3082 }
3083
3084 static void
3085 radv_sparse_image_opaque_bind_memory(struct radv_device *device,
3086 const VkSparseImageOpaqueMemoryBindInfo *bind)
3087 {
3088 RADV_FROM_HANDLE(radv_image, image, bind->image);
3089
3090 for (uint32_t i = 0; i < bind->bindCount; ++i) {
3091 struct radv_device_memory *mem = NULL;
3092
3093 if (bind->pBinds[i].memory != VK_NULL_HANDLE)
3094 mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
3095
3096 device->ws->buffer_virtual_bind(image->bo,
3097 bind->pBinds[i].resourceOffset,
3098 bind->pBinds[i].size,
3099 mem ? mem->bo : NULL,
3100 bind->pBinds[i].memoryOffset);
3101 }
3102 }
3103
3104 VkResult radv_QueueBindSparse(
3105 VkQueue _queue,
3106 uint32_t bindInfoCount,
3107 const VkBindSparseInfo* pBindInfo,
3108 VkFence _fence)
3109 {
3110 RADV_FROM_HANDLE(radv_fence, fence, _fence);
3111 RADV_FROM_HANDLE(radv_queue, queue, _queue);
3112 struct radeon_winsys_fence *base_fence = fence ? fence->fence : NULL;
3113 bool fence_emitted = false;
3114
3115 for (uint32_t i = 0; i < bindInfoCount; ++i) {
3116 struct radv_winsys_sem_info sem_info;
3117 for (uint32_t j = 0; j < pBindInfo[i].bufferBindCount; ++j) {
3118 radv_sparse_buffer_bind_memory(queue->device,
3119 pBindInfo[i].pBufferBinds + j);
3120 }
3121
3122 for (uint32_t j = 0; j < pBindInfo[i].imageOpaqueBindCount; ++j) {
3123 radv_sparse_image_opaque_bind_memory(queue->device,
3124 pBindInfo[i].pImageOpaqueBinds + j);
3125 }
3126
3127 VkResult result;
3128 result = radv_alloc_sem_info(&sem_info,
3129 pBindInfo[i].waitSemaphoreCount,
3130 pBindInfo[i].pWaitSemaphores,
3131 pBindInfo[i].signalSemaphoreCount,
3132 pBindInfo[i].pSignalSemaphores,
3133 _fence);
3134 if (result != VK_SUCCESS)
3135 return result;
3136
3137 if (pBindInfo[i].waitSemaphoreCount || pBindInfo[i].signalSemaphoreCount) {
3138 queue->device->ws->cs_submit(queue->hw_ctx, queue->queue_idx,
3139 &queue->device->empty_cs[queue->queue_family_index],
3140 1, NULL, NULL,
3141 &sem_info, NULL,
3142 false, base_fence);
3143 fence_emitted = true;
3144 if (fence)
3145 fence->submitted = true;
3146 }
3147
3148 radv_free_sem_info(&sem_info);
3149
3150 }
3151
3152 if (fence) {
3153 if (!fence_emitted) {
3154 radv_signal_fence(queue, fence);
3155 }
3156 fence->submitted = true;
3157 }
3158
3159 return VK_SUCCESS;
3160 }
3161
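/* Fences are backed either by a DRM syncobj (when external fence handles are
 * requested or syncobjs are always used) or by a plain winsys fence.
 */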
3162 VkResult radv_CreateFence(
3163 VkDevice _device,
3164 const VkFenceCreateInfo* pCreateInfo,
3165 const VkAllocationCallbacks* pAllocator,
3166 VkFence* pFence)
3167 {
3168 RADV_FROM_HANDLE(radv_device, device, _device);
3169 const VkExportFenceCreateInfoKHR *export =
3170 vk_find_struct_const(pCreateInfo->pNext, EXPORT_FENCE_CREATE_INFO_KHR);
3171 VkExternalFenceHandleTypeFlagsKHR handleTypes =
3172 export ? export->handleTypes : 0;
3173
3174 struct radv_fence *fence = vk_alloc2(&device->alloc, pAllocator,
3175 sizeof(*fence), 8,
3176 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3177
3178 if (!fence)
3179 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3180
3181 fence->submitted = false;
3182 fence->signalled = !!(pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT);
3183 fence->temp_syncobj = 0;
3184 if (device->always_use_syncobj || handleTypes) {
3185 int ret = device->ws->create_syncobj(device->ws, &fence->syncobj);
3186 if (ret) {
3187 vk_free2(&device->alloc, pAllocator, fence);
3188 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3189 }
3190 if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
3191 device->ws->signal_syncobj(device->ws, fence->syncobj);
3192 }
3193 fence->fence = NULL;
3194 } else {
3195 fence->fence = device->ws->create_fence();
3196 if (!fence->fence) {
3197 vk_free2(&device->alloc, pAllocator, fence);
3198 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3199 }
3200 fence->syncobj = 0;
3201 }
3202
3203 *pFence = radv_fence_to_handle(fence);
3204
3205 return VK_SUCCESS;
3206 }
3207
3208 void radv_DestroyFence(
3209 VkDevice _device,
3210 VkFence _fence,
3211 const VkAllocationCallbacks* pAllocator)
3212 {
3213 RADV_FROM_HANDLE(radv_device, device, _device);
3214 RADV_FROM_HANDLE(radv_fence, fence, _fence);
3215
3216 if (!fence)
3217 return;
3218
3219 if (fence->temp_syncobj)
3220 device->ws->destroy_syncobj(device->ws, fence->temp_syncobj);
3221 if (fence->syncobj)
3222 device->ws->destroy_syncobj(device->ws, fence->syncobj);
3223 if (fence->fence)
3224 device->ws->destroy_fence(fence->fence);
3225 vk_free2(&device->alloc, pAllocator, fence);
3226 }
3227
3228
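/* Fence timeouts are based on CLOCK_MONOTONIC; relative timeouts are turned
 * into absolute deadlines and clamped so the addition cannot overflow.
 */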
3229 static uint64_t radv_get_current_time(void)
3230 {
3231 struct timespec tv;
3232 clock_gettime(CLOCK_MONOTONIC, &tv);
3233 return tv.tv_nsec + tv.tv_sec*1000000000ull;
3234 }
3235
3236 static uint64_t radv_get_absolute_timeout(uint64_t timeout)
3237 {
3238 uint64_t current_time = radv_get_current_time();
3239
3240 timeout = MIN2(UINT64_MAX - current_time, timeout);
3241
3242 return current_time + timeout;
3243 }
3244
3245
3246 static bool radv_all_fences_plain_and_submitted(uint32_t fenceCount, const VkFence *pFences)
3247 {
3248 for (uint32_t i = 0; i < fenceCount; ++i) {
3249 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
3250 if (fence->syncobj || fence->temp_syncobj || (!fence->signalled && !fence->submitted))
3251 return false;
3252 }
3253 return true;
3254 }
3255
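/* Wait paths: a single syncobj wait when the device always uses syncobjs, a
 * batched winsys wait for "wait any" on plain submitted fences, and a
 * per-fence fallback loop otherwise.
 */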
3256 VkResult radv_WaitForFences(
3257 VkDevice _device,
3258 uint32_t fenceCount,
3259 const VkFence* pFences,
3260 VkBool32 waitAll,
3261 uint64_t timeout)
3262 {
3263 RADV_FROM_HANDLE(radv_device, device, _device);
3264 timeout = radv_get_absolute_timeout(timeout);
3265
3266 if (device->always_use_syncobj) {
3267 uint32_t *handles = malloc(sizeof(uint32_t) * fenceCount);
3268 if (!handles)
3269 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3270
3271 for (uint32_t i = 0; i < fenceCount; ++i) {
3272 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
3273 handles[i] = fence->temp_syncobj ? fence->temp_syncobj : fence->syncobj;
3274 }
3275
3276 bool success = device->ws->wait_syncobj(device->ws, handles, fenceCount, waitAll, timeout);
3277
3278 free(handles);
3279 return success ? VK_SUCCESS : VK_TIMEOUT;
3280 }
3281
3282 if (!waitAll && fenceCount > 1) {
3283 /* Not doing this by default for waitAll, due to needing to allocate twice. */
3284 if (device->physical_device->rad_info.drm_minor >= 10 && radv_all_fences_plain_and_submitted(fenceCount, pFences)) {
3285 uint32_t wait_count = 0;
3286 struct radeon_winsys_fence **fences = malloc(sizeof(struct radeon_winsys_fence *) * fenceCount);
3287 if (!fences)
3288 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3289
3290 for (uint32_t i = 0; i < fenceCount; ++i) {
3291 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
3292
3293 if (fence->signalled) {
3294 free(fences);
3295 return VK_SUCCESS;
3296 }
3297
3298 fences[wait_count++] = fence->fence;
3299 }
3300
3301 bool success = device->ws->fences_wait(device->ws, fences, wait_count,
3302 waitAll, timeout - radv_get_current_time());
3303
3304 free(fences);
3305 return success ? VK_SUCCESS : VK_TIMEOUT;
3306 }
3307
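/* Fallback: poll each fence's status until the deadline expires. */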
3308 while(radv_get_current_time() <= timeout) {
3309 for (uint32_t i = 0; i < fenceCount; ++i) {
3310 if (radv_GetFenceStatus(_device, pFences[i]) == VK_SUCCESS)
3311 return VK_SUCCESS;
3312 }
3313 }
3314 return VK_TIMEOUT;
3315 }
3316
3317 for (uint32_t i = 0; i < fenceCount; ++i) {
3318 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
3319 bool expired = false;
3320
3321 if (fence->temp_syncobj) {
3322 if (!device->ws->wait_syncobj(device->ws, &fence->temp_syncobj, 1, true, timeout))
3323 return VK_TIMEOUT;
3324 continue;
3325 }
3326
3327 if (fence->syncobj) {
3328 if (!device->ws->wait_syncobj(device->ws, &fence->syncobj, 1, true, timeout))
3329 return VK_TIMEOUT;
3330 continue;
3331 }
3332
3333 if (fence->signalled)
3334 continue;
3335
3336 if (!fence->submitted) {
3337 while(radv_get_current_time() <= timeout && !fence->submitted)
3338 /* Do nothing */;
3339
3340 if (!fence->submitted)
3341 return VK_TIMEOUT;
3342
3343 /* Recheck as it may have been set by submitting operations. */
3344 if (fence->signalled)
3345 continue;
3346 }
3347
3348 expired = device->ws->fence_wait(device->ws, fence->fence, true, timeout);
3349 if (!expired)
3350 return VK_TIMEOUT;
3351
3352 fence->signalled = true;
3353 }
3354
3355 return VK_SUCCESS;
3356 }
3357
3358 VkResult radv_ResetFences(VkDevice _device,
3359 uint32_t fenceCount,
3360 const VkFence *pFences)
3361 {
3362 RADV_FROM_HANDLE(radv_device, device, _device);
3363
3364 for (unsigned i = 0; i < fenceCount; ++i) {
3365 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
3366 fence->submitted = fence->signalled = false;
3367
3368 /* Per spec, we first restore the permanent payload, and then reset, so
3369 * having a temp syncobj should not skip resetting the permanent syncobj. */
3370 if (fence->temp_syncobj) {
3371 device->ws->destroy_syncobj(device->ws, fence->temp_syncobj);
3372 fence->temp_syncobj = 0;
3373 }
3374
3375 if (fence->syncobj) {
3376 device->ws->reset_syncobj(device->ws, fence->syncobj);
3377 }
3378 }
3379
3380 return VK_SUCCESS;
3381 }
3382
3383 VkResult radv_GetFenceStatus(VkDevice _device, VkFence _fence)
3384 {
3385 RADV_FROM_HANDLE(radv_device, device, _device);
3386 RADV_FROM_HANDLE(radv_fence, fence, _fence);
3387
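/* A syncobj wait with a zero timeout is just a non-blocking status query. */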
3388 if (fence->temp_syncobj) {
3389 bool success = device->ws->wait_syncobj(device->ws, &fence->temp_syncobj, 1, true, 0);
3390 return success ? VK_SUCCESS : VK_NOT_READY;
3391 }
3392
3393 if (fence->syncobj) {
3394 bool success = device->ws->wait_syncobj(device->ws, &fence->syncobj, 1, true, 0);
3395 return success ? VK_SUCCESS : VK_NOT_READY;
3396 }
3397
3398 if (fence->signalled)
3399 return VK_SUCCESS;
3400 if (!fence->submitted)
3401 return VK_NOT_READY;
3402 if (!device->ws->fence_wait(device->ws, fence->fence, false, 0))
3403 return VK_NOT_READY;
3404
3405 return VK_SUCCESS;
3406 }
3407
3408
3409 // Queue semaphore functions
3410
3411 VkResult radv_CreateSemaphore(
3412 VkDevice _device,
3413 const VkSemaphoreCreateInfo* pCreateInfo,
3414 const VkAllocationCallbacks* pAllocator,
3415 VkSemaphore* pSemaphore)
3416 {
3417 RADV_FROM_HANDLE(radv_device, device, _device);
3418 const VkExportSemaphoreCreateInfoKHR *export =
3419 vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO_KHR);
3420 VkExternalSemaphoreHandleTypeFlagsKHR handleTypes =
3421 export ? export->handleTypes : 0;
3422
3423 struct radv_semaphore *sem = vk_alloc2(&device->alloc, pAllocator,
3424 sizeof(*sem), 8,
3425 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3426 if (!sem)
3427 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3428
3429 sem->temp_syncobj = 0;
3430 /* Create a syncobj if the device always uses them or if we are going to export this semaphore. */
3431 if (device->always_use_syncobj || handleTypes) {
3432 assert (device->physical_device->rad_info.has_syncobj);
3433 int ret = device->ws->create_syncobj(device->ws, &sem->syncobj);
3434 if (ret) {
3435 vk_free2(&device->alloc, pAllocator, sem);
3436 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3437 }
3438 sem->sem = NULL;
3439 } else {
3440 sem->sem = device->ws->create_sem(device->ws);
3441 if (!sem->sem) {
3442 vk_free2(&device->alloc, pAllocator, sem);
3443 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3444 }
3445 sem->syncobj = 0;
3446 }
3447
3448 *pSemaphore = radv_semaphore_to_handle(sem);
3449 return VK_SUCCESS;
3450 }
3451
3452 void radv_DestroySemaphore(
3453 VkDevice _device,
3454 VkSemaphore _semaphore,
3455 const VkAllocationCallbacks* pAllocator)
3456 {
3457 RADV_FROM_HANDLE(radv_device, device, _device);
3458 RADV_FROM_HANDLE(radv_semaphore, sem, _semaphore);
3459 if (!_semaphore)
3460 return;
3461
3462 if (sem->syncobj)
3463 device->ws->destroy_syncobj(device->ws, sem->syncobj);
3464 else
3465 device->ws->destroy_sem(sem->sem);
3466 vk_free2(&device->alloc, pAllocator, sem);
3467 }
3468
3469 VkResult radv_CreateEvent(
3470 VkDevice _device,
3471 const VkEventCreateInfo* pCreateInfo,
3472 const VkAllocationCallbacks* pAllocator,
3473 VkEvent* pEvent)
3474 {
3475 RADV_FROM_HANDLE(radv_device, device, _device);
3476 struct radv_event *event = vk_alloc2(&device->alloc, pAllocator,
3477 sizeof(*event), 8,
3478 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3479
3480 if (!event)
3481 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3482
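/* Events are backed by a tiny CPU-visible, uncached GTT buffer; the event
 * state is a single 64-bit word that is written with 1 (set) or 0 (reset).
 */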
3483 event->bo = device->ws->buffer_create(device->ws, 8, 8,
3484 RADEON_DOMAIN_GTT,
3485 RADEON_FLAG_VA_UNCACHED | RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING);
3486 if (!event->bo) {
3487 vk_free2(&device->alloc, pAllocator, event);
3488 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
3489 }
3490
3491 event->map = (uint64_t*)device->ws->buffer_map(event->bo);
3492
3493 *pEvent = radv_event_to_handle(event);
3494
3495 return VK_SUCCESS;
3496 }
3497
3498 void radv_DestroyEvent(
3499 VkDevice _device,
3500 VkEvent _event,
3501 const VkAllocationCallbacks* pAllocator)
3502 {
3503 RADV_FROM_HANDLE(radv_device, device, _device);
3504 RADV_FROM_HANDLE(radv_event, event, _event);
3505
3506 if (!event)
3507 return;
3508 device->ws->buffer_destroy(event->bo);
3509 vk_free2(&device->alloc, pAllocator, event);
3510 }
3511
3512 VkResult radv_GetEventStatus(
3513 VkDevice _device,
3514 VkEvent _event)
3515 {
3516 RADV_FROM_HANDLE(radv_event, event, _event);
3517
3518 if (*event->map == 1)
3519 return VK_EVENT_SET;
3520 return VK_EVENT_RESET;
3521 }
3522
3523 VkResult radv_SetEvent(
3524 VkDevice _device,
3525 VkEvent _event)
3526 {
3527 RADV_FROM_HANDLE(radv_event, event, _event);
3528 *event->map = 1;
3529
3530 return VK_SUCCESS;
3531 }
3532
3533 VkResult radv_ResetEvent(
3534 VkDevice _device,
3535 VkEvent _event)
3536 {
3537 RADV_FROM_HANDLE(radv_event, event, _event);
3538 *event->map = 0;
3539
3540 return VK_SUCCESS;
3541 }
3542
3543 VkResult radv_CreateBuffer(
3544 VkDevice _device,
3545 const VkBufferCreateInfo* pCreateInfo,
3546 const VkAllocationCallbacks* pAllocator,
3547 VkBuffer* pBuffer)
3548 {
3549 RADV_FROM_HANDLE(radv_device, device, _device);
3550 struct radv_buffer *buffer;
3551
3552 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
3553
3554 buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
3555 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3556 if (buffer == NULL)
3557 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3558
3559 buffer->size = pCreateInfo->size;
3560 buffer->usage = pCreateInfo->usage;
3561 buffer->bo = NULL;
3562 buffer->offset = 0;
3563 buffer->flags = pCreateInfo->flags;
3564
3565 buffer->shareable = vk_find_struct_const(pCreateInfo->pNext,
3566 EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR) != NULL;
3567
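/* Sparse buffers only reserve a virtual address range here; physical
 * pages are bound later through vkQueueBindSparse().
 */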
3568 if (pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) {
3569 buffer->bo = device->ws->buffer_create(device->ws,
3570 align64(buffer->size, 4096),
3571 4096, 0, RADEON_FLAG_VIRTUAL);
3572 if (!buffer->bo) {
3573 vk_free2(&device->alloc, pAllocator, buffer);
3574 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
3575 }
3576 }
3577
3578 *pBuffer = radv_buffer_to_handle(buffer);
3579
3580 return VK_SUCCESS;
3581 }
3582
3583 void radv_DestroyBuffer(
3584 VkDevice _device,
3585 VkBuffer _buffer,
3586 const VkAllocationCallbacks* pAllocator)
3587 {
3588 RADV_FROM_HANDLE(radv_device, device, _device);
3589 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3590
3591 if (!buffer)
3592 return;
3593
3594 if (buffer->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)
3595 device->ws->buffer_destroy(buffer->bo);
3596
3597 vk_free2(&device->alloc, pAllocator, buffer);
3598 }
3599
3600 static inline unsigned
3601 si_tile_mode_index(const struct radv_image *image, unsigned level, bool stencil)
3602 {
3603 if (stencil)
3604 return image->surface.u.legacy.stencil_tiling_index[level];
3605 else
3606 return image->surface.u.legacy.tiling_index[level];
3607 }
3608
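/* For 3D views the layer count is the view depth; for array views it is
 * the last accessible layer plus one.
 */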
3609 static uint32_t radv_surface_max_layer_count(struct radv_image_view *iview)
3610 {
3611 return iview->type == VK_IMAGE_VIEW_TYPE_3D ? iview->extent.depth : (iview->base_layer + iview->layer_count);
3612 }
3613
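/* Compute the CB_DCC_CONTROL register value for a color view (VI+ only). */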
3614 static uint32_t
3615 radv_init_dcc_control_reg(struct radv_device *device,
3616 struct radv_image_view *iview)
3617 {
3618 unsigned max_uncompressed_block_size = V_028C78_MAX_BLOCK_SIZE_256B;
3619 unsigned min_compressed_block_size = V_028C78_MIN_BLOCK_SIZE_32B;
3620 unsigned max_compressed_block_size;
3621 unsigned independent_64b_blocks;
3622
3623 if (device->physical_device->rad_info.chip_class < VI)
3624 return 0;
3625
3626 if (iview->image->info.samples > 1) {
3627 if (iview->image->surface.bpe == 1)
3628 max_uncompressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
3629 else if (iview->image->surface.bpe == 2)
3630 max_uncompressed_block_size = V_028C78_MAX_BLOCK_SIZE_128B;
3631 }
3632
3633 if (!device->physical_device->rad_info.has_dedicated_vram) {
3634 /* amdvlk: [min-compressed-block-size] should be set to 32 for
3635 * dGPU and 64 for APU because all of our APUs to date use
3636 * DIMMs which have a request granularity size of 64B while all
3637 * other chips have a 32B request size.
3638 */
3639 min_compressed_block_size = V_028C78_MIN_BLOCK_SIZE_64B;
3640 }
3641
3642 if (iview->image->usage & (VK_IMAGE_USAGE_SAMPLED_BIT |
3643 VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
3644 VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)) {
3645 /* If this DCC image is potentially going to be used in texture
3646 * fetches, we need some special settings.
3647 */
3648 independent_64b_blocks = 1;
3649 max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
3650 } else {
3651 /* MAX_UNCOMPRESSED_BLOCK_SIZE must be >=
3652 * MAX_COMPRESSED_BLOCK_SIZE. Set MAX_COMPRESSED_BLOCK_SIZE as
3653 * big as possible for better compression state.
3654 */
3655 independent_64b_blocks = 0;
3656 max_compressed_block_size = max_uncompressed_block_size;
3657 }
3658
3659 return S_028C78_MAX_UNCOMPRESSED_BLOCK_SIZE(max_uncompressed_block_size) |
3660 S_028C78_MAX_COMPRESSED_BLOCK_SIZE(max_compressed_block_size) |
3661 S_028C78_MIN_COMPRESSED_BLOCK_SIZE(min_compressed_block_size) |
3662 S_028C78_INDEPENDENT_64B_BLOCKS(independent_64b_blocks);
3663 }
3664
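/* Fill the CB_COLOR* register values for a color attachment view. */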
3665 static void
3666 radv_initialise_color_surface(struct radv_device *device,
3667 struct radv_color_buffer_info *cb,
3668 struct radv_image_view *iview)
3669 {
3670 const struct vk_format_description *desc;
3671 unsigned ntype, format, swap, endian;
3672 unsigned blend_clamp = 0, blend_bypass = 0;
3673 uint64_t va;
3674 const struct radeon_surf *surf = &iview->image->surface;
3675
3676 desc = vk_format_description(iview->vk_format);
3677
3678 memset(cb, 0, sizeof(*cb));
3679
3680 /* Intensity is implemented as Red, so treat it that way. */
3681 cb->cb_color_attrib = S_028C74_FORCE_DST_ALPHA_1(desc->swizzle[3] == VK_SWIZZLE_1);
3682
3683 va = radv_buffer_get_va(iview->bo) + iview->image->offset;
3684
3685 cb->cb_color_base = va >> 8;
3686
3687 if (device->physical_device->rad_info.chip_class >= GFX9) {
3688 struct gfx9_surf_meta_flags meta;
3689 if (iview->image->dcc_offset)
3690 meta = iview->image->surface.u.gfx9.dcc;
3691 else
3692 meta = iview->image->surface.u.gfx9.cmask;
3693
3694 cb->cb_color_attrib |= S_028C74_COLOR_SW_MODE(iview->image->surface.u.gfx9.surf.swizzle_mode) |
3695 S_028C74_FMASK_SW_MODE(iview->image->surface.u.gfx9.fmask.swizzle_mode) |
3696 S_028C74_RB_ALIGNED(meta.rb_aligned) |
3697 S_028C74_PIPE_ALIGNED(meta.pipe_aligned);
3698
3699 cb->cb_color_base += iview->image->surface.u.gfx9.surf_offset >> 8;
3700 cb->cb_color_base |= iview->image->surface.tile_swizzle;
3701 } else {
3702 const struct legacy_surf_level *level_info = &surf->u.legacy.level[iview->base_mip];
3703 unsigned pitch_tile_max, slice_tile_max, tile_mode_index;
3704
3705 cb->cb_color_base += level_info->offset >> 8;
3706 if (level_info->mode == RADEON_SURF_MODE_2D)
3707 cb->cb_color_base |= iview->image->surface.tile_swizzle;
3708
3709 pitch_tile_max = level_info->nblk_x / 8 - 1;
3710 slice_tile_max = (level_info->nblk_x * level_info->nblk_y) / 64 - 1;
3711 tile_mode_index = si_tile_mode_index(iview->image, iview->base_mip, false);
3712
3713 cb->cb_color_pitch = S_028C64_TILE_MAX(pitch_tile_max);
3714 cb->cb_color_slice = S_028C68_TILE_MAX(slice_tile_max);
3715 cb->cb_color_cmask_slice = iview->image->cmask.slice_tile_max;
3716
3717 cb->cb_color_attrib |= S_028C74_TILE_MODE_INDEX(tile_mode_index);
3718
3719 if (radv_image_has_fmask(iview->image)) {
3720 if (device->physical_device->rad_info.chip_class >= CIK)
3721 cb->cb_color_pitch |= S_028C64_FMASK_TILE_MAX(iview->image->fmask.pitch_in_pixels / 8 - 1);
3722 cb->cb_color_attrib |= S_028C74_FMASK_TILE_MODE_INDEX(iview->image->fmask.tile_mode_index);
3723 cb->cb_color_fmask_slice = S_028C88_TILE_MAX(iview->image->fmask.slice_tile_max);
3724 } else {
3725 /* This must be set for fast clear to work without FMASK. */
3726 if (device->physical_device->rad_info.chip_class >= CIK)
3727 cb->cb_color_pitch |= S_028C64_FMASK_TILE_MAX(pitch_tile_max);
3728 cb->cb_color_attrib |= S_028C74_FMASK_TILE_MODE_INDEX(tile_mode_index);
3729 cb->cb_color_fmask_slice = S_028C88_TILE_MAX(slice_tile_max);
3730 }
3731 }
3732
3733 /* CMASK variables */
3734 va = radv_buffer_get_va(iview->bo) + iview->image->offset;
3735 va += iview->image->cmask.offset;
3736 cb->cb_color_cmask = va >> 8;
3737
3738 va = radv_buffer_get_va(iview->bo) + iview->image->offset;
3739 va += iview->image->dcc_offset;
3740 cb->cb_dcc_base = va >> 8;
3741 cb->cb_dcc_base |= iview->image->surface.tile_swizzle;
3742
3743 uint32_t max_slice = radv_surface_max_layer_count(iview) - 1;
3744 cb->cb_color_view = S_028C6C_SLICE_START(iview->base_layer) |
3745 S_028C6C_SLICE_MAX(max_slice);
3746
3747 if (iview->image->info.samples > 1) {
3748 unsigned log_samples = util_logbase2(iview->image->info.samples);
3749
3750 cb->cb_color_attrib |= S_028C74_NUM_SAMPLES(log_samples) |
3751 S_028C74_NUM_FRAGMENTS(log_samples);
3752 }
3753
3754 if (radv_image_has_fmask(iview->image)) {
3755 va = radv_buffer_get_va(iview->bo) + iview->image->offset + iview->image->fmask.offset;
3756 cb->cb_color_fmask = va >> 8;
3757 cb->cb_color_fmask |= iview->image->fmask.tile_swizzle;
3758 } else {
3759 cb->cb_color_fmask = cb->cb_color_base;
3760 }
3761
3762 ntype = radv_translate_color_numformat(iview->vk_format,
3763 desc,
3764 vk_format_get_first_non_void_channel(iview->vk_format));
3765 format = radv_translate_colorformat(iview->vk_format);
3766 if (format == V_028C70_COLOR_INVALID || ntype == ~0u)
3767 radv_finishme("Illegal color\n");
3768 swap = radv_translate_colorswap(iview->vk_format, FALSE);
3769 endian = radv_colorformat_endian_swap(format);
3770
3771 /* blend clamp should be set for all NORM/SRGB types */
3772 if (ntype == V_028C70_NUMBER_UNORM ||
3773 ntype == V_028C70_NUMBER_SNORM ||
3774 ntype == V_028C70_NUMBER_SRGB)
3775 blend_clamp = 1;
3776
3777 /* set blend bypass according to docs if SINT/UINT or
3778 8/24 COLOR variants */
3779 if (ntype == V_028C70_NUMBER_UINT || ntype == V_028C70_NUMBER_SINT ||
3780 format == V_028C70_COLOR_8_24 || format == V_028C70_COLOR_24_8 ||
3781 format == V_028C70_COLOR_X24_8_32_FLOAT) {
3782 blend_clamp = 0;
3783 blend_bypass = 1;
3784 }
3785 #if 0
3786 if ((ntype == V_028C70_NUMBER_UINT || ntype == V_028C70_NUMBER_SINT) &&
3787 (format == V_028C70_COLOR_8 ||
3788 format == V_028C70_COLOR_8_8 ||
3789 format == V_028C70_COLOR_8_8_8_8))
3790 ->color_is_int8 = true;
3791 #endif
3792 cb->cb_color_info = S_028C70_FORMAT(format) |
3793 S_028C70_COMP_SWAP(swap) |
3794 S_028C70_BLEND_CLAMP(blend_clamp) |
3795 S_028C70_BLEND_BYPASS(blend_bypass) |
3796 S_028C70_SIMPLE_FLOAT(1) |
3797 S_028C70_ROUND_MODE(ntype != V_028C70_NUMBER_UNORM &&
3798 ntype != V_028C70_NUMBER_SNORM &&
3799 ntype != V_028C70_NUMBER_SRGB &&
3800 format != V_028C70_COLOR_8_24 &&
3801 format != V_028C70_COLOR_24_8) |
3802 S_028C70_NUMBER_TYPE(ntype) |
3803 S_028C70_ENDIAN(endian);
3804 if (radv_image_has_fmask(iview->image)) {
3805 cb->cb_color_info |= S_028C70_COMPRESSION(1);
3806 if (device->physical_device->rad_info.chip_class == SI) {
3807 unsigned fmask_bankh = util_logbase2(iview->image->fmask.bank_height);
3808 cb->cb_color_attrib |= S_028C74_FMASK_BANK_HEIGHT(fmask_bankh);
3809 }
3810 }
3811
3812 if (radv_image_has_cmask(iview->image) &&
3813 !(device->instance->debug_flags & RADV_DEBUG_NO_FAST_CLEARS))
3814 cb->cb_color_info |= S_028C70_FAST_CLEAR(1);
3815
3816 if (radv_dcc_enabled(iview->image, iview->base_mip))
3817 cb->cb_color_info |= S_028C70_DCC_ENABLE(1);
3818
3819 cb->cb_dcc_control = radv_init_dcc_control_reg(device, iview);
3820
3821 /* This must be set for fast clear to work without FMASK. */
3822 if (!radv_image_has_fmask(iview->image) &&
3823 device->physical_device->rad_info.chip_class == SI) {
3824 unsigned bankh = util_logbase2(iview->image->surface.u.legacy.bankh);
3825 cb->cb_color_attrib |= S_028C74_FMASK_BANK_HEIGHT(bankh);
3826 }
3827
3828 if (device->physical_device->rad_info.chip_class >= GFX9) {
3829 unsigned mip0_depth = iview->image->type == VK_IMAGE_TYPE_3D ?
3830 (iview->extent.depth - 1) : (iview->image->info.array_size - 1);
3831
3832 cb->cb_color_view |= S_028C6C_MIP_LEVEL(iview->base_mip);
3833 cb->cb_color_attrib |= S_028C74_MIP0_DEPTH(mip0_depth) |
3834 S_028C74_RESOURCE_TYPE(iview->image->surface.u.gfx9.resource_type);
3835 cb->cb_color_attrib2 = S_028C68_MIP0_WIDTH(iview->extent.width - 1) |
3836 S_028C68_MIP0_HEIGHT(iview->extent.height - 1) |
3837 S_028C68_MAX_MIP(iview->image->info.levels - 1);
3838 }
3839 }
3840
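/* Number of Z planes programmed into DECOMPRESS_ON_N_ZPLANES for
 * TC-compatible HTILE; depends on generation, format and sample count.
 */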
3841 static unsigned
3842 radv_calc_decompress_on_z_planes(struct radv_device *device,
3843 struct radv_image_view *iview)
3844 {
3845 unsigned max_zplanes = 0;
3846
3847 assert(radv_image_is_tc_compat_htile(iview->image));
3848
3849 if (device->physical_device->rad_info.chip_class >= GFX9) {
3850 /* Default value for 32-bit depth surfaces. */
3851 max_zplanes = 4;
3852
3853 if (iview->vk_format == VK_FORMAT_D16_UNORM &&
3854 iview->image->info.samples > 1)
3855 max_zplanes = 2;
3856
3857 max_zplanes = max_zplanes + 1;
3858 } else {
3859 if (iview->vk_format == VK_FORMAT_D16_UNORM) {
3860 /* Do not enable Z plane compression for 16-bit depth
3861 * surfaces because it isn't supported on GFX8. Only
3862 * 32-bit depth surfaces are supported by the hardware.
3863 * This allows us to maintain shader compatibility and to
3864 * reduce the number of depth decompressions.
3865 */
3866 max_zplanes = 1;
3867 } else {
3868 if (iview->image->info.samples <= 1)
3869 max_zplanes = 5;
3870 else if (iview->image->info.samples <= 4)
3871 max_zplanes = 3;
3872 else
3873 max_zplanes = 2;
3874 }
3875 }
3876
3877 return max_zplanes;
3878 }
3879
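/* Fill the DB_* register values for a depth/stencil attachment view. */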
3880 static void
3881 radv_initialise_ds_surface(struct radv_device *device,
3882 struct radv_ds_buffer_info *ds,
3883 struct radv_image_view *iview)
3884 {
3885 unsigned level = iview->base_mip;
3886 unsigned format, stencil_format;
3887 uint64_t va, s_offs, z_offs;
3888 bool stencil_only = false;
3889 memset(ds, 0, sizeof(*ds));
3890 switch (iview->image->vk_format) {
3891 case VK_FORMAT_D24_UNORM_S8_UINT:
3892 case VK_FORMAT_X8_D24_UNORM_PACK32:
3893 ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-24);
3894 ds->offset_scale = 2.0f;
3895 break;
3896 case VK_FORMAT_D16_UNORM:
3897 case VK_FORMAT_D16_UNORM_S8_UINT:
3898 ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-16);
3899 ds->offset_scale = 4.0f;
3900 break;
3901 case VK_FORMAT_D32_SFLOAT:
3902 case VK_FORMAT_D32_SFLOAT_S8_UINT:
3903 ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-23) |
3904 S_028B78_POLY_OFFSET_DB_IS_FLOAT_FMT(1);
3905 ds->offset_scale = 1.0f;
3906 break;
3907 case VK_FORMAT_S8_UINT:
3908 stencil_only = true;
3909 break;
3910 default:
3911 break;
3912 }
3913
3914 format = radv_translate_dbformat(iview->image->vk_format);
3915 stencil_format = iview->image->surface.has_stencil ?
3916 V_028044_STENCIL_8 : V_028044_STENCIL_INVALID;
3917
3918 uint32_t max_slice = radv_surface_max_layer_count(iview) - 1;
3919 ds->db_depth_view = S_028008_SLICE_START(iview->base_layer) |
3920 S_028008_SLICE_MAX(max_slice);
3921
3922 ds->db_htile_data_base = 0;
3923 ds->db_htile_surface = 0;
3924
3925 va = radv_buffer_get_va(iview->bo) + iview->image->offset;
3926 s_offs = z_offs = va;
3927
3928 if (device->physical_device->rad_info.chip_class >= GFX9) {
3929 assert(iview->image->surface.u.gfx9.surf_offset == 0);
3930 s_offs += iview->image->surface.u.gfx9.stencil_offset;
3931
3932 ds->db_z_info = S_028038_FORMAT(format) |
3933 S_028038_NUM_SAMPLES(util_logbase2(iview->image->info.samples)) |
3934 S_028038_SW_MODE(iview->image->surface.u.gfx9.surf.swizzle_mode) |
3935 S_028038_MAXMIP(iview->image->info.levels - 1);
3936 ds->db_stencil_info = S_02803C_FORMAT(stencil_format) |
3937 S_02803C_SW_MODE(iview->image->surface.u.gfx9.stencil.swizzle_mode);
3938
3939 ds->db_z_info2 = S_028068_EPITCH(iview->image->surface.u.gfx9.surf.epitch);
3940 ds->db_stencil_info2 = S_02806C_EPITCH(iview->image->surface.u.gfx9.stencil.epitch);
3941 ds->db_depth_view |= S_028008_MIPID(level);
3942
3943 ds->db_depth_size = S_02801C_X_MAX(iview->image->info.width - 1) |
3944 S_02801C_Y_MAX(iview->image->info.height - 1);
3945
3946 if (radv_htile_enabled(iview->image, level)) {
3947 ds->db_z_info |= S_028038_TILE_SURFACE_ENABLE(1);
3948
3949 if (radv_image_is_tc_compat_htile(iview->image)) {
3950 unsigned max_zplanes =
3951 radv_calc_decompress_on_z_planes(device, iview);
3952
3953 ds->db_z_info |= S_028038_DECOMPRESS_ON_N_ZPLANES(max_zplanes) |
3954 S_028038_ITERATE_FLUSH(1);
3955 ds->db_stencil_info |= S_02803C_ITERATE_FLUSH(1);
3956 }
3957
3958 if (!iview->image->surface.has_stencil)
3959 /* Use all of the htile_buffer for depth if there's no stencil. */
3960 ds->db_stencil_info |= S_02803C_TILE_STENCIL_DISABLE(1);
3961 va = radv_buffer_get_va(iview->bo) + iview->image->offset +
3962 iview->image->htile_offset;
3963 ds->db_htile_data_base = va >> 8;
3964 ds->db_htile_surface = S_028ABC_FULL_CACHE(1) |
3965 S_028ABC_PIPE_ALIGNED(iview->image->surface.u.gfx9.htile.pipe_aligned) |
3966 S_028ABC_RB_ALIGNED(iview->image->surface.u.gfx9.htile.rb_aligned);
3967 }
3968 } else {
3969 const struct legacy_surf_level *level_info = &iview->image->surface.u.legacy.level[level];
3970
3971 if (stencil_only)
3972 level_info = &iview->image->surface.u.legacy.stencil_level[level];
3973
3974 z_offs += iview->image->surface.u.legacy.level[level].offset;
3975 s_offs += iview->image->surface.u.legacy.stencil_level[level].offset;
3976
3977 ds->db_depth_info = S_02803C_ADDR5_SWIZZLE_MASK(!radv_image_is_tc_compat_htile(iview->image));
3978 ds->db_z_info = S_028040_FORMAT(format) | S_028040_ZRANGE_PRECISION(1);
3979 ds->db_stencil_info = S_028044_FORMAT(stencil_format);
3980
3981 if (iview->image->info.samples > 1)
3982 ds->db_z_info |= S_028040_NUM_SAMPLES(util_logbase2(iview->image->info.samples));
3983
3984 if (device->physical_device->rad_info.chip_class >= CIK) {
3985 struct radeon_info *info = &device->physical_device->rad_info;
3986 unsigned tiling_index = iview->image->surface.u.legacy.tiling_index[level];
3987 unsigned stencil_index = iview->image->surface.u.legacy.stencil_tiling_index[level];
3988 unsigned macro_index = iview->image->surface.u.legacy.macro_tile_index;
3989 unsigned tile_mode = info->si_tile_mode_array[tiling_index];
3990 unsigned stencil_tile_mode = info->si_tile_mode_array[stencil_index];
3991 unsigned macro_mode = info->cik_macrotile_mode_array[macro_index];
3992
3993 if (stencil_only)
3994 tile_mode = stencil_tile_mode;
3995
3996 ds->db_depth_info |=
3997 S_02803C_ARRAY_MODE(G_009910_ARRAY_MODE(tile_mode)) |
3998 S_02803C_PIPE_CONFIG(G_009910_PIPE_CONFIG(tile_mode)) |
3999 S_02803C_BANK_WIDTH(G_009990_BANK_WIDTH(macro_mode)) |
4000 S_02803C_BANK_HEIGHT(G_009990_BANK_HEIGHT(macro_mode)) |
4001 S_02803C_MACRO_TILE_ASPECT(G_009990_MACRO_TILE_ASPECT(macro_mode)) |
4002 S_02803C_NUM_BANKS(G_009990_NUM_BANKS(macro_mode));
4003 ds->db_z_info |= S_028040_TILE_SPLIT(G_009910_TILE_SPLIT(tile_mode));
4004 ds->db_stencil_info |= S_028044_TILE_SPLIT(G_009910_TILE_SPLIT(stencil_tile_mode));
4005 } else {
4006 unsigned tile_mode_index = si_tile_mode_index(iview->image, level, false);
4007 ds->db_z_info |= S_028040_TILE_MODE_INDEX(tile_mode_index);
4008 tile_mode_index = si_tile_mode_index(iview->image, level, true);
4009 ds->db_stencil_info |= S_028044_TILE_MODE_INDEX(tile_mode_index);
4010 if (stencil_only)
4011 ds->db_z_info |= S_028040_TILE_MODE_INDEX(tile_mode_index);
4012 }
4013
4014 ds->db_depth_size = S_028058_PITCH_TILE_MAX((level_info->nblk_x / 8) - 1) |
4015 S_028058_HEIGHT_TILE_MAX((level_info->nblk_y / 8) - 1);
4016 ds->db_depth_slice = S_02805C_SLICE_TILE_MAX((level_info->nblk_x * level_info->nblk_y) / 64 - 1);
4017
4018 if (radv_htile_enabled(iview->image, level)) {
4019 ds->db_z_info |= S_028040_TILE_SURFACE_ENABLE(1);
4020
4021 if (!iview->image->surface.has_stencil &&
4022 !radv_image_is_tc_compat_htile(iview->image))
4023 /* Use all of the htile_buffer for depth if there's no stencil. */
4024 ds->db_stencil_info |= S_028044_TILE_STENCIL_DISABLE(1);
4025
4026 va = radv_buffer_get_va(iview->bo) + iview->image->offset +
4027 iview->image->htile_offset;
4028 ds->db_htile_data_base = va >> 8;
4029 ds->db_htile_surface = S_028ABC_FULL_CACHE(1);
4030
4031 if (radv_image_is_tc_compat_htile(iview->image)) {
4032 unsigned max_zplanes =
4033 radv_calc_decompress_on_z_planes(device, iview);
4034
4035 ds->db_htile_surface |= S_028ABC_TC_COMPATIBLE(1);
4036 ds->db_z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(max_zplanes);
4037 }
4038 }
4039 }
4040
4041 ds->db_z_read_base = ds->db_z_write_base = z_offs >> 8;
4042 ds->db_stencil_read_base = ds->db_stencil_write_base = s_offs >> 8;
4043 }
4044
4045 VkResult radv_CreateFramebuffer(
4046 VkDevice _device,
4047 const VkFramebufferCreateInfo* pCreateInfo,
4048 const VkAllocationCallbacks* pAllocator,
4049 VkFramebuffer* pFramebuffer)
4050 {
4051 RADV_FROM_HANDLE(radv_device, device, _device);
4052 struct radv_framebuffer *framebuffer;
4053
4054 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
4055
4056 size_t size = sizeof(*framebuffer) +
4057 sizeof(struct radv_attachment_info) * pCreateInfo->attachmentCount;
4058 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
4059 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
4060 if (framebuffer == NULL)
4061 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
4062
4063 framebuffer->attachment_count = pCreateInfo->attachmentCount;
4064 framebuffer->width = pCreateInfo->width;
4065 framebuffer->height = pCreateInfo->height;
4066 framebuffer->layers = pCreateInfo->layers;
4067 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
4068 VkImageView _iview = pCreateInfo->pAttachments[i];
4069 struct radv_image_view *iview = radv_image_view_from_handle(_iview);
4070 framebuffer->attachments[i].attachment = iview;
4071 if (iview->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) {
4072 radv_initialise_color_surface(device, &framebuffer->attachments[i].cb, iview);
4073 } else if (iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
4074 radv_initialise_ds_surface(device, &framebuffer->attachments[i].ds, iview);
4075 }
4076 framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
4077 framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
4078 framebuffer->layers = MIN2(framebuffer->layers, radv_surface_max_layer_count(iview));
4079 }
4080
4081 *pFramebuffer = radv_framebuffer_to_handle(framebuffer);
4082 return VK_SUCCESS;
4083 }
4084
4085 void radv_DestroyFramebuffer(
4086 VkDevice _device,
4087 VkFramebuffer _fb,
4088 const VkAllocationCallbacks* pAllocator)
4089 {
4090 RADV_FROM_HANDLE(radv_device, device, _device);
4091 RADV_FROM_HANDLE(radv_framebuffer, fb, _fb);
4092
4093 if (!fb)
4094 return;
4095 vk_free2(&device->alloc, pAllocator, fb);
4096 }
4097
4098 static unsigned radv_tex_wrap(VkSamplerAddressMode address_mode)
4099 {
4100 switch (address_mode) {
4101 case VK_SAMPLER_ADDRESS_MODE_REPEAT:
4102 return V_008F30_SQ_TEX_WRAP;
4103 case VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT:
4104 return V_008F30_SQ_TEX_MIRROR;
4105 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE:
4106 return V_008F30_SQ_TEX_CLAMP_LAST_TEXEL;
4107 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER:
4108 return V_008F30_SQ_TEX_CLAMP_BORDER;
4109 case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE:
4110 return V_008F30_SQ_TEX_MIRROR_ONCE_LAST_TEXEL;
4111 default:
4112 unreachable("illegal tex wrap mode");
4113 break;
4114 }
4115 }
4116
4117 static unsigned
4118 radv_tex_compare(VkCompareOp op)
4119 {
4120 switch (op) {
4121 case VK_COMPARE_OP_NEVER:
4122 return V_008F30_SQ_TEX_DEPTH_COMPARE_NEVER;
4123 case VK_COMPARE_OP_LESS:
4124 return V_008F30_SQ_TEX_DEPTH_COMPARE_LESS;
4125 case VK_COMPARE_OP_EQUAL:
4126 return V_008F30_SQ_TEX_DEPTH_COMPARE_EQUAL;
4127 case VK_COMPARE_OP_LESS_OR_EQUAL:
4128 return V_008F30_SQ_TEX_DEPTH_COMPARE_LESSEQUAL;
4129 case VK_COMPARE_OP_GREATER:
4130 return V_008F30_SQ_TEX_DEPTH_COMPARE_GREATER;
4131 case VK_COMPARE_OP_NOT_EQUAL:
4132 return V_008F30_SQ_TEX_DEPTH_COMPARE_NOTEQUAL;
4133 case VK_COMPARE_OP_GREATER_OR_EQUAL:
4134 return V_008F30_SQ_TEX_DEPTH_COMPARE_GREATEREQUAL;
4135 case VK_COMPARE_OP_ALWAYS:
4136 return V_008F30_SQ_TEX_DEPTH_COMPARE_ALWAYS;
4137 default:
4138 unreachable("illegal compare mode");
4139 break;
4140 }
4141 }
4142
4143 static unsigned
4144 radv_tex_filter(VkFilter filter, unsigned max_aniso)
4145 {
4146 switch (filter) {
4147 case VK_FILTER_NEAREST:
4148 return (max_aniso > 1 ? V_008F38_SQ_TEX_XY_FILTER_ANISO_POINT :
4149 V_008F38_SQ_TEX_XY_FILTER_POINT);
4150 case VK_FILTER_LINEAR:
4151 return (max_aniso > 1 ? V_008F38_SQ_TEX_XY_FILTER_ANISO_BILINEAR :
4152 V_008F38_SQ_TEX_XY_FILTER_BILINEAR);
4153 case VK_FILTER_CUBIC_IMG:
4154 default:
4155 fprintf(stderr, "illegal texture filter\n");
4156 return 0;
4157 }
4158 }
4159
4160 static unsigned
4161 radv_tex_mipfilter(VkSamplerMipmapMode mode)
4162 {
4163 switch (mode) {
4164 case VK_SAMPLER_MIPMAP_MODE_NEAREST:
4165 return V_008F38_SQ_TEX_Z_FILTER_POINT;
4166 case VK_SAMPLER_MIPMAP_MODE_LINEAR:
4167 return V_008F38_SQ_TEX_Z_FILTER_LINEAR;
4168 default:
4169 return V_008F38_SQ_TEX_Z_FILTER_NONE;
4170 }
4171 }
4172
4173 static unsigned
4174 radv_tex_bordercolor(VkBorderColor bcolor)
4175 {
4176 switch (bcolor) {
4177 case VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK:
4178 case VK_BORDER_COLOR_INT_TRANSPARENT_BLACK:
4179 return V_008F3C_SQ_TEX_BORDER_COLOR_TRANS_BLACK;
4180 case VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK:
4181 case VK_BORDER_COLOR_INT_OPAQUE_BLACK:
4182 return V_008F3C_SQ_TEX_BORDER_COLOR_OPAQUE_BLACK;
4183 case VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE:
4184 case VK_BORDER_COLOR_INT_OPAQUE_WHITE:
4185 return V_008F3C_SQ_TEX_BORDER_COLOR_OPAQUE_WHITE;
4186 default:
4187 break;
4188 }
4189 return 0;
4190 }
4191
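/* Map the max anisotropy value to the log2-style MAX_ANISO_RATIO encoding:
 * 0 = 1x, 1 = 2x, 2 = 4x, 3 = 8x, 4 = 16x.
 */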
4192 static unsigned
4193 radv_tex_aniso_filter(unsigned filter)
4194 {
4195 if (filter < 2)
4196 return 0;
4197 if (filter < 4)
4198 return 1;
4199 if (filter < 8)
4200 return 2;
4201 if (filter < 16)
4202 return 3;
4203 return 4;
4204 }
4205
4206 static unsigned
4207 radv_tex_filter_mode(VkSamplerReductionModeEXT mode)
4208 {
4209 switch (mode) {
4210 case VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT:
4211 return SQ_IMG_FILTER_MODE_BLEND;
4212 case VK_SAMPLER_REDUCTION_MODE_MIN_EXT:
4213 return SQ_IMG_FILTER_MODE_MIN;
4214 case VK_SAMPLER_REDUCTION_MODE_MAX_EXT:
4215 return SQ_IMG_FILTER_MODE_MAX;
4216 default:
4217 break;
4218 }
4219 return 0;
4220 }
4221
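/* Fill the four dwords of the hardware sampler descriptor
 * (SQ_IMG_SAMP_WORD0..3) from the VkSamplerCreateInfo.
 */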
4222 static void
4223 radv_init_sampler(struct radv_device *device,
4224 struct radv_sampler *sampler,
4225 const VkSamplerCreateInfo *pCreateInfo)
4226 {
4227 uint32_t max_aniso = pCreateInfo->anisotropyEnable && pCreateInfo->maxAnisotropy > 1.0 ?
4228 (uint32_t) pCreateInfo->maxAnisotropy : 0;
4229 uint32_t max_aniso_ratio = radv_tex_aniso_filter(max_aniso);
4230 bool is_vi = (device->physical_device->rad_info.chip_class >= VI);
4231 unsigned filter_mode = SQ_IMG_FILTER_MODE_BLEND;
4232
4233 const struct VkSamplerReductionModeCreateInfoEXT *sampler_reduction =
4234 vk_find_struct_const(pCreateInfo->pNext,
4235 SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT);
4236 if (sampler_reduction)
4237 filter_mode = radv_tex_filter_mode(sampler_reduction->reductionMode);
4238
4239 sampler->state[0] = (S_008F30_CLAMP_X(radv_tex_wrap(pCreateInfo->addressModeU)) |
4240 S_008F30_CLAMP_Y(radv_tex_wrap(pCreateInfo->addressModeV)) |
4241 S_008F30_CLAMP_Z(radv_tex_wrap(pCreateInfo->addressModeW)) |
4242 S_008F30_MAX_ANISO_RATIO(max_aniso_ratio) |
4243 S_008F30_DEPTH_COMPARE_FUNC(radv_tex_compare(pCreateInfo->compareOp)) |
4244 S_008F30_FORCE_UNNORMALIZED(pCreateInfo->unnormalizedCoordinates ? 1 : 0) |
4245 S_008F30_ANISO_THRESHOLD(max_aniso_ratio >> 1) |
4246 S_008F30_ANISO_BIAS(max_aniso_ratio) |
4247 S_008F30_DISABLE_CUBE_WRAP(0) |
4248 S_008F30_COMPAT_MODE(is_vi) |
4249 S_008F30_FILTER_MODE(filter_mode));
4250 sampler->state[1] = (S_008F34_MIN_LOD(S_FIXED(CLAMP(pCreateInfo->minLod, 0, 15), 8)) |
4251 S_008F34_MAX_LOD(S_FIXED(CLAMP(pCreateInfo->maxLod, 0, 15), 8)) |
4252 S_008F34_PERF_MIP(max_aniso_ratio ? max_aniso_ratio + 6 : 0));
4253 sampler->state[2] = (S_008F38_LOD_BIAS(S_FIXED(CLAMP(pCreateInfo->mipLodBias, -16, 16), 8)) |
4254 S_008F38_XY_MAG_FILTER(radv_tex_filter(pCreateInfo->magFilter, max_aniso)) |
4255 S_008F38_XY_MIN_FILTER(radv_tex_filter(pCreateInfo->minFilter, max_aniso)) |
4256 S_008F38_MIP_FILTER(radv_tex_mipfilter(pCreateInfo->mipmapMode)) |
4257 S_008F38_MIP_POINT_PRECLAMP(0) |
4258 S_008F38_DISABLE_LSB_CEIL(device->physical_device->rad_info.chip_class <= VI) |
4259 S_008F38_FILTER_PREC_FIX(1) |
4260 S_008F38_ANISO_OVERRIDE(is_vi));
4261 sampler->state[3] = (S_008F3C_BORDER_COLOR_PTR(0) |
4262 S_008F3C_BORDER_COLOR_TYPE(radv_tex_bordercolor(pCreateInfo->borderColor)));
4263 }
4264
4265 VkResult radv_CreateSampler(
4266 VkDevice _device,
4267 const VkSamplerCreateInfo* pCreateInfo,
4268 const VkAllocationCallbacks* pAllocator,
4269 VkSampler* pSampler)
4270 {
4271 RADV_FROM_HANDLE(radv_device, device, _device);
4272 struct radv_sampler *sampler;
4273
4274 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
4275
4276 sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
4277 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
4278 if (!sampler)
4279 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
4280
4281 radv_init_sampler(device, sampler, pCreateInfo);
4282 *pSampler = radv_sampler_to_handle(sampler);
4283
4284 return VK_SUCCESS;
4285 }
4286
4287 void radv_DestroySampler(
4288 VkDevice _device,
4289 VkSampler _sampler,
4290 const VkAllocationCallbacks* pAllocator)
4291 {
4292 RADV_FROM_HANDLE(radv_device, device, _device);
4293 RADV_FROM_HANDLE(radv_sampler, sampler, _sampler);
4294
4295 if (!sampler)
4296 return;
4297 vk_free2(&device->alloc, pAllocator, sampler);
4298 }
4299
4300 /* vk_icd.h does not declare this function, so we declare it here to
4301 * suppress Wmissing-prototypes.
4302 */
4303 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
4304 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
4305
4306 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
4307 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
4308 {
4309 /* For the full details on loader interface versioning, see
4310 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
4311 * What follows is a condensed summary, to help you navigate the large and
4312 * confusing official doc.
4313 *
4314 * - Loader interface v0 is incompatible with later versions. We don't
4315 * support it.
4316 *
4317 * - In loader interface v1:
4318 * - The first ICD entrypoint called by the loader is
4319 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
4320 * entrypoint.
4321 * - The ICD must statically expose no other Vulkan symbol unless it is
4322 * linked with -Bsymbolic.
4323 * - Each dispatchable Vulkan handle created by the ICD must be
4324 * a pointer to a struct whose first member is VK_LOADER_DATA. The
4325 * ICD must initialize VK_LOADER_DATA.loadMagic to ICD_LOADER_MAGIC.
4326 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
4327 * vkDestroySurfaceKHR(). The ICD must be capable of working with
4328 * such loader-managed surfaces.
4329 *
4330 * - Loader interface v2 differs from v1 in:
4331 * - The first ICD entrypoint called by the loader is
4332 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
4333 * statically expose this entrypoint.
4334 *
4335 * - Loader interface v3 differs from v2 in:
4336 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
4337 * vkDestroySurfaceKHR(), and any other API that uses VkSurfaceKHR,
4338 * because the loader no longer does so.
4339 */
4340 *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
4341 return VK_SUCCESS;
4342 }
4343
4344 VkResult radv_GetMemoryFdKHR(VkDevice _device,
4345 const VkMemoryGetFdInfoKHR *pGetFdInfo,
4346 int *pFD)
4347 {
4348 RADV_FROM_HANDLE(radv_device, device, _device);
4349 RADV_FROM_HANDLE(radv_device_memory, memory, pGetFdInfo->memory);
4350
4351 assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
4352
4353 /* At the moment, we support only the below handle types. */
4354 assert(pGetFdInfo->handleType ==
4355 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR ||
4356 pGetFdInfo->handleType ==
4357 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
4358
4359 bool ret = radv_get_memory_fd(device, memory, pFD);
4360 if (ret == false)
4361 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
4362 return VK_SUCCESS;
4363 }
4364
4365 VkResult radv_GetMemoryFdPropertiesKHR(VkDevice _device,
4366 VkExternalMemoryHandleTypeFlagBitsKHR handleType,
4367 int fd,
4368 VkMemoryFdPropertiesKHR *pMemoryFdProperties)
4369 {
4370 switch (handleType) {
4371 case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
4372 pMemoryFdProperties->memoryTypeBits = (1 << RADV_MEM_TYPE_COUNT) - 1;
4373 return VK_SUCCESS;
4374
4375 default:
4376 /* The valid usage section for this function says:
4377 *
4378 * "handleType must not be one of the handle types defined as
4379 * opaque."
4380 *
4381 * So opaque handle types fall into the default "unsupported" case.
4382 */
4383 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
4384 }
4385 }
4386
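/* Import a syncobj from an opaque fd, destroying any previously imported
 * one; the fd is consumed on success.
 */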
4387 static VkResult radv_import_opaque_fd(struct radv_device *device,
4388 int fd,
4389 uint32_t *syncobj)
4390 {
4391 uint32_t syncobj_handle = 0;
4392 int ret = device->ws->import_syncobj(device->ws, fd, &syncobj_handle);
4393 if (ret != 0)
4394 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
4395
4396 if (*syncobj)
4397 device->ws->destroy_syncobj(device->ws, *syncobj);
4398
4399 *syncobj = syncobj_handle;
4400 close(fd);
4401
4402 return VK_SUCCESS;
4403 }
4404
4405 static VkResult radv_import_sync_fd(struct radv_device *device,
4406 int fd,
4407 uint32_t *syncobj)
4408 {
4409 /* If we need to create a syncobj, we do it locally so that on error we don't
4410 * leave the destination fence/semaphore syncobj in an undetermined state. */
4411 uint32_t syncobj_handle = *syncobj;
4412 if (!syncobj_handle) {
4413 int ret = device->ws->create_syncobj(device->ws, &syncobj_handle);
4414 if (ret) {
4415 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
4416 }
4417 }
4418
4419 if (fd == -1) {
4420 device->ws->signal_syncobj(device->ws, syncobj_handle);
4421 } else {
4422 int ret = device->ws->import_syncobj_from_sync_file(device->ws, syncobj_handle, fd);
4423 if (ret != 0)
4424 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
4425 }
4426
4427 *syncobj = syncobj_handle;
4428 if (fd != -1)
4429 close(fd);
4430
4431 return VK_SUCCESS;
4432 }
4433
4434 VkResult radv_ImportSemaphoreFdKHR(VkDevice _device,
4435 const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
4436 {
4437 RADV_FROM_HANDLE(radv_device, device, _device);
4438 RADV_FROM_HANDLE(radv_semaphore, sem, pImportSemaphoreFdInfo->semaphore);
4439 uint32_t *syncobj_dst = NULL;
4440
4441 if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR) {
4442 syncobj_dst = &sem->temp_syncobj;
4443 } else {
4444 syncobj_dst = &sem->syncobj;
4445 }
4446
4447 switch(pImportSemaphoreFdInfo->handleType) {
4448 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
4449 return radv_import_opaque_fd(device, pImportSemaphoreFdInfo->fd, syncobj_dst);
4450 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR:
4451 return radv_import_sync_fd(device, pImportSemaphoreFdInfo->fd, syncobj_dst);
4452 default:
4453 unreachable("Unhandled semaphore handle type");
4454 }
4455 }
4456
4457 VkResult radv_GetSemaphoreFdKHR(VkDevice _device,
4458 const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
4459 int *pFd)
4460 {
4461 RADV_FROM_HANDLE(radv_device, device, _device);
4462 RADV_FROM_HANDLE(radv_semaphore, sem, pGetFdInfo->semaphore);
4463 int ret;
4464 uint32_t syncobj_handle;
4465
4466 if (sem->temp_syncobj)
4467 syncobj_handle = sem->temp_syncobj;
4468 else
4469 syncobj_handle = sem->syncobj;
4470
4471 switch(pGetFdInfo->handleType) {
4472 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
4473 ret = device->ws->export_syncobj(device->ws, syncobj_handle, pFd);
4474 break;
4475 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR:
4476 ret = device->ws->export_syncobj_to_sync_file(device->ws, syncobj_handle, pFd);
4477 if (!ret) {
4478 if (sem->temp_syncobj) {
4479 device->ws->destroy_syncobj(device->ws, sem->temp_syncobj);
4480 sem->temp_syncobj = 0;
4481 } else {
4482 device->ws->reset_syncobj(device->ws, syncobj_handle);
4483 }
4484 }
4485 break;
4486 default:
4487 unreachable("Unhandled semaphore handle type");
4488 }
4489
4490 if (ret)
4491 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
4492 return VK_SUCCESS;
4493 }
4494
4495 void radv_GetPhysicalDeviceExternalSemaphoreProperties(
4496 VkPhysicalDevice physicalDevice,
4497 const VkPhysicalDeviceExternalSemaphoreInfoKHR* pExternalSemaphoreInfo,
4498 VkExternalSemaphorePropertiesKHR* pExternalSemaphoreProperties)
4499 {
4500 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
4501
4502 /* Require has_syncobj_wait_for_submit for the syncobj signal ioctl introduced at virtually the same time */
4503 if (pdevice->rad_info.has_syncobj_wait_for_submit &&
4504 (pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR ||
4505 pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR)) {
4506 pExternalSemaphoreProperties->exportFromImportedHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR;
4507 pExternalSemaphoreProperties->compatibleHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR;
4508 pExternalSemaphoreProperties->externalSemaphoreFeatures = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
4509 VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
4510 } else if (pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR) {
4511 pExternalSemaphoreProperties->exportFromImportedHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
4512 pExternalSemaphoreProperties->compatibleHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
4513 pExternalSemaphoreProperties->externalSemaphoreFeatures = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
4514 VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
4515 } else {
4516 pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
4517 pExternalSemaphoreProperties->compatibleHandleTypes = 0;
4518 pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
4519 }
4520 }
4521
4522 VkResult radv_ImportFenceFdKHR(VkDevice _device,
4523 const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
4524 {
4525 RADV_FROM_HANDLE(radv_device, device, _device);
4526 RADV_FROM_HANDLE(radv_fence, fence, pImportFenceFdInfo->fence);
4527 uint32_t *syncobj_dst = NULL;
4528
4529
4530 if (pImportFenceFdInfo->flags & VK_FENCE_IMPORT_TEMPORARY_BIT_KHR) {
4531 syncobj_dst = &fence->temp_syncobj;
4532 } else {
4533 syncobj_dst = &fence->syncobj;
4534 }
4535
4536 switch(pImportFenceFdInfo->handleType) {
4537 case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
4538 return radv_import_opaque_fd(device, pImportFenceFdInfo->fd, syncobj_dst);
4539 case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR:
4540 return radv_import_sync_fd(device, pImportFenceFdInfo->fd, syncobj_dst);
4541 default:
4542 unreachable("Unhandled fence handle type");
4543 }
4544 }
4545
4546 VkResult radv_GetFenceFdKHR(VkDevice _device,
4547 const VkFenceGetFdInfoKHR *pGetFdInfo,
4548 int *pFd)
4549 {
4550 RADV_FROM_HANDLE(radv_device, device, _device);
4551 RADV_FROM_HANDLE(radv_fence, fence, pGetFdInfo->fence);
4552 int ret;
4553 uint32_t syncobj_handle;
4554
4555 if (fence->temp_syncobj)
4556 syncobj_handle = fence->temp_syncobj;
4557 else
4558 syncobj_handle = fence->syncobj;
4559
4560 switch(pGetFdInfo->handleType) {
4561 case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
4562 ret = device->ws->export_syncobj(device->ws, syncobj_handle, pFd);
4563 break;
4564 case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR:
4565 ret = device->ws->export_syncobj_to_sync_file(device->ws, syncobj_handle, pFd);
4566 if (!ret) {
4567 if (fence->temp_syncobj) {
4568 device->ws->destroy_syncobj(device->ws, fence->temp_syncobj);
4569 fence->temp_syncobj = 0;
4570 } else {
4571 device->ws->reset_syncobj(device->ws, syncobj_handle);
4572 }
4573 }
4574 break;
4575 default:
4576 unreachable("Unhandled fence handle type");
4577 }
4578
4579 if (ret)
4580 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
4581 return VK_SUCCESS;
4582 }
4583
4584 void radv_GetPhysicalDeviceExternalFenceProperties(
4585 VkPhysicalDevice physicalDevice,
4586 const VkPhysicalDeviceExternalFenceInfoKHR* pExternalFenceInfo,
4587 VkExternalFencePropertiesKHR* pExternalFenceProperties)
4588 {
4589 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
4590
4591 if (pdevice->rad_info.has_syncobj_wait_for_submit &&
4592 (pExternalFenceInfo->handleType == VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR ||
4593 pExternalFenceInfo->handleType == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR)) {
4594 pExternalFenceProperties->exportFromImportedHandleTypes = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR | VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR;
4595 pExternalFenceProperties->compatibleHandleTypes = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR | VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR;
4596 pExternalFenceProperties->externalFenceFeatures = VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT_KHR |
4597 VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT_KHR;
4598 } else {
4599 pExternalFenceProperties->exportFromImportedHandleTypes = 0;
4600 pExternalFenceProperties->compatibleHandleTypes = 0;
4601 pExternalFenceProperties->externalFenceFeatures = 0;
4602 }
4603 }
4604
4605 VkResult
4606 radv_CreateDebugReportCallbackEXT(VkInstance _instance,
4607 const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
4608 const VkAllocationCallbacks* pAllocator,
4609 VkDebugReportCallbackEXT* pCallback)
4610 {
4611 RADV_FROM_HANDLE(radv_instance, instance, _instance);
4612 return vk_create_debug_report_callback(&instance->debug_report_callbacks,
4613 pCreateInfo, pAllocator, &instance->alloc,
4614 pCallback);
4615 }
4616
4617 void
4618 radv_DestroyDebugReportCallbackEXT(VkInstance _instance,
4619 VkDebugReportCallbackEXT _callback,
4620 const VkAllocationCallbacks* pAllocator)
4621 {
4622 RADV_FROM_HANDLE(radv_instance, instance, _instance);
4623 vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
4624 _callback, pAllocator, &instance->alloc);
4625 }
4626
4627 void
4628 radv_DebugReportMessageEXT(VkInstance _instance,
4629 VkDebugReportFlagsEXT flags,
4630 VkDebugReportObjectTypeEXT objectType,
4631 uint64_t object,
4632 size_t location,
4633 int32_t messageCode,
4634 const char* pLayerPrefix,
4635 const char* pMessage)
4636 {
4637 RADV_FROM_HANDLE(radv_instance, instance, _instance);
4638 vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
4639 object, location, messageCode, pLayerPrefix, pMessage);
4640 }
4641
4642 void
4643 radv_GetDeviceGroupPeerMemoryFeatures(
4644 VkDevice device,
4645 uint32_t heapIndex,
4646 uint32_t localDeviceIndex,
4647 uint32_t remoteDeviceIndex,
4648 VkPeerMemoryFeatureFlags* pPeerMemoryFeatures)
4649 {
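/* radv only exposes single-GPU device groups, so the local and remote
 * device must be identical and every peer memory feature is supported.
 */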
4650 assert(localDeviceIndex == remoteDeviceIndex);
4651
4652 *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
4653 VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
4654 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
4655 VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
4656 }