radv: move RADV_TRACE_FILE functions to radv_debug.c
[mesa.git] / src / amd / vulkan / radv_device.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 */
27
28 #include <stdbool.h>
29 #include <string.h>
30 #include <unistd.h>
31 #include <fcntl.h>
32 #include "radv_debug.h"
33 #include "radv_private.h"
34 #include "radv_cs.h"
35 #include "util/disk_cache.h"
36 #include "util/strtod.h"
37 #include "vk_util.h"
38 #include <xf86drm.h>
39 #include <amdgpu.h>
40 #include <amdgpu_drm.h>
41 #include "amdgpu_id.h"
42 #include "winsys/amdgpu/radv_amdgpu_winsys_public.h"
43 #include "ac_llvm_util.h"
44 #include "vk_format.h"
45 #include "sid.h"
46 #include "gfx9d.h"
47 #include "util/debug.h"
48
49 static int
50 radv_device_get_cache_uuid(enum radeon_family family, void *uuid)
51 {
52 uint32_t mesa_timestamp, llvm_timestamp;
53 uint16_t f = family;
54 memset(uuid, 0, VK_UUID_SIZE);
55 if (!disk_cache_get_function_timestamp(radv_device_get_cache_uuid, &mesa_timestamp) ||
56 !disk_cache_get_function_timestamp(LLVMInitializeAMDGPUTargetInfo, &llvm_timestamp))
57 return -1;
58
59 memcpy(uuid, &mesa_timestamp, 4);
60 memcpy((char*)uuid + 4, &llvm_timestamp, 4);
61 memcpy((char*)uuid + 8, &f, 2);
62 snprintf((char*)uuid + 10, VK_UUID_SIZE - 10, "radv");
63 return 0;
64 }
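/* Note on the layout built above (a reading aid, not driver code): the
 * 16-byte VK_UUID_SIZE blob ends up as
 *
 *   bytes  0..3   Mesa build timestamp
 *   bytes  4..7   LLVM build timestamp
 *   bytes  8..9   radeon_family as a uint16_t
 *   bytes 10..15  "radv" plus NUL padding from the memset
 *
 * so rebuilding Mesa or LLVM, or switching GPU family, changes the
 * pipeline-cache UUID and invalidates previously cached pipelines.
 */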
65
66 static void
67 radv_get_driver_uuid(void *uuid)
68 {
69 ac_compute_driver_uuid(uuid, VK_UUID_SIZE);
70 }
71
72 static void
73 radv_get_device_uuid(struct radeon_info *info, void *uuid)
74 {
75 ac_compute_device_uuid(info, uuid, VK_UUID_SIZE);
76 }
77
78 static const VkExtensionProperties instance_extensions[] = {
79 {
80 .extensionName = VK_KHR_SURFACE_EXTENSION_NAME,
81 .specVersion = 25,
82 },
83 #ifdef VK_USE_PLATFORM_XCB_KHR
84 {
85 .extensionName = VK_KHR_XCB_SURFACE_EXTENSION_NAME,
86 .specVersion = 6,
87 },
88 #endif
89 #ifdef VK_USE_PLATFORM_XLIB_KHR
90 {
91 .extensionName = VK_KHR_XLIB_SURFACE_EXTENSION_NAME,
92 .specVersion = 6,
93 },
94 #endif
95 #ifdef VK_USE_PLATFORM_WAYLAND_KHR
96 {
97 .extensionName = VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME,
98 .specVersion = 6,
99 },
100 #endif
101 {
102 .extensionName = VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
103 .specVersion = 1,
104 },
105 {
106 .extensionName = VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME,
107 .specVersion = 1,
108 },
109 {
110 .extensionName = VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME,
111 .specVersion = 1,
112 },
113 };
114
115 static const VkExtensionProperties common_device_extensions[] = {
116 {
117 .extensionName = VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME,
118 .specVersion = 1,
119 },
120 {
121 .extensionName = VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME,
122 .specVersion = 1,
123 },
124 {
125 .extensionName = VK_KHR_MAINTENANCE1_EXTENSION_NAME,
126 .specVersion = 1,
127 },
128 {
129 .extensionName = VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME,
130 .specVersion = 1,
131 },
132 {
133 .extensionName = VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME,
134 .specVersion = 1,
135 },
136 {
137 .extensionName = VK_KHR_SWAPCHAIN_EXTENSION_NAME,
138 .specVersion = 68,
139 },
140 {
141 .extensionName = VK_AMD_DRAW_INDIRECT_COUNT_EXTENSION_NAME,
142 .specVersion = 1,
143 },
144 {
145 .extensionName = VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME,
146 .specVersion = 1,
147 },
148 {
149 .extensionName = VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME,
150 .specVersion = 1,
151 },
152 {
153 .extensionName = VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME,
154 .specVersion = 1,
155 },
156 {
157 .extensionName = VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME,
158 .specVersion = 1,
159 },
160 {
161 .extensionName = VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME,
162 .specVersion = 1,
163 },
164 {
165 .extensionName = VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_EXTENSION_NAME,
166 .specVersion = 1,
167 },
168 {
169 .extensionName = VK_KHR_VARIABLE_POINTERS_EXTENSION_NAME,
170 .specVersion = 1,
171 },
172 };
173 static const VkExtensionProperties ext_sema_device_extensions[] = {
174 {
175 .extensionName = VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME,
176 .specVersion = 1,
177 },
178 {
179 .extensionName = VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME,
180 .specVersion = 1,
181 },
182 {
183 .extensionName = VK_KHX_MULTIVIEW_EXTENSION_NAME,
184 .specVersion = 1,
185 },
186 };
187
188 static VkResult
189 radv_extensions_register(struct radv_instance *instance,
190 struct radv_extensions *extensions,
191 const VkExtensionProperties *new_ext,
192 uint32_t num_ext)
193 {
194 size_t new_size;
195 VkExtensionProperties *new_ptr;
196
197 assert(new_ext && num_ext > 0);
198
199 if (!new_ext)
200 return VK_ERROR_INITIALIZATION_FAILED;
201
202 new_size = (extensions->num_ext + num_ext) * sizeof(VkExtensionProperties);
203 new_ptr = vk_realloc(&instance->alloc, extensions->ext_array,
204 new_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
205
206 /* Old array continues to be valid, update nothing */
207 if (!new_ptr)
208 return VK_ERROR_OUT_OF_HOST_MEMORY;
209
210 memcpy(&new_ptr[extensions->num_ext], new_ext,
211 num_ext * sizeof(VkExtensionProperties));
212 extensions->ext_array = new_ptr;
213 extensions->num_ext += num_ext;
214
215 return VK_SUCCESS;
216 }
217
218 static void
219 radv_extensions_finish(struct radv_instance *instance,
220 struct radv_extensions *extensions)
221 {
222 assert(extensions);
223
224 if (!extensions)
225 radv_loge("Attemted to free invalid extension struct\n");
226
227 if (extensions->ext_array)
228 vk_free(&instance->alloc, extensions->ext_array);
229 }
230
231 static bool
232 is_extension_enabled(const VkExtensionProperties *extensions,
233 size_t num_ext,
234 const char *name)
235 {
236 assert(extensions && name);
237
238 for (uint32_t i = 0; i < num_ext; i++) {
239 if (strcmp(name, extensions[i].extensionName) == 0)
240 return true;
241 }
242
243 return false;
244 }
245
246 static const char *
247 get_chip_name(enum radeon_family family)
248 {
249 switch (family) {
250 case CHIP_TAHITI: return "AMD RADV TAHITI";
251 case CHIP_PITCAIRN: return "AMD RADV PITCAIRN";
252 case CHIP_VERDE: return "AMD RADV CAPE VERDE";
253 case CHIP_OLAND: return "AMD RADV OLAND";
254 case CHIP_HAINAN: return "AMD RADV HAINAN";
255 case CHIP_BONAIRE: return "AMD RADV BONAIRE";
256 case CHIP_KAVERI: return "AMD RADV KAVERI";
257 case CHIP_KABINI: return "AMD RADV KABINI";
258 case CHIP_HAWAII: return "AMD RADV HAWAII";
259 case CHIP_MULLINS: return "AMD RADV MULLINS";
260 case CHIP_TONGA: return "AMD RADV TONGA";
261 case CHIP_ICELAND: return "AMD RADV ICELAND";
262 case CHIP_CARRIZO: return "AMD RADV CARRIZO";
263 case CHIP_FIJI: return "AMD RADV FIJI";
264 case CHIP_POLARIS10: return "AMD RADV POLARIS10";
265 case CHIP_POLARIS11: return "AMD RADV POLARIS11";
266 case CHIP_POLARIS12: return "AMD RADV POLARIS12";
267 case CHIP_STONEY: return "AMD RADV STONEY";
268 case CHIP_VEGA10: return "AMD RADV VEGA";
269 case CHIP_RAVEN: return "AMD RADV RAVEN";
270 default: return "AMD RADV unknown";
271 }
272 }
273
274 static VkResult
275 radv_physical_device_init(struct radv_physical_device *device,
276 struct radv_instance *instance,
277 drmDevicePtr drm_device)
278 {
279 const char *path = drm_device->nodes[DRM_NODE_RENDER];
280 VkResult result;
281 drmVersionPtr version;
282 int fd;
283
284 fd = open(path, O_RDWR | O_CLOEXEC);
285 if (fd < 0)
286 return VK_ERROR_INCOMPATIBLE_DRIVER;
287
288 version = drmGetVersion(fd);
289 if (!version) {
290 close(fd);
291 return vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
292 "failed to get version %s: %m", path);
293 }
294
295 if (strcmp(version->name, "amdgpu")) {
296 drmFreeVersion(version);
297 close(fd);
298 return VK_ERROR_INCOMPATIBLE_DRIVER;
299 }
300 drmFreeVersion(version);
301
302 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
303 device->instance = instance;
304 assert(strlen(path) < ARRAY_SIZE(device->path));
305 strncpy(device->path, path, ARRAY_SIZE(device->path));
306
307 device->ws = radv_amdgpu_winsys_create(fd, instance->debug_flags,
308 instance->perftest_flags);
309 if (!device->ws) {
310 result = VK_ERROR_INCOMPATIBLE_DRIVER;
311 goto fail;
312 }
313
314 device->local_fd = fd;
315 device->ws->query_info(device->ws, &device->rad_info);
316 result = radv_init_wsi(device);
317 if (result != VK_SUCCESS) {
318 device->ws->destroy(device->ws);
319 goto fail;
320 }
321
322 if (radv_device_get_cache_uuid(device->rad_info.family, device->cache_uuid)) {
323 radv_finish_wsi(device);
324 device->ws->destroy(device->ws);
325 result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
326 "cannot generate UUID");
327 goto fail;
328 }
329
330 result = radv_extensions_register(instance,
331 &device->extensions,
332 common_device_extensions,
333 ARRAY_SIZE(common_device_extensions));
334 if (result != VK_SUCCESS)
335 goto fail;
336
337 if (device->rad_info.has_syncobj) {
338 result = radv_extensions_register(instance,
339 &device->extensions,
340 ext_sema_device_extensions,
341 ARRAY_SIZE(ext_sema_device_extensions));
342 if (result != VK_SUCCESS)
343 goto fail;
344 }
345
346 fprintf(stderr, "WARNING: radv is not a conformant vulkan implementation, testing use only.\n");
347 device->name = get_chip_name(device->rad_info.family);
348
349 radv_get_driver_uuid(&device->driver_uuid);
350 radv_get_device_uuid(&device->rad_info, &device->device_uuid);
351
352 if (device->rad_info.family == CHIP_STONEY ||
353 device->rad_info.chip_class >= GFX9) {
354 device->has_rbplus = true;
355 device->rbplus_allowed = device->rad_info.family == CHIP_STONEY;
356 }
357
358 return VK_SUCCESS;
359
360 fail:
361 close(fd);
362 return result;
363 }
364
365 static void
366 radv_physical_device_finish(struct radv_physical_device *device)
367 {
368 radv_extensions_finish(device->instance, &device->extensions);
369 radv_finish_wsi(device);
370 device->ws->destroy(device->ws);
371 close(device->local_fd);
372 }
373
374 static void *
375 default_alloc_func(void *pUserData, size_t size, size_t align,
376 VkSystemAllocationScope allocationScope)
377 {
378 return malloc(size);
379 }
380
381 static void *
382 default_realloc_func(void *pUserData, void *pOriginal, size_t size,
383 size_t align, VkSystemAllocationScope allocationScope)
384 {
385 return realloc(pOriginal, size);
386 }
387
388 static void
389 default_free_func(void *pUserData, void *pMemory)
390 {
391 free(pMemory);
392 }
393
394 static const VkAllocationCallbacks default_alloc = {
395 .pUserData = NULL,
396 .pfnAllocation = default_alloc_func,
397 .pfnReallocation = default_realloc_func,
398 .pfnFree = default_free_func,
399 };
400
401 static const struct debug_control radv_debug_options[] = {
402 {"nofastclears", RADV_DEBUG_NO_FAST_CLEARS},
403 {"nodcc", RADV_DEBUG_NO_DCC},
404 {"shaders", RADV_DEBUG_DUMP_SHADERS},
405 {"nocache", RADV_DEBUG_NO_CACHE},
406 {"shaderstats", RADV_DEBUG_DUMP_SHADER_STATS},
407 {"nohiz", RADV_DEBUG_NO_HIZ},
408 {"nocompute", RADV_DEBUG_NO_COMPUTE_QUEUE},
409 {"unsafemath", RADV_DEBUG_UNSAFE_MATH},
410 {"allbos", RADV_DEBUG_ALL_BOS},
411 {"noibs", RADV_DEBUG_NO_IBS},
412 {NULL, 0}
413 };
414
415 static const struct debug_control radv_perftest_options[] = {
416 {"batchchain", RADV_PERFTEST_BATCHCHAIN},
417 {"sisched", RADV_PERFTEST_SISCHED},
418 {NULL, 0}
419 };
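/* Both tables are fed to parse_debug_string() in radv_CreateInstance below;
 * RADV_DEBUG and RADV_PERFTEST take comma-separated lists of the names
 * above. An illustrative invocation (hypothetical application name):
 *
 *   RADV_DEBUG=nodcc,shaders RADV_PERFTEST=sisched ./my_vulkan_app
 *
 * which sets RADV_DEBUG_NO_DCC | RADV_DEBUG_DUMP_SHADERS in debug_flags
 * and RADV_PERFTEST_SISCHED in perftest_flags.
 */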
420
421 VkResult radv_CreateInstance(
422 const VkInstanceCreateInfo* pCreateInfo,
423 const VkAllocationCallbacks* pAllocator,
424 VkInstance* pInstance)
425 {
426 struct radv_instance *instance;
427
428 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
429
430 uint32_t client_version;
431 if (pCreateInfo->pApplicationInfo &&
432 pCreateInfo->pApplicationInfo->apiVersion != 0) {
433 client_version = pCreateInfo->pApplicationInfo->apiVersion;
434 } else {
435 client_version = VK_MAKE_VERSION(1, 0, 0);
436 }
437
438 if (VK_MAKE_VERSION(1, 0, 0) > client_version ||
439 client_version > VK_MAKE_VERSION(1, 0, 0xfff)) {
440 return vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
441 "Client requested version %d.%d.%d",
442 VK_VERSION_MAJOR(client_version),
443 VK_VERSION_MINOR(client_version),
444 VK_VERSION_PATCH(client_version));
445 }
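/* For reference: VK_MAKE_VERSION packs (major, minor, patch) as
 * (major << 22) | (minor << 12) | patch, so the check above accepts
 * 0x00400000 (1.0.0) through 0x00400fff (1.0.4095) and rejects any
 * 1.1+ or 2.x request.
 */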
446
447 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
448 if (!is_extension_enabled(instance_extensions,
449 ARRAY_SIZE(instance_extensions),
450 pCreateInfo->ppEnabledExtensionNames[i]))
451 return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
452 }
453
454 instance = vk_alloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
455 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
456 if (!instance)
457 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
458
459 memset(instance, 0, sizeof(*instance));
460
461 instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
462
463 if (pAllocator)
464 instance->alloc = *pAllocator;
465 else
466 instance->alloc = default_alloc;
467
468 instance->apiVersion = client_version;
469 instance->physicalDeviceCount = -1;
470
471 _mesa_locale_init();
472
473 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
474
475 instance->debug_flags = parse_debug_string(getenv("RADV_DEBUG"),
476 radv_debug_options);
477
478 instance->perftest_flags = parse_debug_string(getenv("RADV_PERFTEST"),
479 radv_perftest_options);
480
481 *pInstance = radv_instance_to_handle(instance);
482
483 return VK_SUCCESS;
484 }
485
486 void radv_DestroyInstance(
487 VkInstance _instance,
488 const VkAllocationCallbacks* pAllocator)
489 {
490 RADV_FROM_HANDLE(radv_instance, instance, _instance);
491
492 if (!instance)
493 return;
494
495 for (int i = 0; i < instance->physicalDeviceCount; ++i) {
496 radv_physical_device_finish(instance->physicalDevices + i);
497 }
498
499 VG(VALGRIND_DESTROY_MEMPOOL(instance));
500
501 _mesa_locale_fini();
502
503 vk_free(&instance->alloc, instance);
504 }
505
506 static VkResult
507 radv_enumerate_devices(struct radv_instance *instance)
508 {
509 /* TODO: Check for more devices? */
510 drmDevicePtr devices[8];
511 VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
512 int max_devices;
513
514 instance->physicalDeviceCount = 0;
515
516 max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
517 if (max_devices < 1)
518 return VK_ERROR_INCOMPATIBLE_DRIVER;
519
520 for (unsigned i = 0; i < (unsigned)max_devices; i++) {
521 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
522 devices[i]->bustype == DRM_BUS_PCI &&
523 devices[i]->deviceinfo.pci->vendor_id == 0x1002) {
524
525 result = radv_physical_device_init(instance->physicalDevices +
526 instance->physicalDeviceCount,
527 instance,
528 devices[i]);
529 if (result == VK_SUCCESS)
530 ++instance->physicalDeviceCount;
531 else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
532 break;
533 }
534 }
535 drmFreeDevices(devices, max_devices);
536
537 return result;
538 }
539
540 VkResult radv_EnumeratePhysicalDevices(
541 VkInstance _instance,
542 uint32_t* pPhysicalDeviceCount,
543 VkPhysicalDevice* pPhysicalDevices)
544 {
545 RADV_FROM_HANDLE(radv_instance, instance, _instance);
546 VkResult result;
547
548 if (instance->physicalDeviceCount < 0) {
549 result = radv_enumerate_devices(instance);
550 if (result != VK_SUCCESS &&
551 result != VK_ERROR_INCOMPATIBLE_DRIVER)
552 return result;
553 }
554
555 if (!pPhysicalDevices) {
556 *pPhysicalDeviceCount = instance->physicalDeviceCount;
557 } else {
558 *pPhysicalDeviceCount = MIN2(*pPhysicalDeviceCount, instance->physicalDeviceCount);
559 for (unsigned i = 0; i < *pPhysicalDeviceCount; ++i)
560 pPhysicalDevices[i] = radv_physical_device_to_handle(instance->physicalDevices + i);
561 }
562
563 return *pPhysicalDeviceCount < instance->physicalDeviceCount ? VK_INCOMPLETE
564 : VK_SUCCESS;
565 }
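/* Callers drive this entry point with the standard Vulkan two-call idiom.
 * A minimal client-side sketch (hypothetical application code, not part of
 * the driver):
 *
 *   uint32_t count = 0;
 *   vkEnumeratePhysicalDevices(instance, &count, NULL);  // query count
 *   VkPhysicalDevice *devs = malloc(count * sizeof(*devs));
 *   VkResult res = vkEnumeratePhysicalDevices(instance, &count, devs);
 *   // res is VK_INCOMPLETE when the supplied count was too small
 */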
566
567 void radv_GetPhysicalDeviceFeatures(
568 VkPhysicalDevice physicalDevice,
569 VkPhysicalDeviceFeatures* pFeatures)
570 {
571 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
572 bool is_gfx9 = pdevice->rad_info.chip_class >= GFX9;
573 memset(pFeatures, 0, sizeof(*pFeatures));
574
575 *pFeatures = (VkPhysicalDeviceFeatures) {
576 .robustBufferAccess = true,
577 .fullDrawIndexUint32 = true,
578 .imageCubeArray = true,
579 .independentBlend = true,
580 .geometryShader = !is_gfx9,
581 .tessellationShader = !is_gfx9,
582 .sampleRateShading = true,
583 .dualSrcBlend = true,
584 .logicOp = true,
585 .multiDrawIndirect = true,
586 .drawIndirectFirstInstance = true,
587 .depthClamp = true,
588 .depthBiasClamp = true,
589 .fillModeNonSolid = true,
590 .depthBounds = true,
591 .wideLines = true,
592 .largePoints = true,
593 .alphaToOne = true,
594 .multiViewport = true,
595 .samplerAnisotropy = true,
596 .textureCompressionETC2 = false,
597 .textureCompressionASTC_LDR = false,
598 .textureCompressionBC = true,
599 .occlusionQueryPrecise = true,
600 .pipelineStatisticsQuery = true,
601 .vertexPipelineStoresAndAtomics = true,
602 .fragmentStoresAndAtomics = true,
603 .shaderTessellationAndGeometryPointSize = true,
604 .shaderImageGatherExtended = true,
605 .shaderStorageImageExtendedFormats = true,
606 .shaderStorageImageMultisample = false,
607 .shaderUniformBufferArrayDynamicIndexing = true,
608 .shaderSampledImageArrayDynamicIndexing = true,
609 .shaderStorageBufferArrayDynamicIndexing = true,
610 .shaderStorageImageArrayDynamicIndexing = true,
611 .shaderStorageImageReadWithoutFormat = true,
612 .shaderStorageImageWriteWithoutFormat = true,
613 .shaderClipDistance = true,
614 .shaderCullDistance = true,
615 .shaderFloat64 = true,
616 .shaderInt64 = true,
617 .shaderInt16 = false,
618 .sparseBinding = true,
619 .variableMultisampleRate = true,
620 .inheritedQueries = true,
621 };
622 }
623
624 void radv_GetPhysicalDeviceFeatures2KHR(
625 VkPhysicalDevice physicalDevice,
626 VkPhysicalDeviceFeatures2KHR *pFeatures)
627 {
628 vk_foreach_struct(ext, pFeatures->pNext) {
629 switch (ext->sType) {
630 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
631 VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *)ext;
632 features->variablePointersStorageBuffer = true;
633 features->variablePointers = false;
634 break;
635 }
636 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHX: {
637 VkPhysicalDeviceMultiviewFeaturesKHX *features = (VkPhysicalDeviceMultiviewFeaturesKHX*)ext;
638 features->multiview = true;
639 features->multiviewGeometryShader = true;
640 features->multiviewTessellationShader = true;
641 break;
642 }
643 default:
644 break;
645 }
646 }
647 radv_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
648 }
649
650 void radv_GetPhysicalDeviceProperties(
651 VkPhysicalDevice physicalDevice,
652 VkPhysicalDeviceProperties* pProperties)
653 {
654 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
655 VkSampleCountFlags sample_counts = 0xf;
656
657 /* Make sure that the entire descriptor set is addressable with a signed
658 * 32-bit int. So the sum of all limits scaled by descriptor size has to
659 * be at most 2 GiB. A combined image & sampler descriptor counts against
660 * both limits. This limit is for the pipeline layout, not for the set
661 * layout, but there is no set limit, so we just set a pipeline limit. I
662 * don't think any app is going to hit this soon. */
663 size_t max_descriptor_set_size = ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
664 (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
665 32 /* storage buffer, 32 due to potential space wasted on alignment */ +
666 32 /* sampler, largest when combined with image */ +
667 64 /* sampled image */ +
668 64 /* storage image */);
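/* Worked out: the divisor is 32 + 32 + 32 + 64 + 64 = 224 bytes for one
 * descriptor of every type, so this evaluates to roughly
 * (2^31 - 16 * MAX_DYNAMIC_BUFFERS) / 224, i.e. a little under 9.6 million
 * descriptors of each kind before the signed 2 GiB bound is exceeded.
 */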
669
670 VkPhysicalDeviceLimits limits = {
671 .maxImageDimension1D = (1 << 14),
672 .maxImageDimension2D = (1 << 14),
673 .maxImageDimension3D = (1 << 11),
674 .maxImageDimensionCube = (1 << 14),
675 .maxImageArrayLayers = (1 << 11),
676 .maxTexelBufferElements = 128 * 1024 * 1024,
677 .maxUniformBufferRange = UINT32_MAX,
678 .maxStorageBufferRange = UINT32_MAX,
679 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
680 .maxMemoryAllocationCount = UINT32_MAX,
681 .maxSamplerAllocationCount = 64 * 1024,
682 .bufferImageGranularity = 64, /* A cache line */
683 .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
684 .maxBoundDescriptorSets = MAX_SETS,
685 .maxPerStageDescriptorSamplers = max_descriptor_set_size,
686 .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
687 .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
688 .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
689 .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
690 .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
691 .maxPerStageResources = max_descriptor_set_size,
692 .maxDescriptorSetSamplers = max_descriptor_set_size,
693 .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
694 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2,
695 .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
696 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2,
697 .maxDescriptorSetSampledImages = max_descriptor_set_size,
698 .maxDescriptorSetStorageImages = max_descriptor_set_size,
699 .maxDescriptorSetInputAttachments = max_descriptor_set_size,
700 .maxVertexInputAttributes = 32,
701 .maxVertexInputBindings = 32,
702 .maxVertexInputAttributeOffset = 2047,
703 .maxVertexInputBindingStride = 2048,
704 .maxVertexOutputComponents = 128,
705 .maxTessellationGenerationLevel = 64,
706 .maxTessellationPatchSize = 32,
707 .maxTessellationControlPerVertexInputComponents = 128,
708 .maxTessellationControlPerVertexOutputComponents = 128,
709 .maxTessellationControlPerPatchOutputComponents = 120,
710 .maxTessellationControlTotalOutputComponents = 4096,
711 .maxTessellationEvaluationInputComponents = 128,
712 .maxTessellationEvaluationOutputComponents = 128,
713 .maxGeometryShaderInvocations = 127,
714 .maxGeometryInputComponents = 64,
715 .maxGeometryOutputComponents = 128,
716 .maxGeometryOutputVertices = 256,
717 .maxGeometryTotalOutputComponents = 1024,
718 .maxFragmentInputComponents = 128,
719 .maxFragmentOutputAttachments = 8,
720 .maxFragmentDualSrcAttachments = 1,
721 .maxFragmentCombinedOutputResources = 8,
722 .maxComputeSharedMemorySize = 32768,
723 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
724 .maxComputeWorkGroupInvocations = 2048,
725 .maxComputeWorkGroupSize = {
726 2048,
727 2048,
728 2048
729 },
730 .subPixelPrecisionBits = 4 /* FIXME */,
731 .subTexelPrecisionBits = 4 /* FIXME */,
732 .mipmapPrecisionBits = 4 /* FIXME */,
733 .maxDrawIndexedIndexValue = UINT32_MAX,
734 .maxDrawIndirectCount = UINT32_MAX,
735 .maxSamplerLodBias = 16,
736 .maxSamplerAnisotropy = 16,
737 .maxViewports = MAX_VIEWPORTS,
738 .maxViewportDimensions = { (1 << 14), (1 << 14) },
739 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
740 .viewportSubPixelBits = 13, /* We take a float? */
741 .minMemoryMapAlignment = 4096, /* A page */
742 .minTexelBufferOffsetAlignment = 1,
743 .minUniformBufferOffsetAlignment = 4,
744 .minStorageBufferOffsetAlignment = 4,
745 .minTexelOffset = -32,
746 .maxTexelOffset = 31,
747 .minTexelGatherOffset = -32,
748 .maxTexelGatherOffset = 31,
749 .minInterpolationOffset = -2,
750 .maxInterpolationOffset = 2,
751 .subPixelInterpolationOffsetBits = 8,
752 .maxFramebufferWidth = (1 << 14),
753 .maxFramebufferHeight = (1 << 14),
754 .maxFramebufferLayers = (1 << 10),
755 .framebufferColorSampleCounts = sample_counts,
756 .framebufferDepthSampleCounts = sample_counts,
757 .framebufferStencilSampleCounts = sample_counts,
758 .framebufferNoAttachmentsSampleCounts = sample_counts,
759 .maxColorAttachments = MAX_RTS,
760 .sampledImageColorSampleCounts = sample_counts,
761 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
762 .sampledImageDepthSampleCounts = sample_counts,
763 .sampledImageStencilSampleCounts = sample_counts,
764 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
765 .maxSampleMaskWords = 1,
766 .timestampComputeAndGraphics = true,
767 .timestampPeriod = 1000000.0 / pdevice->rad_info.clock_crystal_freq,
768 .maxClipDistances = 8,
769 .maxCullDistances = 8,
770 .maxCombinedClipAndCullDistances = 8,
771 .discreteQueuePriorities = 1,
772 .pointSizeRange = { 0.125, 255.875 },
773 .lineWidthRange = { 0.0, 7.9921875 },
774 .pointSizeGranularity = (1.0 / 8.0),
775 .lineWidthGranularity = (1.0 / 128.0),
776 .strictLines = false, /* FINISHME */
777 .standardSampleLocations = true,
778 .optimalBufferCopyOffsetAlignment = 128,
779 .optimalBufferCopyRowPitchAlignment = 128,
780 .nonCoherentAtomSize = 64,
781 };
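/* timestampPeriod above converts the crystal clock (clock_crystal_freq,
 * in kHz here) into nanoseconds per timestamp tick: 1e6 / f_kHz equals
 * 1e9 / f_Hz. E.g. a 100000 kHz (100 MHz) counter gives a 10 ns period.
 */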
782
783 *pProperties = (VkPhysicalDeviceProperties) {
784 .apiVersion = VK_MAKE_VERSION(1, 0, 42),
785 .driverVersion = vk_get_driver_version(),
786 .vendorID = 0x1002,
787 .deviceID = pdevice->rad_info.pci_id,
788 .deviceType = pdevice->rad_info.has_dedicated_vram ? VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU : VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
789 .limits = limits,
790 .sparseProperties = {0},
791 };
792
793 strcpy(pProperties->deviceName, pdevice->name);
794 memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
795 }
796
797 void radv_GetPhysicalDeviceProperties2KHR(
798 VkPhysicalDevice physicalDevice,
799 VkPhysicalDeviceProperties2KHR *pProperties)
800 {
801 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
802 radv_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
803
804 vk_foreach_struct(ext, pProperties->pNext) {
805 switch (ext->sType) {
806 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
807 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
808 (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
809 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
810 break;
811 }
812 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
813 VkPhysicalDeviceIDPropertiesKHR *properties = (VkPhysicalDeviceIDPropertiesKHR*)ext;
814 memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
815 memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
816 properties->deviceLUIDValid = false;
817 break;
818 }
819 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHX: {
820 VkPhysicalDeviceMultiviewPropertiesKHX *properties = (VkPhysicalDeviceMultiviewPropertiesKHX*)ext;
821 properties->maxMultiviewViewCount = MAX_VIEWS;
822 properties->maxMultiviewInstanceIndex = INT_MAX;
823 break;
824 }
825 default:
826 break;
827 }
828 }
829 }
830
831 static void radv_get_physical_device_queue_family_properties(
832 struct radv_physical_device* pdevice,
833 uint32_t* pCount,
834 VkQueueFamilyProperties** pQueueFamilyProperties)
835 {
836 int num_queue_families = 1;
837 int idx;
838 if (pdevice->rad_info.num_compute_rings > 0 &&
839 pdevice->rad_info.chip_class >= CIK &&
840 !(pdevice->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE))
841 num_queue_families++;
842
843 if (pQueueFamilyProperties == NULL) {
844 *pCount = num_queue_families;
845 return;
846 }
847
848 if (!*pCount)
849 return;
850
851 idx = 0;
852 if (*pCount >= 1) {
853 *pQueueFamilyProperties[idx] = (VkQueueFamilyProperties) {
854 .queueFlags = VK_QUEUE_GRAPHICS_BIT |
855 VK_QUEUE_COMPUTE_BIT |
856 VK_QUEUE_TRANSFER_BIT |
857 VK_QUEUE_SPARSE_BINDING_BIT,
858 .queueCount = 1,
859 .timestampValidBits = 64,
860 .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
861 };
862 idx++;
863 }
864
865 if (pdevice->rad_info.num_compute_rings > 0 &&
866 pdevice->rad_info.chip_class >= CIK &&
867 !(pdevice->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE)) {
868 if (*pCount > idx) {
869 *pQueueFamilyProperties[idx] = (VkQueueFamilyProperties) {
870 .queueFlags = VK_QUEUE_COMPUTE_BIT |
871 VK_QUEUE_TRANSFER_BIT |
872 VK_QUEUE_SPARSE_BINDING_BIT,
873 .queueCount = pdevice->rad_info.num_compute_rings,
874 .timestampValidBits = 64,
875 .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
876 };
877 idx++;
878 }
879 }
880 *pCount = idx;
881 }
882
883 void radv_GetPhysicalDeviceQueueFamilyProperties(
884 VkPhysicalDevice physicalDevice,
885 uint32_t* pCount,
886 VkQueueFamilyProperties* pQueueFamilyProperties)
887 {
888 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
889 if (!pQueueFamilyProperties) {
890 radv_get_physical_device_queue_family_properties(pdevice, pCount, NULL);
891 return;
892 }
893 VkQueueFamilyProperties *properties[] = {
894 pQueueFamilyProperties + 0,
895 pQueueFamilyProperties + 1,
896 pQueueFamilyProperties + 2,
897 };
898 radv_get_physical_device_queue_family_properties(pdevice, pCount, properties);
899 assert(*pCount <= 3);
900 }
901
902 void radv_GetPhysicalDeviceQueueFamilyProperties2KHR(
903 VkPhysicalDevice physicalDevice,
904 uint32_t* pCount,
905 VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
906 {
907 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
908 if (!pQueueFamilyProperties) {
909 radv_get_physical_device_queue_family_properties(pdevice, pCount, NULL);
910 return;
911 }
912 VkQueueFamilyProperties *properties[] = {
913 &pQueueFamilyProperties[0].queueFamilyProperties,
914 &pQueueFamilyProperties[1].queueFamilyProperties,
915 &pQueueFamilyProperties[2].queueFamilyProperties,
916 };
917 radv_get_physical_device_queue_family_properties(pdevice, pCount, properties);
918 assert(*pCount <= 3);
919 }
920
921 void radv_GetPhysicalDeviceMemoryProperties(
922 VkPhysicalDevice physicalDevice,
923 VkPhysicalDeviceMemoryProperties *pMemoryProperties)
924 {
925 RADV_FROM_HANDLE(radv_physical_device, physical_device, physicalDevice);
926
927 STATIC_ASSERT(RADV_MEM_TYPE_COUNT <= VK_MAX_MEMORY_TYPES);
928
929 pMemoryProperties->memoryTypeCount = RADV_MEM_TYPE_COUNT;
930 pMemoryProperties->memoryTypes[RADV_MEM_TYPE_VRAM] = (VkMemoryType) {
931 .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
932 .heapIndex = RADV_MEM_HEAP_VRAM,
933 };
934 pMemoryProperties->memoryTypes[RADV_MEM_TYPE_GTT_WRITE_COMBINE] = (VkMemoryType) {
935 .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
936 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
937 .heapIndex = RADV_MEM_HEAP_GTT,
938 };
939 pMemoryProperties->memoryTypes[RADV_MEM_TYPE_VRAM_CPU_ACCESS] = (VkMemoryType) {
940 .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
941 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
942 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
943 .heapIndex = RADV_MEM_HEAP_VRAM_CPU_ACCESS,
944 };
945 pMemoryProperties->memoryTypes[RADV_MEM_TYPE_GTT_CACHED] = (VkMemoryType) {
946 .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
947 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
948 VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
949 .heapIndex = RADV_MEM_HEAP_GTT,
950 };
951
952 STATIC_ASSERT(RADV_MEM_HEAP_COUNT <= VK_MAX_MEMORY_HEAPS);
953 uint64_t visible_vram_size = MIN2(physical_device->rad_info.vram_size,
954 physical_device->rad_info.vram_vis_size);
955
956 pMemoryProperties->memoryHeapCount = RADV_MEM_HEAP_COUNT;
957 pMemoryProperties->memoryHeaps[RADV_MEM_HEAP_VRAM] = (VkMemoryHeap) {
958 .size = physical_device->rad_info.vram_size -
959 visible_vram_size,
960 .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
961 };
962 pMemoryProperties->memoryHeaps[RADV_MEM_HEAP_VRAM_CPU_ACCESS] = (VkMemoryHeap) {
963 .size = visible_vram_size,
964 .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
965 };
966 pMemoryProperties->memoryHeaps[RADV_MEM_HEAP_GTT] = (VkMemoryHeap) {
967 .size = physical_device->rad_info.gart_size,
968 .flags = 0,
969 };
970 }
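/* Applications pick among the four types exposed above by AND-ing the
 * memoryTypeBits of a resource with the property flags they need. A
 * client-side sketch (hypothetical helper, not driver code):
 *
 *   static int32_t find_mem_type(const VkPhysicalDeviceMemoryProperties *p,
 *                                uint32_t type_bits, VkMemoryPropertyFlags want)
 *   {
 *           for (uint32_t i = 0; i < p->memoryTypeCount; i++) {
 *                   if ((type_bits & (1u << i)) &&
 *                       (p->memoryTypes[i].propertyFlags & want) == want)
 *                           return i;
 *           }
 *           return -1; // no compatible type
 *   }
 *
 * e.g. want = HOST_VISIBLE | HOST_COHERENT matches
 * RADV_MEM_TYPE_GTT_WRITE_COMBINE, RADV_MEM_TYPE_VRAM_CPU_ACCESS and
 * RADV_MEM_TYPE_GTT_CACHED.
 */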
971
972 void radv_GetPhysicalDeviceMemoryProperties2KHR(
973 VkPhysicalDevice physicalDevice,
974 VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
975 {
976 radv_GetPhysicalDeviceMemoryProperties(physicalDevice,
977 &pMemoryProperties->memoryProperties);
978 }
979
980 static VkResult
981 radv_queue_init(struct radv_device *device, struct radv_queue *queue,
982 int queue_family_index, int idx)
983 {
984 queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
985 queue->device = device;
986 queue->queue_family_index = queue_family_index;
987 queue->queue_idx = idx;
988
989 queue->hw_ctx = device->ws->ctx_create(device->ws);
990 if (!queue->hw_ctx)
991 return VK_ERROR_OUT_OF_HOST_MEMORY;
992
993 return VK_SUCCESS;
994 }
995
996 static void
997 radv_queue_finish(struct radv_queue *queue)
998 {
999 if (queue->hw_ctx)
1000 queue->device->ws->ctx_destroy(queue->hw_ctx);
1001
1002 if (queue->initial_preamble_cs)
1003 queue->device->ws->cs_destroy(queue->initial_preamble_cs);
1004 if (queue->continue_preamble_cs)
1005 queue->device->ws->cs_destroy(queue->continue_preamble_cs);
1006 if (queue->descriptor_bo)
1007 queue->device->ws->buffer_destroy(queue->descriptor_bo);
1008 if (queue->scratch_bo)
1009 queue->device->ws->buffer_destroy(queue->scratch_bo);
1010 if (queue->esgs_ring_bo)
1011 queue->device->ws->buffer_destroy(queue->esgs_ring_bo);
1012 if (queue->gsvs_ring_bo)
1013 queue->device->ws->buffer_destroy(queue->gsvs_ring_bo);
1014 if (queue->tess_factor_ring_bo)
1015 queue->device->ws->buffer_destroy(queue->tess_factor_ring_bo);
1016 if (queue->tess_offchip_ring_bo)
1017 queue->device->ws->buffer_destroy(queue->tess_offchip_ring_bo);
1018 if (queue->compute_scratch_bo)
1019 queue->device->ws->buffer_destroy(queue->compute_scratch_bo);
1020 }
1021
1022 static void
1023 radv_device_init_gs_info(struct radv_device *device)
1024 {
1025 switch (device->physical_device->rad_info.family) {
1026 case CHIP_OLAND:
1027 case CHIP_HAINAN:
1028 case CHIP_KAVERI:
1029 case CHIP_KABINI:
1030 case CHIP_MULLINS:
1031 case CHIP_ICELAND:
1032 case CHIP_CARRIZO:
1033 case CHIP_STONEY:
1034 device->gs_table_depth = 16;
1035 return;
1036 case CHIP_TAHITI:
1037 case CHIP_PITCAIRN:
1038 case CHIP_VERDE:
1039 case CHIP_BONAIRE:
1040 case CHIP_HAWAII:
1041 case CHIP_TONGA:
1042 case CHIP_FIJI:
1043 case CHIP_POLARIS10:
1044 case CHIP_POLARIS11:
1045 case CHIP_POLARIS12:
1046 case CHIP_VEGA10:
1047 case CHIP_RAVEN:
1048 device->gs_table_depth = 32;
1049 return;
1050 default:
1051 unreachable("unknown GPU");
1052 }
1053 }
1054
1055 VkResult radv_CreateDevice(
1056 VkPhysicalDevice physicalDevice,
1057 const VkDeviceCreateInfo* pCreateInfo,
1058 const VkAllocationCallbacks* pAllocator,
1059 VkDevice* pDevice)
1060 {
1061 RADV_FROM_HANDLE(radv_physical_device, physical_device, physicalDevice);
1062 VkResult result;
1063 struct radv_device *device;
1064
1065 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
1066 if (!is_extension_enabled(physical_device->extensions.ext_array,
1067 physical_device->extensions.num_ext,
1068 pCreateInfo->ppEnabledExtensionNames[i]))
1069 return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
1070 }
1071
1072 /* Check enabled features */
1073 if (pCreateInfo->pEnabledFeatures) {
1074 VkPhysicalDeviceFeatures supported_features;
1075 radv_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
1076 VkBool32 *supported_feature = (VkBool32 *)&supported_features;
1077 VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
1078 unsigned num_features = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
1079 for (uint32_t i = 0; i < num_features; i++) {
1080 if (enabled_feature[i] && !supported_feature[i])
1081 return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
1082 }
1083 }
1084
1085 device = vk_alloc2(&physical_device->instance->alloc, pAllocator,
1086 sizeof(*device), 8,
1087 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1088 if (!device)
1089 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1090
1091 memset(device, 0, sizeof(*device));
1092
1093 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1094 device->instance = physical_device->instance;
1095 device->physical_device = physical_device;
1096
1097 device->debug_flags = device->instance->debug_flags;
1098
1099 device->ws = physical_device->ws;
1100 if (pAllocator)
1101 device->alloc = *pAllocator;
1102 else
1103 device->alloc = physical_device->instance->alloc;
1104
1105 mtx_init(&device->shader_slab_mutex, mtx_plain);
1106 list_inithead(&device->shader_slabs);
1107
1108 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
1109 const VkDeviceQueueCreateInfo *queue_create = &pCreateInfo->pQueueCreateInfos[i];
1110 uint32_t qfi = queue_create->queueFamilyIndex;
1111
1112 device->queues[qfi] = vk_alloc(&device->alloc,
1113 queue_create->queueCount * sizeof(struct radv_queue), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1114 if (!device->queues[qfi]) {
1115 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1116 goto fail;
1117 }
1118
1119 memset(device->queues[qfi], 0, queue_create->queueCount * sizeof(struct radv_queue));
1120
1121 device->queue_count[qfi] = queue_create->queueCount;
1122
1123 for (unsigned q = 0; q < queue_create->queueCount; q++) {
1124 result = radv_queue_init(device, &device->queues[qfi][q], qfi, q);
1125 if (result != VK_SUCCESS)
1126 goto fail;
1127 }
1128 }
1129
1130 #if HAVE_LLVM < 0x0400
1131 device->llvm_supports_spill = false;
1132 #else
1133 device->llvm_supports_spill = true;
1134 #endif
1135
1136 /* The maximum number of scratch waves. Scratch space isn't divided
1137 * evenly between CUs. The number is only a function of the number of CUs.
1138 * We can decrease the constant to decrease the scratch buffer size.
1139 *
1140 * device->scratch_waves must be >= the maximum possible size of
1141 * 1 threadgroup, so that the hw doesn't hang from being unable
1142 * to start any.
1143 *
1144 * The recommended value is 4 per CU at most. Higher numbers don't
1145 * bring much benefit, but they still occupy chip resources (think
1146 * async compute). I've seen ~2% performance difference between 4 and 32.
1147 */
1148 uint32_t max_threads_per_block = 2048;
1149 device->scratch_waves = MAX2(32 * physical_device->rad_info.num_good_compute_units,
1150 max_threads_per_block / 64);
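/* Worked example (illustrative): a 36-CU Polaris10 gets
 * MAX2(32 * 36, 2048 / 64) = MAX2(1152, 32) = 1152 scratch waves; the
 * second operand only matters on very small parts, ensuring one full
 * 2048-thread threadgroup (32 waves of 64 lanes) can always launch.
 */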
1151
1152 radv_device_init_gs_info(device);
1153
1154 device->tess_offchip_block_dw_size =
1155 device->physical_device->rad_info.family == CHIP_HAWAII ? 4096 : 8192;
1156 device->has_distributed_tess =
1157 device->physical_device->rad_info.chip_class >= VI &&
1158 device->physical_device->rad_info.max_se >= 2;
1159
1160 result = radv_device_init_meta(device);
1161 if (result != VK_SUCCESS)
1162 goto fail;
1163
1164 radv_device_init_msaa(device);
1165
1166 for (int family = 0; family < RADV_MAX_QUEUE_FAMILIES; ++family) {
1167 device->empty_cs[family] = device->ws->cs_create(device->ws, family);
1168 switch (family) {
1169 case RADV_QUEUE_GENERAL:
1170 radeon_emit(device->empty_cs[family], PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
1171 radeon_emit(device->empty_cs[family], CONTEXT_CONTROL_LOAD_ENABLE(1));
1172 radeon_emit(device->empty_cs[family], CONTEXT_CONTROL_SHADOW_ENABLE(1));
1173 break;
1174 case RADV_QUEUE_COMPUTE:
1175 radeon_emit(device->empty_cs[family], PKT3(PKT3_NOP, 0, 0));
1176 radeon_emit(device->empty_cs[family], 0);
1177 break;
1178 }
1179 device->ws->cs_finalize(device->empty_cs[family]);
1180
1181 device->flush_cs[family] = device->ws->cs_create(device->ws, family);
1182 switch (family) {
1183 case RADV_QUEUE_GENERAL:
1184 case RADV_QUEUE_COMPUTE:
1185 si_cs_emit_cache_flush(device->flush_cs[family],
1186 false,
1187 device->physical_device->rad_info.chip_class,
1188 NULL, 0,
1189 family == RADV_QUEUE_COMPUTE && device->physical_device->rad_info.chip_class >= CIK,
1190 RADV_CMD_FLAG_INV_ICACHE |
1191 RADV_CMD_FLAG_INV_SMEM_L1 |
1192 RADV_CMD_FLAG_INV_VMEM_L1 |
1193 RADV_CMD_FLAG_INV_GLOBAL_L2);
1194 break;
1195 }
1196 device->ws->cs_finalize(device->flush_cs[family]);
1197
1198 device->flush_shader_cs[family] = device->ws->cs_create(device->ws, family);
1199 switch (family) {
1200 case RADV_QUEUE_GENERAL:
1201 case RADV_QUEUE_COMPUTE:
1202 si_cs_emit_cache_flush(device->flush_shader_cs[family],
1203 false,
1204 device->physical_device->rad_info.chip_class,
1205 NULL, 0,
1206 family == RADV_QUEUE_COMPUTE && device->physical_device->rad_info.chip_class >= CIK,
1207 (family == RADV_QUEUE_COMPUTE ? RADV_CMD_FLAG_CS_PARTIAL_FLUSH : (RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH)) |
1208 RADV_CMD_FLAG_INV_ICACHE |
1209 RADV_CMD_FLAG_INV_SMEM_L1 |
1210 RADV_CMD_FLAG_INV_VMEM_L1 |
1211 RADV_CMD_FLAG_INV_GLOBAL_L2);
1212 break;
1213 }
1214 device->ws->cs_finalize(device->flush_shader_cs[family]);
1215 }
1216
1217 if (getenv("RADV_TRACE_FILE")) {
1218 if (!radv_init_trace(device)) {
result = VK_ERROR_INITIALIZATION_FAILED;
1219 goto fail;
}
1220 }
1221
1222 if (device->physical_device->rad_info.chip_class >= CIK)
1223 cik_create_gfx_config(device);
1224
1225 VkPipelineCacheCreateInfo ci;
1226 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
1227 ci.pNext = NULL;
1228 ci.flags = 0;
1229 ci.pInitialData = NULL;
1230 ci.initialDataSize = 0;
1231 VkPipelineCache pc;
1232 result = radv_CreatePipelineCache(radv_device_to_handle(device),
1233 &ci, NULL, &pc);
1234 if (result != VK_SUCCESS)
1235 goto fail;
1236
1237 device->mem_cache = radv_pipeline_cache_from_handle(pc);
1238
1239 *pDevice = radv_device_to_handle(device);
1240 return VK_SUCCESS;
1241
1242 fail:
1243 if (device->trace_bo)
1244 device->ws->buffer_destroy(device->trace_bo);
1245
1246 if (device->gfx_init)
1247 device->ws->buffer_destroy(device->gfx_init);
1248
1249 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
1250 for (unsigned q = 0; q < device->queue_count[i]; q++)
1251 radv_queue_finish(&device->queues[i][q]);
1252 if (device->queue_count[i])
1253 vk_free(&device->alloc, device->queues[i]);
1254 }
1255
1256 vk_free(&device->alloc, device);
1257 return result;
1258 }
1259
1260 void radv_DestroyDevice(
1261 VkDevice _device,
1262 const VkAllocationCallbacks* pAllocator)
1263 {
1264 RADV_FROM_HANDLE(radv_device, device, _device);
1265
1266 if (!device)
1267 return;
1268
1269 if (device->trace_bo)
1270 device->ws->buffer_destroy(device->trace_bo);
1271
1272 if (device->gfx_init)
1273 device->ws->buffer_destroy(device->gfx_init);
1274
1275 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
1276 for (unsigned q = 0; q < device->queue_count[i]; q++)
1277 radv_queue_finish(&device->queues[i][q]);
1278 if (device->queue_count[i])
1279 vk_free(&device->alloc, device->queues[i]);
1280 if (device->empty_cs[i])
1281 device->ws->cs_destroy(device->empty_cs[i]);
1282 if (device->flush_cs[i])
1283 device->ws->cs_destroy(device->flush_cs[i]);
1284 if (device->flush_shader_cs[i])
1285 device->ws->cs_destroy(device->flush_shader_cs[i]);
1286 }
1287 radv_device_finish_meta(device);
1288
1289 VkPipelineCache pc = radv_pipeline_cache_to_handle(device->mem_cache);
1290 radv_DestroyPipelineCache(radv_device_to_handle(device), pc, NULL);
1291
1292 radv_destroy_shader_slabs(device);
1293
1294 vk_free(&device->alloc, device);
1295 }
1296
1297 VkResult radv_EnumerateInstanceExtensionProperties(
1298 const char* pLayerName,
1299 uint32_t* pPropertyCount,
1300 VkExtensionProperties* pProperties)
1301 {
1302 if (pProperties == NULL) {
1303 *pPropertyCount = ARRAY_SIZE(instance_extensions);
1304 return VK_SUCCESS;
1305 }
1306
1307 *pPropertyCount = MIN2(*pPropertyCount, ARRAY_SIZE(instance_extensions));
1308 typed_memcpy(pProperties, instance_extensions, *pPropertyCount);
1309
1310 if (*pPropertyCount < ARRAY_SIZE(instance_extensions))
1311 return VK_INCOMPLETE;
1312
1313 return VK_SUCCESS;
1314 }
1315
1316 VkResult radv_EnumerateDeviceExtensionProperties(
1317 VkPhysicalDevice physicalDevice,
1318 const char* pLayerName,
1319 uint32_t* pPropertyCount,
1320 VkExtensionProperties* pProperties)
1321 {
1322 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
1323
1324 if (pProperties == NULL) {
1325 *pPropertyCount = pdevice->extensions.num_ext;
1326 return VK_SUCCESS;
1327 }
1328
1329 *pPropertyCount = MIN2(*pPropertyCount, pdevice->extensions.num_ext);
1330 typed_memcpy(pProperties, pdevice->extensions.ext_array, *pPropertyCount);
1331
1332 if (*pPropertyCount < pdevice->extensions.num_ext)
1333 return VK_INCOMPLETE;
1334
1335 return VK_SUCCESS;
1336 }
1337
1338 VkResult radv_EnumerateInstanceLayerProperties(
1339 uint32_t* pPropertyCount,
1340 VkLayerProperties* pProperties)
1341 {
1342 if (pProperties == NULL) {
1343 *pPropertyCount = 0;
1344 return VK_SUCCESS;
1345 }
1346
1347 /* None supported at this time */
1348 return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
1349 }
1350
1351 VkResult radv_EnumerateDeviceLayerProperties(
1352 VkPhysicalDevice physicalDevice,
1353 uint32_t* pPropertyCount,
1354 VkLayerProperties* pProperties)
1355 {
1356 if (pProperties == NULL) {
1357 *pPropertyCount = 0;
1358 return VK_SUCCESS;
1359 }
1360
1361 /* None supported at this time */
1362 return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
1363 }
1364
1365 void radv_GetDeviceQueue(
1366 VkDevice _device,
1367 uint32_t queueFamilyIndex,
1368 uint32_t queueIndex,
1369 VkQueue* pQueue)
1370 {
1371 RADV_FROM_HANDLE(radv_device, device, _device);
1372
1373 *pQueue = radv_queue_to_handle(&device->queues[queueFamilyIndex][queueIndex]);
1374 }
1375
1376 static void
1377 fill_geom_tess_rings(struct radv_queue *queue,
1378 uint32_t *map,
1379 bool add_sample_positions,
1380 uint32_t esgs_ring_size,
1381 struct radeon_winsys_bo *esgs_ring_bo,
1382 uint32_t gsvs_ring_size,
1383 struct radeon_winsys_bo *gsvs_ring_bo,
1384 uint32_t tess_factor_ring_size,
1385 struct radeon_winsys_bo *tess_factor_ring_bo,
1386 uint32_t tess_offchip_ring_size,
1387 struct radeon_winsys_bo *tess_offchip_ring_bo)
1388 {
1389 uint64_t esgs_va = 0, gsvs_va = 0;
1390 uint64_t tess_factor_va = 0, tess_offchip_va = 0;
1391 uint32_t *desc = &map[4];
1392
1393 if (esgs_ring_bo)
1394 esgs_va = queue->device->ws->buffer_get_va(esgs_ring_bo);
1395 if (gsvs_ring_bo)
1396 gsvs_va = queue->device->ws->buffer_get_va(gsvs_ring_bo);
1397 if (tess_factor_ring_bo)
1398 tess_factor_va = queue->device->ws->buffer_get_va(tess_factor_ring_bo);
1399 if (tess_offchip_ring_bo)
1400 tess_offchip_va = queue->device->ws->buffer_get_va(tess_offchip_ring_bo);
1401
1402 /* stride 0, num records - size, add tid, swizzle, elsize4,
1403 index stride 64 */
1404 desc[0] = esgs_va;
1405 desc[1] = S_008F04_BASE_ADDRESS_HI(esgs_va >> 32) |
1406 S_008F04_STRIDE(0) |
1407 S_008F04_SWIZZLE_ENABLE(true);
1408 desc[2] = esgs_ring_size;
1409 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1410 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1411 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1412 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1413 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1414 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1415 S_008F0C_ELEMENT_SIZE(1) |
1416 S_008F0C_INDEX_STRIDE(3) |
1417 S_008F0C_ADD_TID_ENABLE(true);
1418
1419 desc += 4;
1420 /* GS entry for ES->GS ring */
1421 /* stride 0, num records - size, elsize0,
1422 index stride 0 */
1423 desc[0] = esgs_va;
1424 desc[1] = S_008F04_BASE_ADDRESS_HI(esgs_va >> 32)|
1425 S_008F04_STRIDE(0) |
1426 S_008F04_SWIZZLE_ENABLE(false);
1427 desc[2] = esgs_ring_size;
1428 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1429 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1430 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1431 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1432 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1433 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1434 S_008F0C_ELEMENT_SIZE(0) |
1435 S_008F0C_INDEX_STRIDE(0) |
1436 S_008F0C_ADD_TID_ENABLE(false);
1437
1438 desc += 4;
1439 /* VS entry for GS->VS ring */
1440 /* stride 0, num records - size, elsize0,
1441 index stride 0 */
1442 desc[0] = gsvs_va;
1443 desc[1] = S_008F04_BASE_ADDRESS_HI(gsvs_va >> 32)|
1444 S_008F04_STRIDE(0) |
1445 S_008F04_SWIZZLE_ENABLE(false);
1446 desc[2] = gsvs_ring_size;
1447 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1448 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1449 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1450 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1451 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1452 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1453 S_008F0C_ELEMENT_SIZE(0) |
1454 S_008F0C_INDEX_STRIDE(0) |
1455 S_008F0C_ADD_TID_ENABLE(false);
1456 desc += 4;
1457
1458 /* stride gsvs_itemsize, num records 64
1459 elsize 4, index stride 16 */
1460 /* shader will patch stride and desc[2] */
1461 desc[0] = gsvs_va;
1462 desc[1] = S_008F04_BASE_ADDRESS_HI(gsvs_va >> 32)|
1463 S_008F04_STRIDE(0) |
1464 S_008F04_SWIZZLE_ENABLE(true);
1465 desc[2] = 0;
1466 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1467 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1468 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1469 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1470 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1471 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1472 S_008F0C_ELEMENT_SIZE(1) |
1473 S_008F0C_INDEX_STRIDE(1) |
1474 S_008F0C_ADD_TID_ENABLE(true);
1475 desc += 4;
1476
1477 desc[0] = tess_factor_va;
1478 desc[1] = S_008F04_BASE_ADDRESS_HI(tess_factor_va >> 32) |
1479 S_008F04_STRIDE(0) |
1480 S_008F04_SWIZZLE_ENABLE(false);
1481 desc[2] = tess_factor_ring_size;
1482 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1483 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1484 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1485 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1486 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1487 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1488 S_008F0C_ELEMENT_SIZE(0) |
1489 S_008F0C_INDEX_STRIDE(0) |
1490 S_008F0C_ADD_TID_ENABLE(false);
1491 desc += 4;
1492
1493 desc[0] = tess_offchip_va;
1494 desc[1] = S_008F04_BASE_ADDRESS_HI(tess_offchip_va >> 32) |
1495 S_008F04_STRIDE(0) |
1496 S_008F04_SWIZZLE_ENABLE(false);
1497 desc[2] = tess_offchip_ring_size;
1498 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1499 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1500 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1501 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1502 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1503 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1504 S_008F0C_ELEMENT_SIZE(0) |
1505 S_008F0C_INDEX_STRIDE(0) |
1506 S_008F0C_ADD_TID_ENABLE(false);
1507 desc += 4;
1508
1509 /* add sample positions after all rings; the descriptor BO is only sized for them when add_sample_positions is set */
if (add_sample_positions) {
1510 memcpy(desc, queue->device->sample_locations_1x, 8);
1511 desc += 2;
1512 memcpy(desc, queue->device->sample_locations_2x, 16);
1513 desc += 4;
1514 memcpy(desc, queue->device->sample_locations_4x, 32);
1515 desc += 8;
1516 memcpy(desc, queue->device->sample_locations_8x, 64);
1517 desc += 16;
1518 memcpy(desc, queue->device->sample_locations_16x, 128);
}
1519 }
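/* Layout note: each ring entry written above is a GCN four-dword buffer
 * resource (V#): dword 0 holds the low 32 bits of the ring VA, dword 1
 * packs the high VA bits with stride and swizzle enable, dword 2 is the
 * size in bytes (num_records for stride 0), and dword 3 carries the
 * dst_sel/num-format/element-size/index-stride controls. The second
 * GS->VS entry leaves stride and dword 2 zero for the shader to patch,
 * per the comment above.
 */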
1520
1521 static unsigned
1522 radv_get_hs_offchip_param(struct radv_device *device, uint32_t *max_offchip_buffers_p)
1523 {
1524 bool double_offchip_buffers = device->physical_device->rad_info.chip_class >= CIK &&
1525 device->physical_device->rad_info.family != CHIP_CARRIZO &&
1526 device->physical_device->rad_info.family != CHIP_STONEY;
1527 unsigned max_offchip_buffers_per_se = double_offchip_buffers ? 128 : 64;
1528 unsigned max_offchip_buffers = max_offchip_buffers_per_se *
1529 device->physical_device->rad_info.max_se;
1530 unsigned offchip_granularity;
1531 unsigned hs_offchip_param;
1532 switch (device->tess_offchip_block_dw_size) {
1533 default:
1534 assert(0);
1535 /* fall through */
1536 case 8192:
1537 offchip_granularity = V_03093C_X_8K_DWORDS;
1538 break;
1539 case 4096:
1540 offchip_granularity = V_03093C_X_4K_DWORDS;
1541 break;
1542 }
1543
1544 switch (device->physical_device->rad_info.chip_class) {
1545 case SI:
1546 max_offchip_buffers = MIN2(max_offchip_buffers, 126);
1547 break;
1548 case CIK:
1549 case VI:
1550 case GFX9:
1551 default:
1552 max_offchip_buffers = MIN2(max_offchip_buffers, 508);
1553 break;
1554 }
1555
1556 *max_offchip_buffers_p = max_offchip_buffers;
1557 if (device->physical_device->rad_info.chip_class >= CIK) {
1558 if (device->physical_device->rad_info.chip_class >= VI)
1559 --max_offchip_buffers;
1560 hs_offchip_param =
1561 S_03093C_OFFCHIP_BUFFERING(max_offchip_buffers) |
1562 S_03093C_OFFCHIP_GRANULARITY(offchip_granularity);
1563 } else {
1564 hs_offchip_param =
1565 S_0089B0_OFFCHIP_BUFFERING(max_offchip_buffers);
1566 }
1567 return hs_offchip_param;
1568 }
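/* Worked example (illustrative): a 4-SE VI part gets 128 offchip buffers
 * per SE, 512 total, clamped to 508; *max_offchip_buffers_p reports 508,
 * and since chip_class >= VI the field encodes 508 - 1 = 507, with
 * OFFCHIP_GRANULARITY selecting the 8K-dword block size chosen in
 * radv_CreateDevice.
 */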
1569
1570 static VkResult
1571 radv_get_preamble_cs(struct radv_queue *queue,
1572 uint32_t scratch_size,
1573 uint32_t compute_scratch_size,
1574 uint32_t esgs_ring_size,
1575 uint32_t gsvs_ring_size,
1576 bool needs_tess_rings,
1577 bool needs_sample_positions,
1578 struct radeon_winsys_cs **initial_preamble_cs,
1579 struct radeon_winsys_cs **continue_preamble_cs)
1580 {
1581 struct radeon_winsys_bo *scratch_bo = NULL;
1582 struct radeon_winsys_bo *descriptor_bo = NULL;
1583 struct radeon_winsys_bo *compute_scratch_bo = NULL;
1584 struct radeon_winsys_bo *esgs_ring_bo = NULL;
1585 struct radeon_winsys_bo *gsvs_ring_bo = NULL;
1586 struct radeon_winsys_bo *tess_factor_ring_bo = NULL;
1587 struct radeon_winsys_bo *tess_offchip_ring_bo = NULL;
1588 struct radeon_winsys_cs *dest_cs[2] = {0};
1589 bool add_tess_rings = false, add_sample_positions = false;
1590 unsigned tess_factor_ring_size = 0, tess_offchip_ring_size = 0;
1591 unsigned max_offchip_buffers;
1592 unsigned hs_offchip_param = 0;
1593 if (!queue->has_tess_rings) {
1594 if (needs_tess_rings)
1595 add_tess_rings = true;
1596 }
1597 if (!queue->has_sample_positions) {
1598 if (needs_sample_positions)
1599 add_sample_positions = true;
1600 }
1601 tess_factor_ring_size = 32768 * queue->device->physical_device->rad_info.max_se;
1602 hs_offchip_param = radv_get_hs_offchip_param(queue->device,
1603 &max_offchip_buffers);
1604 tess_offchip_ring_size = max_offchip_buffers *
1605 queue->device->tess_offchip_block_dw_size * 4;
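/* With the values above, a 4-SE part sizes the tess factor ring at
 * 32768 * 4 = 128 KiB, and 508 offchip buffers of 8192 dwords make the
 * offchip ring 508 * 8192 * 4 bytes, just under 16 MiB (Hawaii's
 * 4096-dword blocks halve that).
 */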
1606
1607 if (scratch_size <= queue->scratch_size &&
1608 compute_scratch_size <= queue->compute_scratch_size &&
1609 esgs_ring_size <= queue->esgs_ring_size &&
1610 gsvs_ring_size <= queue->gsvs_ring_size &&
1611 !add_tess_rings && !add_sample_positions &&
1612 queue->initial_preamble_cs) {
1613 *initial_preamble_cs = queue->initial_preamble_cs;
1614 *continue_preamble_cs = queue->continue_preamble_cs;
1615 if (!scratch_size && !compute_scratch_size && !esgs_ring_size && !gsvs_ring_size)
1616 *continue_preamble_cs = NULL;
1617 return VK_SUCCESS;
1618 }
1619
1620 if (scratch_size > queue->scratch_size) {
1621 scratch_bo = queue->device->ws->buffer_create(queue->device->ws,
1622 scratch_size,
1623 4096,
1624 RADEON_DOMAIN_VRAM,
1625 RADEON_FLAG_NO_CPU_ACCESS);
1626 if (!scratch_bo)
1627 goto fail;
1628 } else
1629 scratch_bo = queue->scratch_bo;
1630
1631 if (compute_scratch_size > queue->compute_scratch_size) {
1632 compute_scratch_bo = queue->device->ws->buffer_create(queue->device->ws,
1633 compute_scratch_size,
1634 4096,
1635 RADEON_DOMAIN_VRAM,
1636 RADEON_FLAG_NO_CPU_ACCESS);
1637 if (!compute_scratch_bo)
1638 goto fail;
1639
1640 } else
1641 compute_scratch_bo = queue->compute_scratch_bo;
1642
1643 if (esgs_ring_size > queue->esgs_ring_size) {
1644 esgs_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
1645 esgs_ring_size,
1646 4096,
1647 RADEON_DOMAIN_VRAM,
1648 RADEON_FLAG_NO_CPU_ACCESS);
1649 if (!esgs_ring_bo)
1650 goto fail;
1651 } else {
1652 esgs_ring_bo = queue->esgs_ring_bo;
1653 esgs_ring_size = queue->esgs_ring_size;
1654 }
1655
1656 if (gsvs_ring_size > queue->gsvs_ring_size) {
1657 gsvs_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
1658 gsvs_ring_size,
1659 4096,
1660 RADEON_DOMAIN_VRAM,
1661 RADEON_FLAG_NO_CPU_ACCESS);
1662 if (!gsvs_ring_bo)
1663 goto fail;
1664 } else {
1665 gsvs_ring_bo = queue->gsvs_ring_bo;
1666 gsvs_ring_size = queue->gsvs_ring_size;
1667 }
1668
1669 if (add_tess_rings) {
1670 tess_factor_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
1671 tess_factor_ring_size,
1672 256,
1673 RADEON_DOMAIN_VRAM,
1674 RADEON_FLAG_NO_CPU_ACCESS);
1675 if (!tess_factor_ring_bo)
1676 goto fail;
1677 tess_offchip_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
1678 tess_offchip_ring_size,
1679 256,
1680 RADEON_DOMAIN_VRAM,
1681 RADEON_FLAG_NO_CPU_ACCESS);
1682 if (!tess_offchip_ring_bo)
1683 goto fail;
1684 } else {
1685 tess_factor_ring_bo = queue->tess_factor_ring_bo;
1686 tess_offchip_ring_bo = queue->tess_offchip_ring_bo;
1687 }
1688
1689 if (scratch_bo != queue->scratch_bo ||
1690 esgs_ring_bo != queue->esgs_ring_bo ||
1691 gsvs_ring_bo != queue->gsvs_ring_bo ||
1692 tess_factor_ring_bo != queue->tess_factor_ring_bo ||
1693 tess_offchip_ring_bo != queue->tess_offchip_ring_bo || add_sample_positions) {
1694 uint32_t size = 0;
1695 if (gsvs_ring_bo || esgs_ring_bo ||
1696 tess_factor_ring_bo || tess_offchip_ring_bo || add_sample_positions) {
1697 size = 112; /* 2 dword scratch + 2 dword padding + 6 ring descriptors * 4 dword = 112 bytes */
1698 if (add_sample_positions)
1699 size += 256; /* (1+2+4+8+16) sample positions * 2 floats * 4 bytes = 248 bytes. */
1700 }
1701 else if (scratch_bo)
1702 size = 8; /* scratch descriptor only: 2 dwords = 8 bytes */
1703
1704 descriptor_bo = queue->device->ws->buffer_create(queue->device->ws,
1705 size,
1706 4096,
1707 RADEON_DOMAIN_VRAM,
1708 RADEON_FLAG_CPU_ACCESS);
1709 if (!descriptor_bo)
1710 goto fail;
1711 } else
1712 descriptor_bo = queue->descriptor_bo;
1713
1714 for (int i = 0; i < 2; ++i) {
1715 struct radeon_winsys_cs *cs = NULL;
1716 cs = queue->device->ws->cs_create(queue->device->ws,
1717 queue->queue_family_index ? RING_COMPUTE : RING_GFX);
1718 if (!cs)
1719 goto fail;
1720
1721 dest_cs[i] = cs;
1722
1723 if (scratch_bo)
1724 queue->device->ws->cs_add_buffer(cs, scratch_bo, 8);
1725
1726 if (esgs_ring_bo)
1727 queue->device->ws->cs_add_buffer(cs, esgs_ring_bo, 8);
1728
1729 if (gsvs_ring_bo)
1730 queue->device->ws->cs_add_buffer(cs, gsvs_ring_bo, 8);
1731
1732 if (tess_factor_ring_bo)
1733 queue->device->ws->cs_add_buffer(cs, tess_factor_ring_bo, 8);
1734
1735 if (tess_offchip_ring_bo)
1736 queue->device->ws->cs_add_buffer(cs, tess_offchip_ring_bo, 8);
1737
1738 if (descriptor_bo)
1739 queue->device->ws->cs_add_buffer(cs, descriptor_bo, 8);
1740
1741 if (descriptor_bo != queue->descriptor_bo) {
1742 uint32_t *map = (uint32_t*)queue->device->ws->buffer_map(descriptor_bo);
if (!map)
goto fail;
1743
1744 if (scratch_bo) {
1745 uint64_t scratch_va = queue->device->ws->buffer_get_va(scratch_bo);
1746 uint32_t rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
1747 S_008F04_SWIZZLE_ENABLE(1);
1748 map[0] = scratch_va;
1749 map[1] = rsrc1;
1750 }
1751
1752 if (esgs_ring_bo || gsvs_ring_bo || tess_factor_ring_bo || tess_offchip_ring_bo ||
1753 add_sample_positions)
1754 fill_geom_tess_rings(queue, map, add_sample_positions,
1755 esgs_ring_size, esgs_ring_bo,
1756 gsvs_ring_size, gsvs_ring_bo,
1757 tess_factor_ring_size, tess_factor_ring_bo,
1758 tess_offchip_ring_size, tess_offchip_ring_bo);
1759
1760 queue->device->ws->buffer_unmap(descriptor_bo);
1761 }
1762
1763 if (esgs_ring_bo || gsvs_ring_bo || tess_factor_ring_bo || tess_offchip_ring_bo) {
1764 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1765 radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1766 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1767 radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
1768 }
1769
1770 if (esgs_ring_bo || gsvs_ring_bo) {
1771 if (queue->device->physical_device->rad_info.chip_class >= CIK) {
1772 radeon_set_uconfig_reg_seq(cs, R_030900_VGT_ESGS_RING_SIZE, 2);
1773 radeon_emit(cs, esgs_ring_size >> 8);
1774 radeon_emit(cs, gsvs_ring_size >> 8);
1775 } else {
1776 radeon_set_config_reg_seq(cs, R_0088C8_VGT_ESGS_RING_SIZE, 2);
1777 radeon_emit(cs, esgs_ring_size >> 8);
1778 radeon_emit(cs, gsvs_ring_size >> 8);
1779 }
1780 }
1781
1782 if (tess_factor_ring_bo) {
1783 uint64_t tf_va = queue->device->ws->buffer_get_va(tess_factor_ring_bo);
1784 if (queue->device->physical_device->rad_info.chip_class >= CIK) {
1785 radeon_set_uconfig_reg(cs, R_030938_VGT_TF_RING_SIZE,
1786 S_030938_SIZE(tess_factor_ring_size / 4));
1787 radeon_set_uconfig_reg(cs, R_030940_VGT_TF_MEMORY_BASE,
1788 tf_va >> 8);
1789 if (queue->device->physical_device->rad_info.chip_class >= GFX9) {
1790 radeon_set_uconfig_reg(cs, R_030944_VGT_TF_MEMORY_BASE_HI,
1791 tf_va >> 40);
1792 }
1793 radeon_set_uconfig_reg(cs, R_03093C_VGT_HS_OFFCHIP_PARAM, hs_offchip_param);
1794 } else {
1795 radeon_set_config_reg(cs, R_008988_VGT_TF_RING_SIZE,
1796 S_008988_SIZE(tess_factor_ring_size / 4));
1797 radeon_set_config_reg(cs, R_0089B8_VGT_TF_MEMORY_BASE,
1798 tf_va >> 8);
1799 radeon_set_config_reg(cs, R_0089B0_VGT_HS_OFFCHIP_PARAM,
1800 hs_offchip_param);
1801 }
1802 }
1803
1804 if (descriptor_bo) {
1805 uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0,
1806 R_00B130_SPI_SHADER_USER_DATA_VS_0,
1807 R_00B230_SPI_SHADER_USER_DATA_GS_0,
1808 R_00B330_SPI_SHADER_USER_DATA_ES_0,
1809 R_00B430_SPI_SHADER_USER_DATA_HS_0,
1810 R_00B530_SPI_SHADER_USER_DATA_LS_0};
1811
1812 uint64_t va = queue->device->ws->buffer_get_va(descriptor_bo);
1813
1814 for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
1815 radeon_set_sh_reg_seq(cs, regs[i], 2);
1816 radeon_emit(cs, va);
1817 radeon_emit(cs, va >> 32);
1818 }
1819 }
1820
1821 if (compute_scratch_bo) {
1822 uint64_t scratch_va = queue->device->ws->buffer_get_va(compute_scratch_bo);
1823 uint32_t rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
1824 S_008F04_SWIZZLE_ENABLE(1);
1825
1826 queue->device->ws->cs_add_buffer(cs, compute_scratch_bo, 8);
1827
1828 radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
1829 radeon_emit(cs, scratch_va);
1830 radeon_emit(cs, rsrc1);
1831 }
1832
/* Only the initial preamble (i == 0) flushes caches; the continue
 * preamble (i == 1) skips the flush. */
1833 if (!i) {
1834 si_cs_emit_cache_flush(cs,
1835 false,
1836 queue->device->physical_device->rad_info.chip_class,
1837 NULL, 0,
1838 queue->queue_family_index == RING_COMPUTE &&
1839 queue->device->physical_device->rad_info.chip_class >= CIK,
1840 RADV_CMD_FLAG_INV_ICACHE |
1841 RADV_CMD_FLAG_INV_SMEM_L1 |
1842 RADV_CMD_FLAG_INV_VMEM_L1 |
1843 RADV_CMD_FLAG_INV_GLOBAL_L2);
1844 }
1845
1846 if (!queue->device->ws->cs_finalize(cs))
1847 goto fail;
1848 }
1849
1850 if (queue->initial_preamble_cs)
1851 queue->device->ws->cs_destroy(queue->initial_preamble_cs);
1852
1853 if (queue->continue_preamble_cs)
1854 queue->device->ws->cs_destroy(queue->continue_preamble_cs);
1855
1856 queue->initial_preamble_cs = dest_cs[0];
1857 queue->continue_preamble_cs = dest_cs[1];
1858
1859 if (scratch_bo != queue->scratch_bo) {
1860 if (queue->scratch_bo)
1861 queue->device->ws->buffer_destroy(queue->scratch_bo);
1862 queue->scratch_bo = scratch_bo;
1863 queue->scratch_size = scratch_size;
1864 }
1865
1866 if (compute_scratch_bo != queue->compute_scratch_bo) {
1867 if (queue->compute_scratch_bo)
1868 queue->device->ws->buffer_destroy(queue->compute_scratch_bo);
1869 queue->compute_scratch_bo = compute_scratch_bo;
1870 queue->compute_scratch_size = compute_scratch_size;
1871 }
1872
1873 if (esgs_ring_bo != queue->esgs_ring_bo) {
1874 if (queue->esgs_ring_bo)
1875 queue->device->ws->buffer_destroy(queue->esgs_ring_bo);
1876 queue->esgs_ring_bo = esgs_ring_bo;
1877 queue->esgs_ring_size = esgs_ring_size;
1878 }
1879
1880 if (gsvs_ring_bo != queue->gsvs_ring_bo) {
1881 if (queue->gsvs_ring_bo)
1882 queue->device->ws->buffer_destroy(queue->gsvs_ring_bo);
1883 queue->gsvs_ring_bo = gsvs_ring_bo;
1884 queue->gsvs_ring_size = gsvs_ring_size;
1885 }
1886
1887 if (tess_factor_ring_bo != queue->tess_factor_ring_bo) {
1888 queue->tess_factor_ring_bo = tess_factor_ring_bo;
1889 }
1890
1891 if (tess_offchip_ring_bo != queue->tess_offchip_ring_bo) {
1892 queue->tess_offchip_ring_bo = tess_offchip_ring_bo;
1893 queue->has_tess_rings = true;
1894 }
1895
1896 if (descriptor_bo != queue->descriptor_bo) {
1897 if (queue->descriptor_bo)
1898 queue->device->ws->buffer_destroy(queue->descriptor_bo);
1899
1900 queue->descriptor_bo = descriptor_bo;
1901 }
1902
1903 if (add_sample_positions)
1904 queue->has_sample_positions = true;
1905
1906 *initial_preamble_cs = queue->initial_preamble_cs;
1907 *continue_preamble_cs = queue->continue_preamble_cs;
1908 if (!scratch_size && !compute_scratch_size && !esgs_ring_size && !gsvs_ring_size)
1909 *continue_preamble_cs = NULL;
1910 return VK_SUCCESS;
1911 fail:
1912 for (int i = 0; i < ARRAY_SIZE(dest_cs); ++i)
1913 if (dest_cs[i])
1914 queue->device->ws->cs_destroy(dest_cs[i]);
1915 if (descriptor_bo && descriptor_bo != queue->descriptor_bo)
1916 queue->device->ws->buffer_destroy(descriptor_bo);
1917 if (scratch_bo && scratch_bo != queue->scratch_bo)
1918 queue->device->ws->buffer_destroy(scratch_bo);
1919 if (compute_scratch_bo && compute_scratch_bo != queue->compute_scratch_bo)
1920 queue->device->ws->buffer_destroy(compute_scratch_bo);
1921 if (esgs_ring_bo && esgs_ring_bo != queue->esgs_ring_bo)
1922 queue->device->ws->buffer_destroy(esgs_ring_bo);
1923 if (gsvs_ring_bo && gsvs_ring_bo != queue->gsvs_ring_bo)
1924 queue->device->ws->buffer_destroy(gsvs_ring_bo);
1925 if (tess_factor_ring_bo && tess_factor_ring_bo != queue->tess_factor_ring_bo)
1926 queue->device->ws->buffer_destroy(tess_factor_ring_bo);
1927 if (tess_offchip_ring_bo && tess_offchip_ring_bo != queue->tess_offchip_ring_bo)
1928 queue->device->ws->buffer_destroy(tess_offchip_ring_bo);
1929 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
1930 }
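
/*
 * Illustrative sketch (not part of the build, values hypothetical): the
 * two-dword scratch descriptor that the loop above writes into
 * descriptor_bo. map[0] takes the low 32 bits of the scratch VA and map[1]
 * packs the high address bits together with the swizzle enable.
 */
#if 0
uint64_t scratch_va = 0x100001000ull; /* hypothetical GPU virtual address */
uint32_t map[2];
map[0] = scratch_va; /* low 32 bits: 0x00001000 */
map[1] = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) | /* high bits: 0x1 */
         S_008F04_SWIZZLE_ENABLE(1); /* scratch is element-swizzled */
#endif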
1931
1932 static VkResult radv_alloc_sem_counts(struct radv_winsys_sem_counts *counts,
1933 int num_sems,
1934 const VkSemaphore *sems,
1935 bool reset_temp)
1936 {
1937 int syncobj_idx = 0, sem_idx = 0;
1938
1939 if (num_sems == 0)
1940 return VK_SUCCESS;
1941 for (uint32_t i = 0; i < num_sems; i++) {
1942 RADV_FROM_HANDLE(radv_semaphore, sem, sems[i]);
1943
1944 if (sem->temp_syncobj || sem->syncobj)
1945 counts->syncobj_count++;
1946 else
1947 counts->sem_count++;
1948 }
1949
1950 if (counts->syncobj_count) {
1951 counts->syncobj = (uint32_t *)malloc(sizeof(uint32_t) * counts->syncobj_count);
1952 if (!counts->syncobj)
1953 return VK_ERROR_OUT_OF_HOST_MEMORY;
1954 }
1955
1956 if (counts->sem_count) {
1957 counts->sem = (struct radeon_winsys_sem **)malloc(sizeof(struct radeon_winsys_sem *) * counts->sem_count);
1958 if (!counts->sem) {
1959 free(counts->syncobj);
1960 return VK_ERROR_OUT_OF_HOST_MEMORY;
1961 }
1962 }
1963
1964 for (uint32_t i = 0; i < num_sems; i++) {
1965 RADV_FROM_HANDLE(radv_semaphore, sem, sems[i]);
1966
1967 if (sem->temp_syncobj) {
1968 counts->syncobj[syncobj_idx++] = sem->temp_syncobj;
1969 if (reset_temp) {
1970 /* after we wait on a temp import - drop it */
1971 sem->temp_syncobj = 0;
1972 }
1973 }
1974 else if (sem->syncobj)
1975 counts->syncobj[syncobj_idx++] = sem->syncobj;
1976 else {
1977 assert(sem->sem);
1978 counts->sem[sem_idx++] = sem->sem;
1979 }
1980 }
1981
1982 return VK_SUCCESS;
1983 }
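
/*
 * Note: radv_alloc_sem_counts() above uses the common count-then-fill
 * pattern - one pass over sems to size the syncobj and sem arrays, a second
 * pass to populate them - so each array is allocated exactly once at its
 * final size.
 */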
1984
1985 void radv_free_sem_info(struct radv_winsys_sem_info *sem_info)
1986 {
1987 free(sem_info->wait.syncobj);
1988 free(sem_info->wait.sem);
1989 free(sem_info->signal.syncobj);
1990 free(sem_info->signal.sem);
1991 }
1992
1993 VkResult radv_alloc_sem_info(struct radv_winsys_sem_info *sem_info,
1994 int num_wait_sems,
1995 const VkSemaphore *wait_sems,
1996 int num_signal_sems,
1997 const VkSemaphore *signal_sems)
1998 {
1999 VkResult ret;
2000 memset(sem_info, 0, sizeof(*sem_info));
2001
2002 ret = radv_alloc_sem_counts(&sem_info->wait, num_wait_sems, wait_sems, true);
2003 if (ret)
2004 return ret;
2005 ret = radv_alloc_sem_counts(&sem_info->signal, num_signal_sems, signal_sems, false);
2006 if (ret)
2007 radv_free_sem_info(sem_info);
2008
2009 /* caller can override these */
2010 sem_info->cs_emit_wait = true;
2011 sem_info->cs_emit_signal = true;
2012 return ret;
2013 }
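
/*
 * Usage sketch (hypothetical, modelled on radv_QueueSubmit() below):
 * allocate the semaphore arrays, optionally narrow which chunk of a split
 * submission emits the waits/signals, and always free the arrays when done.
 * The count/handle variables are placeholders.
 */
#if 0
struct radv_winsys_sem_info sem_info;
VkResult result = radv_alloc_sem_info(&sem_info,
                                      waitSemaphoreCount, pWaitSemaphores,
                                      signalSemaphoreCount, pSignalSemaphores);
if (result != VK_SUCCESS)
   return result;
sem_info.cs_emit_wait = true;    /* e.g. only the first chunk waits */
sem_info.cs_emit_signal = false; /* and only the last chunk signals */
/* ... queue->device->ws->cs_submit(...) ... */
radv_free_sem_info(&sem_info);
#endif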
2014
2015 VkResult radv_QueueSubmit(
2016 VkQueue _queue,
2017 uint32_t submitCount,
2018 const VkSubmitInfo* pSubmits,
2019 VkFence _fence)
2020 {
2021 RADV_FROM_HANDLE(radv_queue, queue, _queue);
2022 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2023 struct radeon_winsys_fence *base_fence = fence ? fence->fence : NULL;
2024 struct radeon_winsys_ctx *ctx = queue->hw_ctx;
2025 int ret;
2026 uint32_t max_cs_submission = queue->device->trace_bo ? 1 : UINT32_MAX;
2027 uint32_t scratch_size = 0;
2028 uint32_t compute_scratch_size = 0;
2029 uint32_t esgs_ring_size = 0, gsvs_ring_size = 0;
2030 struct radeon_winsys_cs *initial_preamble_cs = NULL, *continue_preamble_cs = NULL;
2031 VkResult result;
2032 bool fence_emitted = false;
2033 bool tess_rings_needed = false;
2034 bool sample_positions_needed = false;
2035
2036 /* Do this first so failing to allocate scratch buffers can't result in
2037 * partially executed submissions. */
2038 for (uint32_t i = 0; i < submitCount; i++) {
2039 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
2040 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer,
2041 pSubmits[i].pCommandBuffers[j]);
2042
2043 scratch_size = MAX2(scratch_size, cmd_buffer->scratch_size_needed);
2044 compute_scratch_size = MAX2(compute_scratch_size,
2045 cmd_buffer->compute_scratch_size_needed);
2046 esgs_ring_size = MAX2(esgs_ring_size, cmd_buffer->esgs_ring_size_needed);
2047 gsvs_ring_size = MAX2(gsvs_ring_size, cmd_buffer->gsvs_ring_size_needed);
2048 tess_rings_needed |= cmd_buffer->tess_rings_needed;
2049 sample_positions_needed |= cmd_buffer->sample_positions_needed;
2050 }
2051 }
2052
2053 result = radv_get_preamble_cs(queue, scratch_size, compute_scratch_size,
2054 esgs_ring_size, gsvs_ring_size, tess_rings_needed,
2055 sample_positions_needed,
2056 &initial_preamble_cs, &continue_preamble_cs);
2057 if (result != VK_SUCCESS)
2058 return result;
2059
2060 for (uint32_t i = 0; i < submitCount; i++) {
2061 struct radeon_winsys_cs **cs_array;
2062 bool do_flush = !i || pSubmits[i].pWaitDstStageMask;
2063 bool can_patch = !do_flush;
2064 uint32_t advance;
2065 struct radv_winsys_sem_info sem_info;
2066
2067 result = radv_alloc_sem_info(&sem_info,
2068 pSubmits[i].waitSemaphoreCount,
2069 pSubmits[i].pWaitSemaphores,
2070 pSubmits[i].signalSemaphoreCount,
2071 pSubmits[i].pSignalSemaphores);
2072 if (result != VK_SUCCESS)
2073 return result;
2074
2075 if (!pSubmits[i].commandBufferCount) {
2076 if (pSubmits[i].waitSemaphoreCount || pSubmits[i].signalSemaphoreCount) {
2077 ret = queue->device->ws->cs_submit(ctx, queue->queue_idx,
2078 &queue->device->empty_cs[queue->queue_family_index],
2079 1, NULL, NULL,
2080 &sem_info,
2081 false, base_fence);
2082 if (ret) {
2083 radv_loge("failed to submit CS %d\n", i);
2084 abort();
2085 }
2086 fence_emitted = true;
2087 }
2088 radv_free_sem_info(&sem_info);
2089 continue;
2090 }
2091
2092 cs_array = malloc(sizeof(struct radeon_winsys_cs *) *
2093 (pSubmits[i].commandBufferCount + do_flush));
if (!cs_array) {
radv_free_sem_info(&sem_info);
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
2094
2095 if (do_flush)
2096 cs_array[0] = pSubmits[i].waitSemaphoreCount ?
2097 queue->device->flush_shader_cs[queue->queue_family_index] :
2098 queue->device->flush_cs[queue->queue_family_index];
2099
2100 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
2101 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer,
2102 pSubmits[i].pCommandBuffers[j]);
2103 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
2104
2105 cs_array[j + do_flush] = cmd_buffer->cs;
2106 if ((cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT))
2107 can_patch = false;
2108 }
2109
2110 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount + do_flush; j += advance) {
2111 advance = MIN2(max_cs_submission,
2112 pSubmits[i].commandBufferCount + do_flush - j);
2113
2114 if (queue->device->trace_bo)
2115 *queue->device->trace_id_ptr = 0;
2116
2117 sem_info.cs_emit_wait = j == 0;
2118 sem_info.cs_emit_signal = j + advance == pSubmits[i].commandBufferCount + do_flush;
2119
2120 ret = queue->device->ws->cs_submit(ctx, queue->queue_idx, cs_array + j,
2121 advance, initial_preamble_cs, continue_preamble_cs,
2122 &sem_info,
2123 can_patch, base_fence);
2124
2125 if (ret) {
2126 radv_loge("failed to submit CS %d\n", i);
2127 abort();
2128 }
2129 fence_emitted = true;
2130 if (queue->device->trace_bo) {
2131 bool success = queue->device->ws->ctx_wait_idle(
2132 queue->hw_ctx,
2133 radv_queue_family_to_ring(
2134 queue->queue_family_index),
2135 queue->queue_idx);
2136
2137 if (!success) { /* Hang */
2138 radv_dump_trace(queue->device, cs_array[j]);
2139 abort();
2140 }
2141 }
2142 }
2143
2144 radv_free_sem_info(&sem_info);
2145 free(cs_array);
2146 }
2147
2148 if (fence) {
2149 if (!fence_emitted) {
2150 struct radv_winsys_sem_info sem_info = {0};
2151 ret = queue->device->ws->cs_submit(ctx, queue->queue_idx,
2152 &queue->device->empty_cs[queue->queue_family_index],
2153 1, NULL, NULL, &sem_info,
2154 false, base_fence);
2155 }
2156 fence->submitted = true;
2157 }
2158
2159 return VK_SUCCESS;
2160 }
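
/*
 * Note on the chunking above: max_cs_submission is UINT32_MAX normally, so
 * all command buffers of a submit (plus the optional flush CS) go down in a
 * single cs_submit() call. With RADV_TRACE_FILE tracing enabled
 * (queue->device->trace_bo != NULL) it is 1, so e.g. 3 command buffers plus
 * a flush CS become 4 single-CS submissions, each followed by
 * ctx_wait_idle() so that a hang can be pinned to one CS.
 */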
2161
2162 VkResult radv_QueueWaitIdle(
2163 VkQueue _queue)
2164 {
2165 RADV_FROM_HANDLE(radv_queue, queue, _queue);
2166
2167 queue->device->ws->ctx_wait_idle(queue->hw_ctx,
2168 radv_queue_family_to_ring(queue->queue_family_index),
2169 queue->queue_idx);
2170 return VK_SUCCESS;
2171 }
2172
2173 VkResult radv_DeviceWaitIdle(
2174 VkDevice _device)
2175 {
2176 RADV_FROM_HANDLE(radv_device, device, _device);
2177
2178 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
2179 for (unsigned q = 0; q < device->queue_count[i]; q++) {
2180 radv_QueueWaitIdle(radv_queue_to_handle(&device->queues[i][q]));
2181 }
2182 }
2183 return VK_SUCCESS;
2184 }
2185
2186 PFN_vkVoidFunction radv_GetInstanceProcAddr(
2187 VkInstance instance,
2188 const char* pName)
2189 {
2190 return radv_lookup_entrypoint(pName);
2191 }
2192
2193 /* The loader wants us to expose a second GetInstanceProcAddr function
2194 * to work around certain LD_PRELOAD issues seen in apps.
2195 */
2196 PUBLIC
2197 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
2198 VkInstance instance,
2199 const char* pName);
2200
2201 PUBLIC
2202 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
2203 VkInstance instance,
2204 const char* pName)
2205 {
2206 return radv_GetInstanceProcAddr(instance, pName);
2207 }
2208
2209 PFN_vkVoidFunction radv_GetDeviceProcAddr(
2210 VkDevice device,
2211 const char* pName)
2212 {
2213 return radv_lookup_entrypoint(pName);
2214 }
2215
2216 bool radv_get_memory_fd(struct radv_device *device,
2217 struct radv_device_memory *memory,
2218 int *pFD)
2219 {
2220 struct radeon_bo_metadata metadata;
2221
2222 if (memory->image) {
2223 radv_init_metadata(device, memory->image, &metadata);
2224 device->ws->buffer_set_metadata(memory->bo, &metadata);
2225 }
2226
2227 return device->ws->buffer_get_fd(device->ws, memory->bo,
2228 pFD);
2229 }
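
/*
 * Note: for image-backed allocations the tiling metadata is rewritten into
 * the BO right before the fd export above, so the importing process can
 * reconstruct the surface layout from the fd alone.
 */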
2230
2231 VkResult radv_AllocateMemory(
2232 VkDevice _device,
2233 const VkMemoryAllocateInfo* pAllocateInfo,
2234 const VkAllocationCallbacks* pAllocator,
2235 VkDeviceMemory* pMem)
2236 {
2237 RADV_FROM_HANDLE(radv_device, device, _device);
2238 struct radv_device_memory *mem;
2239 VkResult result;
2240 enum radeon_bo_domain domain;
2241 uint32_t flags = 0;
2242
2243 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
2244
2245 if (pAllocateInfo->allocationSize == 0) {
2246 /* A zero-sized allocation is seen in practice; treat it as a successful no-op. */
2247 *pMem = VK_NULL_HANDLE;
2248 return VK_SUCCESS;
2249 }
2250
2251 const VkImportMemoryFdInfoKHR *import_info =
2252 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
2253 const VkMemoryDedicatedAllocateInfoKHR *dedicate_info =
2254 vk_find_struct_const(pAllocateInfo->pNext, MEMORY_DEDICATED_ALLOCATE_INFO_KHR);
2255
2256 mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
2257 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2258 if (mem == NULL)
2259 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2260
2261 if (dedicate_info) {
2262 mem->image = radv_image_from_handle(dedicate_info->image);
2263 mem->buffer = radv_buffer_from_handle(dedicate_info->buffer);
2264 } else {
2265 mem->image = NULL;
2266 mem->buffer = NULL;
2267 }
2268
2269 if (import_info) {
2270 assert(import_info->handleType ==
2271 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
2272 mem->bo = device->ws->buffer_from_fd(device->ws, import_info->fd,
2273 NULL, NULL);
2274 if (!mem->bo) {
2275 result = VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR;
2276 goto fail;
2277 } else {
2278 close(import_info->fd);
2279 goto out_success;
2280 }
2281 }
2282
2283 uint64_t alloc_size = align_u64(pAllocateInfo->allocationSize, 4096);
2284 if (pAllocateInfo->memoryTypeIndex == RADV_MEM_TYPE_GTT_WRITE_COMBINE ||
2285 pAllocateInfo->memoryTypeIndex == RADV_MEM_TYPE_GTT_CACHED)
2286 domain = RADEON_DOMAIN_GTT;
2287 else
2288 domain = RADEON_DOMAIN_VRAM;
2289
2290 if (pAllocateInfo->memoryTypeIndex == RADV_MEM_TYPE_VRAM)
2291 flags |= RADEON_FLAG_NO_CPU_ACCESS;
2292 else
2293 flags |= RADEON_FLAG_CPU_ACCESS;
2294
2295 if (pAllocateInfo->memoryTypeIndex == RADV_MEM_TYPE_GTT_WRITE_COMBINE)
2296 flags |= RADEON_FLAG_GTT_WC;
2297
2298 mem->bo = device->ws->buffer_create(device->ws, alloc_size, device->physical_device->rad_info.max_alignment,
2299 domain, flags);
2300
2301 if (!mem->bo) {
2302 result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
2303 goto fail;
2304 }
2305 mem->type_index = pAllocateInfo->memoryTypeIndex;
2306 out_success:
2307 *pMem = radv_device_memory_to_handle(mem);
2308
2309 return VK_SUCCESS;
2310
2311 fail:
2312 vk_free2(&device->alloc, pAllocator, mem);
2313
2314 return result;
2315 }
2316
2317 void radv_FreeMemory(
2318 VkDevice _device,
2319 VkDeviceMemory _mem,
2320 const VkAllocationCallbacks* pAllocator)
2321 {
2322 RADV_FROM_HANDLE(radv_device, device, _device);
2323 RADV_FROM_HANDLE(radv_device_memory, mem, _mem);
2324
2325 if (mem == NULL)
2326 return;
2327
2328 device->ws->buffer_destroy(mem->bo);
2329 mem->bo = NULL;
2330
2331 vk_free2(&device->alloc, pAllocator, mem);
2332 }
2333
2334 VkResult radv_MapMemory(
2335 VkDevice _device,
2336 VkDeviceMemory _memory,
2337 VkDeviceSize offset,
2338 VkDeviceSize size,
2339 VkMemoryMapFlags flags,
2340 void** ppData)
2341 {
2342 RADV_FROM_HANDLE(radv_device, device, _device);
2343 RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
2344
2345 if (mem == NULL) {
2346 *ppData = NULL;
2347 return VK_SUCCESS;
2348 }
2349
2350 *ppData = device->ws->buffer_map(mem->bo);
2351 if (*ppData) {
2352 *ppData = (uint8_t *)*ppData + offset;
2353 return VK_SUCCESS;
2354 }
2355
2356 return VK_ERROR_MEMORY_MAP_FAILED;
2357 }
2358
2359 void radv_UnmapMemory(
2360 VkDevice _device,
2361 VkDeviceMemory _memory)
2362 {
2363 RADV_FROM_HANDLE(radv_device, device, _device);
2364 RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
2365
2366 if (mem == NULL)
2367 return;
2368
2369 device->ws->buffer_unmap(mem->bo);
2370 }
2371
2372 VkResult radv_FlushMappedMemoryRanges(
2373 VkDevice _device,
2374 uint32_t memoryRangeCount,
2375 const VkMappedMemoryRange* pMemoryRanges)
2376 {
2377 return VK_SUCCESS;
2378 }
2379
2380 VkResult radv_InvalidateMappedMemoryRanges(
2381 VkDevice _device,
2382 uint32_t memoryRangeCount,
2383 const VkMappedMemoryRange* pMemoryRanges)
2384 {
2385 return VK_SUCCESS;
2386 }
2387
2388 void radv_GetBufferMemoryRequirements(
2389 VkDevice device,
2390 VkBuffer _buffer,
2391 VkMemoryRequirements* pMemoryRequirements)
2392 {
2393 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
2394
2395 pMemoryRequirements->memoryTypeBits = (1u << RADV_MEM_TYPE_COUNT) - 1;
2396
2397 if (buffer->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)
2398 pMemoryRequirements->alignment = 4096;
2399 else
2400 pMemoryRequirements->alignment = 16;
2401
2402 pMemoryRequirements->size = align64(buffer->size, pMemoryRequirements->alignment);
2403 }
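
/*
 * Worked example for the sizing above (numbers hypothetical): a non-sparse
 * buffer with size = 100 reports alignment = 16 and
 * size = align64(100, 16) = 112; the same buffer created with
 * VK_BUFFER_CREATE_SPARSE_BINDING_BIT reports alignment = 4096 and
 * size = align64(100, 4096) = 4096.
 */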
2404
2405 void radv_GetBufferMemoryRequirements2KHR(
2406 VkDevice device,
2407 const VkBufferMemoryRequirementsInfo2KHR* pInfo,
2408 VkMemoryRequirements2KHR* pMemoryRequirements)
2409 {
2410 radv_GetBufferMemoryRequirements(device, pInfo->buffer,
2411 &pMemoryRequirements->memoryRequirements);
2412
2413 vk_foreach_struct(ext, pMemoryRequirements->pNext) {
2414 switch (ext->sType) {
2415 case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR: {
2416 VkMemoryDedicatedRequirementsKHR *req =
2417 (VkMemoryDedicatedRequirementsKHR *) ext;
2418 req->requiresDedicatedAllocation = false;
2419 req->prefersDedicatedAllocation = req->requiresDedicatedAllocation;
2420 break;
2421 }
2422 default:
2423 break;
2424 }
2425 }
2426 }
2427
2428 void radv_GetImageMemoryRequirements(
2429 VkDevice device,
2430 VkImage _image,
2431 VkMemoryRequirements* pMemoryRequirements)
2432 {
2433 RADV_FROM_HANDLE(radv_image, image, _image);
2434
2435 pMemoryRequirements->memoryTypeBits = (1u << RADV_MEM_TYPE_COUNT) - 1;
2436
2437 pMemoryRequirements->size = image->size;
2438 pMemoryRequirements->alignment = image->alignment;
2439 }
2440
2441 void radv_GetImageMemoryRequirements2KHR(
2442 VkDevice device,
2443 const VkImageMemoryRequirementsInfo2KHR* pInfo,
2444 VkMemoryRequirements2KHR* pMemoryRequirements)
2445 {
2446 radv_GetImageMemoryRequirements(device, pInfo->image,
2447 &pMemoryRequirements->memoryRequirements);
2448
2449 RADV_FROM_HANDLE(radv_image, image, pInfo->image);
2450
2451 vk_foreach_struct(ext, pMemoryRequirements->pNext) {
2452 switch (ext->sType) {
2453 case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR: {
2454 VkMemoryDedicatedRequirementsKHR *req =
2455 (VkMemoryDedicatedRequirementsKHR *) ext;
2456 req->requiresDedicatedAllocation = image->shareable;
2457 req->prefersDedicatedAllocation = req->requiresDedicatedAllocation;
2458 break;
2459 }
2460 default:
2461 break;
2462 }
2463 }
2464 }
2465
2466 void radv_GetImageSparseMemoryRequirements(
2467 VkDevice device,
2468 VkImage image,
2469 uint32_t* pSparseMemoryRequirementCount,
2470 VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
2471 {
2472 stub();
2473 }
2474
2475 void radv_GetImageSparseMemoryRequirements2KHR(
2476 VkDevice device,
2477 const VkImageSparseMemoryRequirementsInfo2KHR* pInfo,
2478 uint32_t* pSparseMemoryRequirementCount,
2479 VkSparseImageMemoryRequirements2KHR* pSparseMemoryRequirements)
2480 {
2481 stub();
2482 }
2483
2484 void radv_GetDeviceMemoryCommitment(
2485 VkDevice device,
2486 VkDeviceMemory memory,
2487 VkDeviceSize* pCommittedMemoryInBytes)
2488 {
2489 *pCommittedMemoryInBytes = 0;
2490 }
2491
2492 VkResult radv_BindBufferMemory(
2493 VkDevice device,
2494 VkBuffer _buffer,
2495 VkDeviceMemory _memory,
2496 VkDeviceSize memoryOffset)
2497 {
2498 RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
2499 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
2500
2501 if (mem) {
2502 buffer->bo = mem->bo;
2503 buffer->offset = memoryOffset;
2504 } else {
2505 buffer->bo = NULL;
2506 buffer->offset = 0;
2507 }
2508
2509 return VK_SUCCESS;
2510 }
2511
2512 VkResult radv_BindImageMemory(
2513 VkDevice device,
2514 VkImage _image,
2515 VkDeviceMemory _memory,
2516 VkDeviceSize memoryOffset)
2517 {
2518 RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
2519 RADV_FROM_HANDLE(radv_image, image, _image);
2520
2521 if (mem) {
2522 image->bo = mem->bo;
2523 image->offset = memoryOffset;
2524 } else {
2525 image->bo = NULL;
2526 image->offset = 0;
2527 }
2528
2529 return VK_SUCCESS;
2530 }
2531
2532
2533 static void
2534 radv_sparse_buffer_bind_memory(struct radv_device *device,
2535 const VkSparseBufferMemoryBindInfo *bind)
2536 {
2537 RADV_FROM_HANDLE(radv_buffer, buffer, bind->buffer);
2538
2539 for (uint32_t i = 0; i < bind->bindCount; ++i) {
2540 struct radv_device_memory *mem = NULL;
2541
2542 if (bind->pBinds[i].memory != VK_NULL_HANDLE)
2543 mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
2544
2545 device->ws->buffer_virtual_bind(buffer->bo,
2546 bind->pBinds[i].resourceOffset,
2547 bind->pBinds[i].size,
2548 mem ? mem->bo : NULL,
2549 bind->pBinds[i].memoryOffset);
2550 }
2551 }
2552
2553 static void
2554 radv_sparse_image_opaque_bind_memory(struct radv_device *device,
2555 const VkSparseImageOpaqueMemoryBindInfo *bind)
2556 {
2557 RADV_FROM_HANDLE(radv_image, image, bind->image);
2558
2559 for (uint32_t i = 0; i < bind->bindCount; ++i) {
2560 struct radv_device_memory *mem = NULL;
2561
2562 if (bind->pBinds[i].memory != VK_NULL_HANDLE)
2563 mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
2564
2565 device->ws->buffer_virtual_bind(image->bo,
2566 bind->pBinds[i].resourceOffset,
2567 bind->pBinds[i].size,
2568 mem ? mem->bo : NULL,
2569 bind->pBinds[i].memoryOffset);
2570 }
2571 }
2572
2573 VkResult radv_QueueBindSparse(
2574 VkQueue _queue,
2575 uint32_t bindInfoCount,
2576 const VkBindSparseInfo* pBindInfo,
2577 VkFence _fence)
2578 {
2579 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2580 RADV_FROM_HANDLE(radv_queue, queue, _queue);
2581 struct radeon_winsys_fence *base_fence = fence ? fence->fence : NULL;
2582 bool fence_emitted = false;
2583
2584 for (uint32_t i = 0; i < bindInfoCount; ++i) {
2585 struct radv_winsys_sem_info sem_info;
2586 for (uint32_t j = 0; j < pBindInfo[i].bufferBindCount; ++j) {
2587 radv_sparse_buffer_bind_memory(queue->device,
2588 pBindInfo[i].pBufferBinds + j);
2589 }
2590
2591 for (uint32_t j = 0; j < pBindInfo[i].imageOpaqueBindCount; ++j) {
2592 radv_sparse_image_opaque_bind_memory(queue->device,
2593 pBindInfo[i].pImageOpaqueBinds + j);
2594 }
2595
2596 VkResult result;
2597 result = radv_alloc_sem_info(&sem_info,
2598 pBindInfo[i].waitSemaphoreCount,
2599 pBindInfo[i].pWaitSemaphores,
2600 pBindInfo[i].signalSemaphoreCount,
2601 pBindInfo[i].pSignalSemaphores);
2602 if (result != VK_SUCCESS)
2603 return result;
2604
2605 if (pBindInfo[i].waitSemaphoreCount || pBindInfo[i].signalSemaphoreCount) {
2606 queue->device->ws->cs_submit(queue->hw_ctx, queue->queue_idx,
2607 &queue->device->empty_cs[queue->queue_family_index],
2608 1, NULL, NULL,
2609 &sem_info,
2610 false, base_fence);
2611 fence_emitted = true;
2612 if (fence)
2613 fence->submitted = true;
2614 }
2615
2616 radv_free_sem_info(&sem_info);
2617
2618 }
2619
2620 if (fence && !fence_emitted) {
2621 fence->signalled = true;
2622 }
2623
2624 return VK_SUCCESS;
2625 }
2626
2627 VkResult radv_CreateFence(
2628 VkDevice _device,
2629 const VkFenceCreateInfo* pCreateInfo,
2630 const VkAllocationCallbacks* pAllocator,
2631 VkFence* pFence)
2632 {
2633 RADV_FROM_HANDLE(radv_device, device, _device);
2634 struct radv_fence *fence = vk_alloc2(&device->alloc, pAllocator,
2635 sizeof(*fence), 8,
2636 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2637
2638 if (!fence)
2639 return VK_ERROR_OUT_OF_HOST_MEMORY;
2640
2641 memset(fence, 0, sizeof(*fence));
2642 fence->submitted = false;
2643 fence->signalled = !!(pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT);
2644 fence->fence = device->ws->create_fence();
2645 if (!fence->fence) {
2646 vk_free2(&device->alloc, pAllocator, fence);
2647 return VK_ERROR_OUT_OF_HOST_MEMORY;
2648 }
2649
2650 *pFence = radv_fence_to_handle(fence);
2651
2652 return VK_SUCCESS;
2653 }
2654
2655 void radv_DestroyFence(
2656 VkDevice _device,
2657 VkFence _fence,
2658 const VkAllocationCallbacks* pAllocator)
2659 {
2660 RADV_FROM_HANDLE(radv_device, device, _device);
2661 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2662
2663 if (!fence)
2664 return;
2665 device->ws->destroy_fence(fence->fence);
2666 vk_free2(&device->alloc, pAllocator, fence);
2667 }
2668
2669 static uint64_t radv_get_absolute_timeout(uint64_t timeout)
2670 {
2671 uint64_t current_time;
2672 struct timespec tv;
2673
2674 clock_gettime(CLOCK_MONOTONIC, &tv);
2675 current_time = tv.tv_nsec + tv.tv_sec*1000000000ull;
2676
2677 timeout = MIN2(UINT64_MAX - current_time, timeout);
2678
2679 return current_time + timeout;
2680 }
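
/*
 * The MIN2 clamp above avoids wrap-around: in the common "wait forever"
 * case timeout == UINT64_MAX, so current_time + timeout would overflow;
 * clamping timeout to UINT64_MAX - current_time makes the sum saturate at
 * UINT64_MAX instead.
 */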
2681
2682 VkResult radv_WaitForFences(
2683 VkDevice _device,
2684 uint32_t fenceCount,
2685 const VkFence* pFences,
2686 VkBool32 waitAll,
2687 uint64_t timeout)
2688 {
2689 RADV_FROM_HANDLE(radv_device, device, _device);
2690 timeout = radv_get_absolute_timeout(timeout);
2691
2692 if (!waitAll && fenceCount > 1) {
2693 fprintf(stderr, "radv: WaitForFences without waitAll not implemented yet\n");
2694 }
2695
2696 for (uint32_t i = 0; i < fenceCount; ++i) {
2697 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
2698 bool expired = false;
2699
2700 if (fence->signalled)
2701 continue;
2702
2703 if (!fence->submitted)
2704 return VK_TIMEOUT;
2705
2706 expired = device->ws->fence_wait(device->ws, fence->fence, true, timeout);
2707 if (!expired)
2708 return VK_TIMEOUT;
2709
2710 fence->signalled = true;
2711 }
2712
2713 return VK_SUCCESS;
2714 }
2715
2716 VkResult radv_ResetFences(VkDevice device,
2717 uint32_t fenceCount,
2718 const VkFence *pFences)
2719 {
2720 for (unsigned i = 0; i < fenceCount; ++i) {
2721 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
2722 fence->submitted = fence->signalled = false;
2723 }
2724
2725 return VK_SUCCESS;
2726 }
2727
2728 VkResult radv_GetFenceStatus(VkDevice _device, VkFence _fence)
2729 {
2730 RADV_FROM_HANDLE(radv_device, device, _device);
2731 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2732
2733 if (fence->signalled)
2734 return VK_SUCCESS;
2735 if (!fence->submitted)
2736 return VK_NOT_READY;
2737
2738 if (!device->ws->fence_wait(device->ws, fence->fence, false, 0))
2739 return VK_NOT_READY;
2740
2741 return VK_SUCCESS;
2742 }
2743
2744
2745 /* Queue semaphore functions */
2746
2747 VkResult radv_CreateSemaphore(
2748 VkDevice _device,
2749 const VkSemaphoreCreateInfo* pCreateInfo,
2750 const VkAllocationCallbacks* pAllocator,
2751 VkSemaphore* pSemaphore)
2752 {
2753 RADV_FROM_HANDLE(radv_device, device, _device);
2754 const VkExportSemaphoreCreateInfoKHR *export =
2755 vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO_KHR);
2756 VkExternalSemaphoreHandleTypeFlagsKHR handleTypes =
2757 export ? export->handleTypes : 0;
2758
2759 struct radv_semaphore *sem = vk_alloc2(&device->alloc, pAllocator,
2760 sizeof(*sem), 8,
2761 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2762 if (!sem)
2763 return VK_ERROR_OUT_OF_HOST_MEMORY;
2764
2765 sem->temp_syncobj = 0;
2766 /* create a syncobject if we are going to export this semaphore */
2767 if (handleTypes) {
2768 assert(device->physical_device->rad_info.has_syncobj);
2769 assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
2770 int ret = device->ws->create_syncobj(device->ws, &sem->syncobj);
2771 if (ret) {
2772 vk_free2(&device->alloc, pAllocator, sem);
2773 return VK_ERROR_OUT_OF_HOST_MEMORY;
2774 }
2775 sem->sem = NULL;
2776 } else {
2777 sem->sem = device->ws->create_sem(device->ws);
2778 if (!sem->sem) {
2779 vk_free2(&device->alloc, pAllocator, sem);
2780 return VK_ERROR_OUT_OF_HOST_MEMORY;
2781 }
2782 sem->syncobj = 0;
2783 }
2784
2785 *pSemaphore = radv_semaphore_to_handle(sem);
2786 return VK_SUCCESS;
2787 }
2788
2789 void radv_DestroySemaphore(
2790 VkDevice _device,
2791 VkSemaphore _semaphore,
2792 const VkAllocationCallbacks* pAllocator)
2793 {
2794 RADV_FROM_HANDLE(radv_device, device, _device);
2795 RADV_FROM_HANDLE(radv_semaphore, sem, _semaphore);
2796 if (!_semaphore)
2797 return;
2798
2799 if (sem->syncobj)
2800 device->ws->destroy_syncobj(device->ws, sem->syncobj);
2801 else
2802 device->ws->destroy_sem(sem->sem);
2803 vk_free2(&device->alloc, pAllocator, sem);
2804 }
2805
2806 VkResult radv_CreateEvent(
2807 VkDevice _device,
2808 const VkEventCreateInfo* pCreateInfo,
2809 const VkAllocationCallbacks* pAllocator,
2810 VkEvent* pEvent)
2811 {
2812 RADV_FROM_HANDLE(radv_device, device, _device);
2813 struct radv_event *event = vk_alloc2(&device->alloc, pAllocator,
2814 sizeof(*event), 8,
2815 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2816
2817 if (!event)
2818 return VK_ERROR_OUT_OF_HOST_MEMORY;
2819
2820 event->bo = device->ws->buffer_create(device->ws, 8, 8,
2821 RADEON_DOMAIN_GTT,
2822 RADEON_FLAG_CPU_ACCESS);
2823 if (!event->bo) {
2824 vk_free2(&device->alloc, pAllocator, event);
2825 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2826 }
2827
2828 event->map = (uint64_t*)device->ws->buffer_map(event->bo);
if (!event->map) {
device->ws->buffer_destroy(event->bo);
vk_free2(&device->alloc, pAllocator, event);
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
2829
2830 *pEvent = radv_event_to_handle(event);
2831
2832 return VK_SUCCESS;
2833 }
2834
2835 void radv_DestroyEvent(
2836 VkDevice _device,
2837 VkEvent _event,
2838 const VkAllocationCallbacks* pAllocator)
2839 {
2840 RADV_FROM_HANDLE(radv_device, device, _device);
2841 RADV_FROM_HANDLE(radv_event, event, _event);
2842
2843 if (!event)
2844 return;
2845 device->ws->buffer_destroy(event->bo);
2846 vk_free2(&device->alloc, pAllocator, event);
2847 }
2848
2849 VkResult radv_GetEventStatus(
2850 VkDevice _device,
2851 VkEvent _event)
2852 {
2853 RADV_FROM_HANDLE(radv_event, event, _event);
2854
2855 if (*event->map == 1)
2856 return VK_EVENT_SET;
2857 return VK_EVENT_RESET;
2858 }
2859
2860 VkResult radv_SetEvent(
2861 VkDevice _device,
2862 VkEvent _event)
2863 {
2864 RADV_FROM_HANDLE(radv_event, event, _event);
2865 *event->map = 1;
2866
2867 return VK_SUCCESS;
2868 }
2869
2870 VkResult radv_ResetEvent(
2871 VkDevice _device,
2872 VkEvent _event)
2873 {
2874 RADV_FROM_HANDLE(radv_event, event, _event);
2875 *event->map = 0;
2876
2877 return VK_SUCCESS;
2878 }
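
/*
 * Host-side sketch (hypothetical, not part of the driver): the event is a
 * single qword in CPU-visible GTT memory, 1 = set, 0 = reset, which the GPU
 * can also write, so a status query is just a read of *event->map.
 */
#if 0
while (radv_GetEventStatus(device, event) != VK_EVENT_SET)
   ; /* spin until the GPU (or radv_SetEvent()) writes 1 */
#endif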
2879
2880 VkResult radv_CreateBuffer(
2881 VkDevice _device,
2882 const VkBufferCreateInfo* pCreateInfo,
2883 const VkAllocationCallbacks* pAllocator,
2884 VkBuffer* pBuffer)
2885 {
2886 RADV_FROM_HANDLE(radv_device, device, _device);
2887 struct radv_buffer *buffer;
2888
2889 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
2890
2891 buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
2892 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2893 if (buffer == NULL)
2894 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2895
2896 buffer->size = pCreateInfo->size;
2897 buffer->usage = pCreateInfo->usage;
2898 buffer->bo = NULL;
2899 buffer->offset = 0;
2900 buffer->flags = pCreateInfo->flags;
2901
2902 if (pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) {
2903 buffer->bo = device->ws->buffer_create(device->ws,
2904 align64(buffer->size, 4096),
2905 4096, 0, RADEON_FLAG_VIRTUAL);
2906 if (!buffer->bo) {
2907 vk_free2(&device->alloc, pAllocator, buffer);
2908 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
2909 }
2910 }
2911
2912 *pBuffer = radv_buffer_to_handle(buffer);
2913
2914 return VK_SUCCESS;
2915 }
2916
2917 void radv_DestroyBuffer(
2918 VkDevice _device,
2919 VkBuffer _buffer,
2920 const VkAllocationCallbacks* pAllocator)
2921 {
2922 RADV_FROM_HANDLE(radv_device, device, _device);
2923 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
2924
2925 if (!buffer)
2926 return;
2927
2928 if (buffer->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)
2929 device->ws->buffer_destroy(buffer->bo);
2930
2931 vk_free2(&device->alloc, pAllocator, buffer);
2932 }
2933
2934 static inline unsigned
2935 si_tile_mode_index(const struct radv_image *image, unsigned level, bool stencil)
2936 {
2937 if (stencil)
2938 return image->surface.u.legacy.stencil_tiling_index[level];
2939 else
2940 return image->surface.u.legacy.tiling_index[level];
2941 }
2942
2943 static uint32_t radv_surface_layer_count(struct radv_image_view *iview)
2944 {
2945 return iview->type == VK_IMAGE_VIEW_TYPE_3D ? iview->extent.depth : iview->layer_count;
2946 }
2947
2948 static void
2949 radv_initialise_color_surface(struct radv_device *device,
2950 struct radv_color_buffer_info *cb,
2951 struct radv_image_view *iview)
2952 {
2953 const struct vk_format_description *desc;
2954 unsigned ntype, format, swap, endian;
2955 unsigned blend_clamp = 0, blend_bypass = 0;
2956 uint64_t va;
2957 const struct radeon_surf *surf = &iview->image->surface;
2958
2959 desc = vk_format_description(iview->vk_format);
2960
2961 memset(cb, 0, sizeof(*cb));
2962
2963 /* Intensity is implemented as Red, so treat it that way. */
2964 cb->cb_color_attrib = S_028C74_FORCE_DST_ALPHA_1(desc->swizzle[3] == VK_SWIZZLE_1);
2965
2966 va = device->ws->buffer_get_va(iview->bo) + iview->image->offset;
2967
2968 cb->cb_color_base = va >> 8;
2969
2970 if (device->physical_device->rad_info.chip_class >= GFX9) {
2971 struct gfx9_surf_meta_flags meta;
2972 if (iview->image->dcc_offset)
2973 meta = iview->image->surface.u.gfx9.dcc;
2974 else
2975 meta = iview->image->surface.u.gfx9.cmask;
2976
2977 cb->cb_color_attrib |= S_028C74_COLOR_SW_MODE(iview->image->surface.u.gfx9.surf.swizzle_mode) |
2978 S_028C74_FMASK_SW_MODE(iview->image->surface.u.gfx9.fmask.swizzle_mode) |
2979 S_028C74_RB_ALIGNED(meta.rb_aligned) |
2980 S_028C74_PIPE_ALIGNED(meta.pipe_aligned);
2981
2982 cb->cb_color_base += iview->image->surface.u.gfx9.surf_offset >> 8;
2983 cb->cb_color_base |= iview->image->surface.tile_swizzle;
2984 } else {
2985 const struct legacy_surf_level *level_info = &surf->u.legacy.level[iview->base_mip];
2986 unsigned pitch_tile_max, slice_tile_max, tile_mode_index;
2987
2988 cb->cb_color_base += level_info->offset >> 8;
2989 if (level_info->mode == RADEON_SURF_MODE_2D)
2990 cb->cb_color_base |= iview->image->surface.tile_swizzle;
2991
2992 pitch_tile_max = level_info->nblk_x / 8 - 1;
2993 slice_tile_max = (level_info->nblk_x * level_info->nblk_y) / 64 - 1;
2994 tile_mode_index = si_tile_mode_index(iview->image, iview->base_mip, false);
2995
2996 cb->cb_color_pitch = S_028C64_TILE_MAX(pitch_tile_max);
2997 cb->cb_color_slice = S_028C68_TILE_MAX(slice_tile_max);
2998 cb->cb_color_cmask_slice = iview->image->cmask.slice_tile_max;
2999
3000 cb->cb_color_attrib |= S_028C74_TILE_MODE_INDEX(tile_mode_index);
3001 cb->micro_tile_mode = iview->image->surface.micro_tile_mode;
3002
3003 if (iview->image->fmask.size) {
3004 if (device->physical_device->rad_info.chip_class >= CIK)
3005 cb->cb_color_pitch |= S_028C64_FMASK_TILE_MAX(iview->image->fmask.pitch_in_pixels / 8 - 1);
3006 cb->cb_color_attrib |= S_028C74_FMASK_TILE_MODE_INDEX(iview->image->fmask.tile_mode_index);
3007 cb->cb_color_fmask_slice = S_028C88_TILE_MAX(iview->image->fmask.slice_tile_max);
3008 } else {
3009 /* This must be set for fast clear to work without FMASK. */
3010 if (device->physical_device->rad_info.chip_class >= CIK)
3011 cb->cb_color_pitch |= S_028C64_FMASK_TILE_MAX(pitch_tile_max);
3012 cb->cb_color_attrib |= S_028C74_FMASK_TILE_MODE_INDEX(tile_mode_index);
3013 cb->cb_color_fmask_slice = S_028C88_TILE_MAX(slice_tile_max);
3014 }
3015 }
3016
3017 /* CMASK variables */
3018 va = device->ws->buffer_get_va(iview->bo) + iview->image->offset;
3019 va += iview->image->cmask.offset;
3020 cb->cb_color_cmask = va >> 8;
3021
3022 va = device->ws->buffer_get_va(iview->bo) + iview->image->offset;
3023 va += iview->image->dcc_offset;
3024 cb->cb_dcc_base = va >> 8;
3025 cb->cb_dcc_base |= iview->image->surface.tile_swizzle;
3026
3027 uint32_t max_slice = radv_surface_layer_count(iview);
3028 cb->cb_color_view = S_028C6C_SLICE_START(iview->base_layer) |
3029 S_028C6C_SLICE_MAX(iview->base_layer + max_slice - 1);
3030
3031 if (iview->image->info.samples > 1) {
3032 unsigned log_samples = util_logbase2(iview->image->info.samples);
3033
3034 cb->cb_color_attrib |= S_028C74_NUM_SAMPLES(log_samples) |
3035 S_028C74_NUM_FRAGMENTS(log_samples);
3036 }
3037
3038 if (iview->image->fmask.size) {
3039 va = device->ws->buffer_get_va(iview->bo) + iview->image->offset + iview->image->fmask.offset;
3040 cb->cb_color_fmask = va >> 8;
3041 cb->cb_color_fmask |= iview->image->fmask.tile_swizzle;
3042 } else {
3043 cb->cb_color_fmask = cb->cb_color_base;
3044 }
3045
3046 ntype = radv_translate_color_numformat(iview->vk_format,
3047 desc,
3048 vk_format_get_first_non_void_channel(iview->vk_format));
3049 format = radv_translate_colorformat(iview->vk_format);
3050 if (format == V_028C70_COLOR_INVALID || ntype == ~0u)
3051 radv_finishme("Illegal color\n");
3052 swap = radv_translate_colorswap(iview->vk_format, FALSE);
3053 endian = radv_colorformat_endian_swap(format);
3054
3055 /* blend clamp should be set for all NORM/SRGB types */
3056 if (ntype == V_028C70_NUMBER_UNORM ||
3057 ntype == V_028C70_NUMBER_SNORM ||
3058 ntype == V_028C70_NUMBER_SRGB)
3059 blend_clamp = 1;
3060
3061 /* set blend bypass according to docs if SINT/UINT or
3062 8/24 COLOR variants */
3063 if (ntype == V_028C70_NUMBER_UINT || ntype == V_028C70_NUMBER_SINT ||
3064 format == V_028C70_COLOR_8_24 || format == V_028C70_COLOR_24_8 ||
3065 format == V_028C70_COLOR_X24_8_32_FLOAT) {
3066 blend_clamp = 0;
3067 blend_bypass = 1;
3068 }
3069 #if 0
3070 if ((ntype == V_028C70_NUMBER_UINT || ntype == V_028C70_NUMBER_SINT) &&
3071 (format == V_028C70_COLOR_8 ||
3072 format == V_028C70_COLOR_8_8 ||
3073 format == V_028C70_COLOR_8_8_8_8))
3074 ->color_is_int8 = true;
3075 #endif
3076 cb->cb_color_info = S_028C70_FORMAT(format) |
3077 S_028C70_COMP_SWAP(swap) |
3078 S_028C70_BLEND_CLAMP(blend_clamp) |
3079 S_028C70_BLEND_BYPASS(blend_bypass) |
3080 S_028C70_SIMPLE_FLOAT(1) |
3081 S_028C70_ROUND_MODE(ntype != V_028C70_NUMBER_UNORM &&
3082 ntype != V_028C70_NUMBER_SNORM &&
3083 ntype != V_028C70_NUMBER_SRGB &&
3084 format != V_028C70_COLOR_8_24 &&
3085 format != V_028C70_COLOR_24_8) |
3086 S_028C70_NUMBER_TYPE(ntype) |
3087 S_028C70_ENDIAN(endian);
3088 if ((iview->image->info.samples > 1) && iview->image->fmask.size) {
3089 cb->cb_color_info |= S_028C70_COMPRESSION(1);
3090 if (device->physical_device->rad_info.chip_class == SI) {
3091 unsigned fmask_bankh = util_logbase2(iview->image->fmask.bank_height);
3092 cb->cb_color_attrib |= S_028C74_FMASK_BANK_HEIGHT(fmask_bankh);
3093 }
3094 }
3095
3096 if (iview->image->cmask.size &&
3097 !(device->debug_flags & RADV_DEBUG_NO_FAST_CLEARS))
3098 cb->cb_color_info |= S_028C70_FAST_CLEAR(1);
3099
3100 if (iview->image->surface.dcc_size && iview->base_mip < surf->num_dcc_levels)
3101 cb->cb_color_info |= S_028C70_DCC_ENABLE(1);
3102
3103 if (device->physical_device->rad_info.chip_class >= VI) {
3104 unsigned max_uncompressed_block_size = 2;
3105 if (iview->image->info.samples > 1) {
3106 if (iview->image->surface.bpe == 1)
3107 max_uncompressed_block_size = 0;
3108 else if (iview->image->surface.bpe == 2)
3109 max_uncompressed_block_size = 1;
3110 }
3111
3112 cb->cb_dcc_control = S_028C78_MAX_UNCOMPRESSED_BLOCK_SIZE(max_uncompressed_block_size) |
3113 S_028C78_INDEPENDENT_64B_BLOCKS(1);
3114 }
3115
3116 /* This must be set for fast clear to work without FMASK. */
3117 if (!iview->image->fmask.size &&
3118 device->physical_device->rad_info.chip_class == SI) {
3119 unsigned bankh = util_logbase2(iview->image->surface.u.legacy.bankh);
3120 cb->cb_color_attrib |= S_028C74_FMASK_BANK_HEIGHT(bankh);
3121 }
3122
3123 if (device->physical_device->rad_info.chip_class >= GFX9) {
3124 uint32_t max_slice = radv_surface_layer_count(iview);
3125 unsigned mip0_depth = iview->base_layer + max_slice - 1;
3126
3127 cb->cb_color_view |= S_028C6C_MIP_LEVEL(iview->base_mip);
3128 cb->cb_color_attrib |= S_028C74_MIP0_DEPTH(mip0_depth) |
3129 S_028C74_RESOURCE_TYPE(iview->image->surface.u.gfx9.resource_type);
3130 cb->cb_color_attrib2 = S_028C68_MIP0_WIDTH(iview->extent.width - 1) |
3131 S_028C68_MIP0_HEIGHT(iview->extent.height - 1) |
3132 S_028C68_MAX_MIP(iview->image->info.levels - 1);
3133
3134 cb->gfx9_epitch = S_0287A0_EPITCH(iview->image->surface.u.gfx9.surf.epitch);
3135
3136 }
3137 }
3138
3139 static void
3140 radv_initialise_ds_surface(struct radv_device *device,
3141 struct radv_ds_buffer_info *ds,
3142 struct radv_image_view *iview)
3143 {
3144 unsigned level = iview->base_mip;
3145 unsigned format, stencil_format;
3146 uint64_t va, s_offs, z_offs;
3147 bool stencil_only = false;
3148 memset(ds, 0, sizeof(*ds));
3149 switch (iview->image->vk_format) {
3150 case VK_FORMAT_D24_UNORM_S8_UINT:
3151 case VK_FORMAT_X8_D24_UNORM_PACK32:
3152 ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-24);
3153 ds->offset_scale = 2.0f;
3154 break;
3155 case VK_FORMAT_D16_UNORM:
3156 case VK_FORMAT_D16_UNORM_S8_UINT:
3157 ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-16);
3158 ds->offset_scale = 4.0f;
3159 break;
3160 case VK_FORMAT_D32_SFLOAT:
3161 case VK_FORMAT_D32_SFLOAT_S8_UINT:
3162 ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-23) |
3163 S_028B78_POLY_OFFSET_DB_IS_FLOAT_FMT(1);
3164 ds->offset_scale = 1.0f;
3165 break;
3166 case VK_FORMAT_S8_UINT:
3167 stencil_only = true;
3168 break;
3169 default:
3170 break;
3171 }
3172
3173 format = radv_translate_dbformat(iview->image->vk_format);
3174 stencil_format = iview->image->surface.flags & RADEON_SURF_SBUFFER ?
3175 V_028044_STENCIL_8 : V_028044_STENCIL_INVALID;
3176
3177 uint32_t max_slice = radv_surface_layer_count(iview);
3178 ds->db_depth_view = S_028008_SLICE_START(iview->base_layer) |
3179 S_028008_SLICE_MAX(iview->base_layer + max_slice - 1);
3180
3181 ds->db_htile_data_base = 0;
3182 ds->db_htile_surface = 0;
3183
3184 va = device->ws->buffer_get_va(iview->bo) + iview->image->offset;
3185 s_offs = z_offs = va;
3186
3187 if (device->physical_device->rad_info.chip_class >= GFX9) {
3188 assert(iview->image->surface.u.gfx9.surf_offset == 0);
3189 s_offs += iview->image->surface.u.gfx9.stencil_offset;
3190
3191 ds->db_z_info = S_028038_FORMAT(format) |
3192 S_028038_NUM_SAMPLES(util_logbase2(iview->image->info.samples)) |
3193 S_028038_SW_MODE(iview->image->surface.u.gfx9.surf.swizzle_mode) |
3194 S_028038_MAXMIP(iview->image->info.levels - 1);
3195 ds->db_stencil_info = S_02803C_FORMAT(stencil_format) |
3196 S_02803C_SW_MODE(iview->image->surface.u.gfx9.stencil.swizzle_mode);
3197
3198 ds->db_z_info2 = S_028068_EPITCH(iview->image->surface.u.gfx9.surf.epitch);
3199 ds->db_stencil_info2 = S_02806C_EPITCH(iview->image->surface.u.gfx9.stencil.epitch);
3200 ds->db_depth_view |= S_028008_MIPID(level);
3201
3202 ds->db_depth_size = S_02801C_X_MAX(iview->image->info.width - 1) |
3203 S_02801C_Y_MAX(iview->image->info.height - 1);
3204
3205 /* Only use HTILE for the first level. */
3206 if (iview->image->surface.htile_size && !level) {
3207 ds->db_z_info |= S_028038_TILE_SURFACE_ENABLE(1);
3208
3209 if (!(iview->image->surface.flags & RADEON_SURF_SBUFFER))
3210 /* Use all of the htile_buffer for depth if there's no stencil. */
3211 ds->db_stencil_info |= S_02803C_TILE_STENCIL_DISABLE(1);
3212 va = device->ws->buffer_get_va(iview->bo) + iview->image->offset +
3213 iview->image->htile_offset;
3214 ds->db_htile_data_base = va >> 8;
3215 ds->db_htile_surface = S_028ABC_FULL_CACHE(1) |
3216 S_028ABC_PIPE_ALIGNED(iview->image->surface.u.gfx9.htile.pipe_aligned) |
3217 S_028ABC_RB_ALIGNED(iview->image->surface.u.gfx9.htile.rb_aligned);
3218 }
3219 } else {
3220 const struct legacy_surf_level *level_info = &iview->image->surface.u.legacy.level[level];
3221
3222 if (stencil_only)
3223 level_info = &iview->image->surface.u.legacy.stencil_level[level];
3224
3225 z_offs += iview->image->surface.u.legacy.level[level].offset;
3226 s_offs += iview->image->surface.u.legacy.stencil_level[level].offset;
3227
3228 ds->db_depth_info = S_02803C_ADDR5_SWIZZLE_MASK(1);
3229 ds->db_z_info = S_028040_FORMAT(format) | S_028040_ZRANGE_PRECISION(1);
3230 ds->db_stencil_info = S_028044_FORMAT(stencil_format);
3231
3232 if (iview->image->info.samples > 1)
3233 ds->db_z_info |= S_028040_NUM_SAMPLES(util_logbase2(iview->image->info.samples));
3234
3235 if (device->physical_device->rad_info.chip_class >= CIK) {
3236 struct radeon_info *info = &device->physical_device->rad_info;
3237 unsigned tiling_index = iview->image->surface.u.legacy.tiling_index[level];
3238 unsigned stencil_index = iview->image->surface.u.legacy.stencil_tiling_index[level];
3239 unsigned macro_index = iview->image->surface.u.legacy.macro_tile_index;
3240 unsigned tile_mode = info->si_tile_mode_array[tiling_index];
3241 unsigned stencil_tile_mode = info->si_tile_mode_array[stencil_index];
3242 unsigned macro_mode = info->cik_macrotile_mode_array[macro_index];
3243
3244 if (stencil_only)
3245 tile_mode = stencil_tile_mode;
3246
3247 ds->db_depth_info |=
3248 S_02803C_ARRAY_MODE(G_009910_ARRAY_MODE(tile_mode)) |
3249 S_02803C_PIPE_CONFIG(G_009910_PIPE_CONFIG(tile_mode)) |
3250 S_02803C_BANK_WIDTH(G_009990_BANK_WIDTH(macro_mode)) |
3251 S_02803C_BANK_HEIGHT(G_009990_BANK_HEIGHT(macro_mode)) |
3252 S_02803C_MACRO_TILE_ASPECT(G_009990_MACRO_TILE_ASPECT(macro_mode)) |
3253 S_02803C_NUM_BANKS(G_009990_NUM_BANKS(macro_mode));
3254 ds->db_z_info |= S_028040_TILE_SPLIT(G_009910_TILE_SPLIT(tile_mode));
3255 ds->db_stencil_info |= S_028044_TILE_SPLIT(G_009910_TILE_SPLIT(stencil_tile_mode));
3256 } else {
3257 unsigned tile_mode_index = si_tile_mode_index(iview->image, level, false);
3258 ds->db_z_info |= S_028040_TILE_MODE_INDEX(tile_mode_index);
3259 tile_mode_index = si_tile_mode_index(iview->image, level, true);
3260 ds->db_stencil_info |= S_028044_TILE_MODE_INDEX(tile_mode_index);
3261 if (stencil_only)
3262 ds->db_z_info |= S_028040_TILE_MODE_INDEX(tile_mode_index);
3263 }
3264
3265 ds->db_depth_size = S_028058_PITCH_TILE_MAX((level_info->nblk_x / 8) - 1) |
3266 S_028058_HEIGHT_TILE_MAX((level_info->nblk_y / 8) - 1);
3267 ds->db_depth_slice = S_02805C_SLICE_TILE_MAX((level_info->nblk_x * level_info->nblk_y) / 64 - 1);
3268
3269 if (iview->image->surface.htile_size && !level) {
3270 ds->db_z_info |= S_028040_TILE_SURFACE_ENABLE(1);
3271
3272 if (!(iview->image->surface.flags & RADEON_SURF_SBUFFER))
3273 /* Use all of the htile_buffer for depth if there's no stencil. */
3274 ds->db_stencil_info |= S_028044_TILE_STENCIL_DISABLE(1);
3275
3276 va = device->ws->buffer_get_va(iview->bo) + iview->image->offset +
3277 iview->image->htile_offset;
3278 ds->db_htile_data_base = va >> 8;
3279 ds->db_htile_surface = S_028ABC_FULL_CACHE(1);
3280 }
3281 }
3282
3283 ds->db_z_read_base = ds->db_z_write_base = z_offs >> 8;
3284 ds->db_stencil_read_base = ds->db_stencil_write_base = s_offs >> 8;
3285 }
3286
3287 VkResult radv_CreateFramebuffer(
3288 VkDevice _device,
3289 const VkFramebufferCreateInfo* pCreateInfo,
3290 const VkAllocationCallbacks* pAllocator,
3291 VkFramebuffer* pFramebuffer)
3292 {
3293 RADV_FROM_HANDLE(radv_device, device, _device);
3294 struct radv_framebuffer *framebuffer;
3295
3296 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
3297
3298 size_t size = sizeof(*framebuffer) +
3299 sizeof(struct radv_attachment_info) * pCreateInfo->attachmentCount;
3300 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
3301 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3302 if (framebuffer == NULL)
3303 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3304
3305 framebuffer->attachment_count = pCreateInfo->attachmentCount;
3306 framebuffer->width = pCreateInfo->width;
3307 framebuffer->height = pCreateInfo->height;
3308 framebuffer->layers = pCreateInfo->layers;
3309 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
3310 VkImageView _iview = pCreateInfo->pAttachments[i];
3311 struct radv_image_view *iview = radv_image_view_from_handle(_iview);
3312 framebuffer->attachments[i].attachment = iview;
3313 if (iview->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) {
3314 radv_initialise_color_surface(device, &framebuffer->attachments[i].cb, iview);
3315 } else if (iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
3316 radv_initialise_ds_surface(device, &framebuffer->attachments[i].ds, iview);
3317 }
3318 framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
3319 framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
3320 framebuffer->layers = MIN2(framebuffer->layers, radv_surface_layer_count(iview));
3321 }
3322
3323 *pFramebuffer = radv_framebuffer_to_handle(framebuffer);
3324 return VK_SUCCESS;
3325 }
3326
3327 void radv_DestroyFramebuffer(
3328 VkDevice _device,
3329 VkFramebuffer _fb,
3330 const VkAllocationCallbacks* pAllocator)
3331 {
3332 RADV_FROM_HANDLE(radv_device, device, _device);
3333 RADV_FROM_HANDLE(radv_framebuffer, fb, _fb);
3334
3335 if (!fb)
3336 return;
3337 vk_free2(&device->alloc, pAllocator, fb);
3338 }
3339
3340 static unsigned radv_tex_wrap(VkSamplerAddressMode address_mode)
3341 {
3342 switch (address_mode) {
3343 case VK_SAMPLER_ADDRESS_MODE_REPEAT:
3344 return V_008F30_SQ_TEX_WRAP;
3345 case VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT:
3346 return V_008F30_SQ_TEX_MIRROR;
3347 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE:
3348 return V_008F30_SQ_TEX_CLAMP_LAST_TEXEL;
3349 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER:
3350 return V_008F30_SQ_TEX_CLAMP_BORDER;
3351 case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE:
3352 return V_008F30_SQ_TEX_MIRROR_ONCE_LAST_TEXEL;
3353 default:
3354 unreachable("illegal tex wrap mode");
3355 break;
3356 }
3357 }
3358
3359 static unsigned
3360 radv_tex_compare(VkCompareOp op)
3361 {
3362 switch (op) {
3363 case VK_COMPARE_OP_NEVER:
3364 return V_008F30_SQ_TEX_DEPTH_COMPARE_NEVER;
3365 case VK_COMPARE_OP_LESS:
3366 return V_008F30_SQ_TEX_DEPTH_COMPARE_LESS;
3367 case VK_COMPARE_OP_EQUAL:
3368 return V_008F30_SQ_TEX_DEPTH_COMPARE_EQUAL;
3369 case VK_COMPARE_OP_LESS_OR_EQUAL:
3370 return V_008F30_SQ_TEX_DEPTH_COMPARE_LESSEQUAL;
3371 case VK_COMPARE_OP_GREATER:
3372 return V_008F30_SQ_TEX_DEPTH_COMPARE_GREATER;
3373 case VK_COMPARE_OP_NOT_EQUAL:
3374 return V_008F30_SQ_TEX_DEPTH_COMPARE_NOTEQUAL;
3375 case VK_COMPARE_OP_GREATER_OR_EQUAL:
3376 return V_008F30_SQ_TEX_DEPTH_COMPARE_GREATEREQUAL;
3377 case VK_COMPARE_OP_ALWAYS:
3378 return V_008F30_SQ_TEX_DEPTH_COMPARE_ALWAYS;
3379 default:
3380 unreachable("illegal compare mode");
3381 break;
3382 }
3383 }
3384
3385 static unsigned
3386 radv_tex_filter(VkFilter filter, unsigned max_aniso)
3387 {
3388 switch (filter) {
3389 case VK_FILTER_NEAREST:
3390 return (max_aniso > 1 ? V_008F38_SQ_TEX_XY_FILTER_ANISO_POINT :
3391 V_008F38_SQ_TEX_XY_FILTER_POINT);
3392 case VK_FILTER_LINEAR:
3393 return (max_aniso > 1 ? V_008F38_SQ_TEX_XY_FILTER_ANISO_BILINEAR :
3394 V_008F38_SQ_TEX_XY_FILTER_BILINEAR);
3395 case VK_FILTER_CUBIC_IMG:
3396 default:
3397 fprintf(stderr, "illegal texture filter\n");
3398 return 0;
3399 }
3400 }
3401
3402 static unsigned
3403 radv_tex_mipfilter(VkSamplerMipmapMode mode)
3404 {
3405 switch (mode) {
3406 case VK_SAMPLER_MIPMAP_MODE_NEAREST:
3407 return V_008F38_SQ_TEX_Z_FILTER_POINT;
3408 case VK_SAMPLER_MIPMAP_MODE_LINEAR:
3409 return V_008F38_SQ_TEX_Z_FILTER_LINEAR;
3410 default:
3411 return V_008F38_SQ_TEX_Z_FILTER_NONE;
3412 }
3413 }
3414
3415 static unsigned
3416 radv_tex_bordercolor(VkBorderColor bcolor)
3417 {
3418 switch (bcolor) {
3419 case VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK:
3420 case VK_BORDER_COLOR_INT_TRANSPARENT_BLACK:
3421 return V_008F3C_SQ_TEX_BORDER_COLOR_TRANS_BLACK;
3422 case VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK:
3423 case VK_BORDER_COLOR_INT_OPAQUE_BLACK:
3424 return V_008F3C_SQ_TEX_BORDER_COLOR_OPAQUE_BLACK;
3425 case VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE:
3426 case VK_BORDER_COLOR_INT_OPAQUE_WHITE:
3427 return V_008F3C_SQ_TEX_BORDER_COLOR_OPAQUE_WHITE;
3428 default:
3429 break;
3430 }
3431 return 0;
3432 }
3433
3434 static unsigned
3435 radv_tex_aniso_filter(unsigned filter)
3436 {
3437 if (filter < 2)
3438 return 0;
3439 if (filter < 4)
3440 return 1;
3441 if (filter < 8)
3442 return 2;
3443 if (filter < 16)
3444 return 3;
3445 return 4;
3446 }
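/*
 * Editor's note: radv_tex_aniso_filter() above maps
 * VkSamplerCreateInfo::maxAnisotropy to the 3-bit MAX_ANISO_RATIO field,
 * effectively floor(log2(filter)) clamped to [0, 4]: 1x -> 0, 2-3x -> 1,
 * 4-7x -> 2, 8-15x -> 3, 16x and up -> 4. An equivalent loop formulation
 * (editor's sketch, not driver code):
 */
static unsigned
example_aniso_ratio(unsigned filter)
{
	unsigned ratio = 0;

	while (ratio < 4 && (2u << ratio) <= filter) /* thresholds 2, 4, 8, 16 */
		ratio++;
	return ratio;
}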
3447
3448 static void
3449 radv_init_sampler(struct radv_device *device,
3450 struct radv_sampler *sampler,
3451 const VkSamplerCreateInfo *pCreateInfo)
3452 {
3453 uint32_t max_aniso = pCreateInfo->anisotropyEnable && pCreateInfo->maxAnisotropy > 1.0 ?
3454 (uint32_t) pCreateInfo->maxAnisotropy : 0;
3455 uint32_t max_aniso_ratio = radv_tex_aniso_filter(max_aniso);
3456 bool is_vi = (device->physical_device->rad_info.chip_class >= VI);
3457
3458 sampler->state[0] = (S_008F30_CLAMP_X(radv_tex_wrap(pCreateInfo->addressModeU)) |
3459 S_008F30_CLAMP_Y(radv_tex_wrap(pCreateInfo->addressModeV)) |
3460 S_008F30_CLAMP_Z(radv_tex_wrap(pCreateInfo->addressModeW)) |
3461 S_008F30_MAX_ANISO_RATIO(max_aniso_ratio) |
3462 S_008F30_DEPTH_COMPARE_FUNC(radv_tex_compare(pCreateInfo->compareOp)) |
3463 S_008F30_FORCE_UNNORMALIZED(pCreateInfo->unnormalizedCoordinates ? 1 : 0) |
3464 S_008F30_ANISO_THRESHOLD(max_aniso_ratio >> 1) |
3465 S_008F30_ANISO_BIAS(max_aniso_ratio) |
3466 S_008F30_DISABLE_CUBE_WRAP(0) |
3467 S_008F30_COMPAT_MODE(is_vi));
3468 sampler->state[1] = (S_008F34_MIN_LOD(S_FIXED(CLAMP(pCreateInfo->minLod, 0, 15), 8)) |
3469 S_008F34_MAX_LOD(S_FIXED(CLAMP(pCreateInfo->maxLod, 0, 15), 8)) |
3470 S_008F34_PERF_MIP(max_aniso_ratio ? max_aniso_ratio + 6 : 0));
3471 sampler->state[2] = (S_008F38_LOD_BIAS(S_FIXED(CLAMP(pCreateInfo->mipLodBias, -16, 16), 8)) |
3472 S_008F38_XY_MAG_FILTER(radv_tex_filter(pCreateInfo->magFilter, max_aniso)) |
3473 S_008F38_XY_MIN_FILTER(radv_tex_filter(pCreateInfo->minFilter, max_aniso)) |
3474 S_008F38_MIP_FILTER(radv_tex_mipfilter(pCreateInfo->mipmapMode)) |
3475 S_008F38_MIP_POINT_PRECLAMP(0) |
3476 S_008F38_DISABLE_LSB_CEIL(1) |
3477 S_008F38_FILTER_PREC_FIX(1) |
3478 S_008F38_ANISO_OVERRIDE(is_vi));
3479 sampler->state[3] = (S_008F3C_BORDER_COLOR_PTR(0) |
3480 S_008F3C_BORDER_COLOR_TYPE(radv_tex_bordercolor(pCreateInfo->borderColor)));
3481 }
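/*
 * Editor's note: the LOD-related fields above are hardware fixed-point
 * values. S_FIXED(x, 8) converts x to 8 fractional bits (roughly x * 256),
 * with CLAMP() keeping min/max LOD in the representable [0, 15] range and
 * the LOD bias in [-16, 16]. A minimal sketch of the conversion (editor's
 * illustration; the real S_FIXED macro lives in the shared register
 * headers):
 */
static int
example_s_fixed(float value, unsigned frac_bits)
{
	/* example_s_fixed(1.5f, 8) == 0x180, i.e. 1 + 128/256 */
	return (int)(value * (1 << frac_bits));
}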
3482
3483 VkResult radv_CreateSampler(
3484 VkDevice _device,
3485 const VkSamplerCreateInfo* pCreateInfo,
3486 const VkAllocationCallbacks* pAllocator,
3487 VkSampler* pSampler)
3488 {
3489 RADV_FROM_HANDLE(radv_device, device, _device);
3490 struct radv_sampler *sampler;
3491
3492 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
3493
3494 sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
3495 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3496 if (!sampler)
3497 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3498
3499 radv_init_sampler(device, sampler, pCreateInfo);
3500 *pSampler = radv_sampler_to_handle(sampler);
3501
3502 return VK_SUCCESS;
3503 }
3504
3505 void radv_DestroySampler(
3506 VkDevice _device,
3507 VkSampler _sampler,
3508 const VkAllocationCallbacks* pAllocator)
3509 {
3510 RADV_FROM_HANDLE(radv_device, device, _device);
3511 RADV_FROM_HANDLE(radv_sampler, sampler, _sampler);
3512
3513 if (!sampler)
3514 return;
3515 vk_free2(&device->alloc, pAllocator, sampler);
3516 }
3517
3518 /* vk_icd.h does not declare this function, so we declare it here to
3519 * suppress -Wmissing-prototypes.
3520 */
3521 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
3522 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
3523
3524 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
3525 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
3526 {
3527 /* For the full details on loader interface versioning, see
3528 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
3529 * What follows is a condensed summary, to help you navigate the large and
3530 * confusing official doc.
3531 *
3532 * - Loader interface v0 is incompatible with later versions. We don't
3533 * support it.
3534 *
3535 * - In loader interface v1:
3536 * - The first ICD entrypoint called by the loader is
3537 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
3538 * entrypoint.
3539 * - The ICD must statically expose no other Vulkan symbol unless it is
3540 * linked with -Bsymbolic.
3541 * - Each dispatchable Vulkan handle created by the ICD must be
3542 * a pointer to a struct whose first member is VK_LOADER_DATA. The
3543 * ICD must initialize VK_LOADER_DATA.loadMagic to ICD_LOADER_MAGIC.
3544 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
3545 * vkDestroySurfaceKHR(). The ICD must be capable of working with
3546 * such loader-managed surfaces.
3547 *
3548 * - Loader interface v2 differs from v1 in:
3549 * - The first ICD entrypoint called by the loader is
3550 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
3551 * statically expose this entrypoint.
3552 *
3553 * - Loader interface v3 differs from v2 in:
3554 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
3555 * vkDestroySurfaceKHR(), and other API that uses VkSurfaceKHR,
3556 * because the loader no longer does so.
3557 */
3558 *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
3559 return VK_SUCCESS;
3560 }
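/*
 * Editor's note: the negotiation above is a simple min(): the loader writes
 * the highest interface version it supports into *pSupportedVersion, the
 * ICD lowers it to its own maximum (3 here), and both sides then use the
 * result. A rough caller-side sketch (editor's illustration, not actual
 * loader source; example_* names are hypothetical):
 */
static uint32_t
example_negotiate_with_icd(VkResult (*negotiate)(uint32_t *))
{
	uint32_t version = 5; /* pretend the loader supports up to v5 */

	/* If the ICD does not export the negotiate entrypoint at all, the
	 * loader falls back to treating it as an old (v0/v1) ICD. */
	if (!negotiate || negotiate(&version) != VK_SUCCESS)
		return 1;

	return version; /* at most 3 when talking to this driver */
}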
3561
3562 VkResult radv_GetMemoryFdKHR(VkDevice _device,
3563 const VkMemoryGetFdInfoKHR *pGetFdInfo,
3564 int *pFD)
3565 {
3566 RADV_FROM_HANDLE(radv_device, device, _device);
3567 RADV_FROM_HANDLE(radv_device_memory, memory, pGetFdInfo->memory);
3568
3569 assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
3570
3571 /* We support only one handle type. */
3572 assert(pGetFdInfo->handleType ==
3573 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
3574
3575 if (!radv_get_memory_fd(device, memory, pFD))
3576 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
3577
3578 return VK_SUCCESS;
3579 }
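/*
 * Editor's note: caller-side sketch of the export path above (example_* is
 * a hypothetical name). On success the application owns the returned opaque
 * fd and must close() it or transfer ownership, e.g. to an import call.
 */
static int
example_export_memory_fd(VkDevice device, VkDeviceMemory memory)
{
	const VkMemoryGetFdInfoKHR fd_info = {
		.sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR,
		.memory = memory,
		.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,
	};
	int fd = -1;

	if (radv_GetMemoryFdKHR(device, &fd_info, &fd) != VK_SUCCESS)
		return -1;
	return fd;
}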
3580
3581 VkResult radv_GetMemoryFdPropertiesKHR(VkDevice _device,
3582 VkExternalMemoryHandleTypeFlagBitsKHR handleType,
3583 int fd,
3584 VkMemoryFdPropertiesKHR *pMemoryFdProperties)
3585 {
3586 /* The valid usage section for this function says:
3587 *
3588 * "handleType must not be one of the handle types defined as opaque."
3589 *
3590 * Since we only handle opaque handles for now, there are no FD properties.
3591 */
3592 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
3593 }
3594
3595 VkResult radv_ImportSemaphoreFdKHR(VkDevice _device,
3596 const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
3597 {
3598 RADV_FROM_HANDLE(radv_device, device, _device);
3599 RADV_FROM_HANDLE(radv_semaphore, sem, pImportSemaphoreFdInfo->semaphore);
3600 uint32_t syncobj_handle = 0;
3601 assert(pImportSemaphoreFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
3602
3603 int ret = device->ws->import_syncobj(device->ws, pImportSemaphoreFdInfo->fd, &syncobj_handle);
3604 if (ret != 0)
3605 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
3606
3607 if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR) {
3608 sem->temp_syncobj = syncobj_handle;
3609 } else {
3610 sem->syncobj = syncobj_handle;
3611 }
3612 close(pImportSemaphoreFdInfo->fd);
3613 return VK_SUCCESS;
3614 }
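/*
 * Editor's note: caller-side sketch of the import path above (example_* is
 * a hypothetical name). Note the TEMPORARY flag handling: a temporary
 * import lands in temp_syncobj and shadows the permanent payload, while a
 * permanent import replaces syncobj itself. On success the driver takes
 * ownership of the fd (the implementation close()s it above).
 */
static VkResult
example_import_semaphore_fd(VkDevice device, VkSemaphore sem, int fd,
			    bool temporary)
{
	const VkImportSemaphoreFdInfoKHR info = {
		.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR,
		.semaphore = sem,
		.flags = temporary ? VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR : 0,
		.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,
		.fd = fd,
	};

	return radv_ImportSemaphoreFdKHR(device, &info);
}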
3615
3616 VkResult radv_GetSemaphoreFdKHR(VkDevice _device,
3617 const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
3618 int *pFd)
3619 {
3620 RADV_FROM_HANDLE(radv_device, device, _device);
3621 RADV_FROM_HANDLE(radv_semaphore, sem, pGetFdInfo->semaphore);
3622 int ret;
3623 uint32_t syncobj_handle;
3624
3625 assert(pGetFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
3626 if (sem->temp_syncobj)
3627 syncobj_handle = sem->temp_syncobj;
3628 else
3629 syncobj_handle = sem->syncobj;
3630 ret = device->ws->export_syncobj(device->ws, syncobj_handle, pFd);
3631 if (ret)
3632 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
3633 return VK_SUCCESS;
3634 }
3635
3636 void radv_GetPhysicalDeviceExternalSemaphorePropertiesKHR(
3637 VkPhysicalDevice physicalDevice,
3638 const VkPhysicalDeviceExternalSemaphoreInfoKHR* pExternalSemaphoreInfo,
3639 VkExternalSemaphorePropertiesKHR* pExternalSemaphoreProperties)
3640 {
3641 if (pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR) {
3642 pExternalSemaphoreProperties->exportFromImportedHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
3643 pExternalSemaphoreProperties->compatibleHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
3644 pExternalSemaphoreProperties->externalSemaphoreFeatures = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
3645 VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
3646 } else {
3647 pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
3648 pExternalSemaphoreProperties->compatibleHandleTypes = 0;
3649 pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
3650 }
3651 }
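/*
 * Editor's note: an application would query the capabilities advertised
 * above roughly as follows (editor's sketch; example_* is a hypothetical
 * name). Only the opaque-fd handle type reports import and export support.
 */
static bool
example_semaphore_fd_supported(VkPhysicalDevice pdev)
{
	const VkPhysicalDeviceExternalSemaphoreInfoKHR info = {
		.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR,
		.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,
	};
	VkExternalSemaphorePropertiesKHR props = {
		.sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR,
	};

	radv_GetPhysicalDeviceExternalSemaphorePropertiesKHR(pdev, &info, &props);
	return (props.externalSemaphoreFeatures &
		VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR) != 0;
}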