radv: advertise v6 of the wayland surface extension
src/amd/vulkan/radv_device.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 */
27
28 #include <stdbool.h>
29 #include <string.h>
30 #include <unistd.h>
31 #include <fcntl.h>
32 #include "radv_private.h"
33 #include "radv_cs.h"
34 #include "util/disk_cache.h"
35 #include "util/strtod.h"
36 #include "vk_util.h"
37 #include <xf86drm.h>
38 #include <amdgpu.h>
39 #include <amdgpu_drm.h>
40 #include "amdgpu_id.h"
41 #include "winsys/amdgpu/radv_amdgpu_winsys_public.h"
42 #include "ac_llvm_util.h"
43 #include "vk_format.h"
44 #include "sid.h"
45 #include "gfx9d.h"
46 #include "util/debug.h"
47
48 static int
49 radv_device_get_cache_uuid(enum radeon_family family, void *uuid)
50 {
51 uint32_t mesa_timestamp, llvm_timestamp;
52 uint16_t f = family;
53 memset(uuid, 0, VK_UUID_SIZE);
54 if (!disk_cache_get_function_timestamp(radv_device_get_cache_uuid, &mesa_timestamp) ||
55 !disk_cache_get_function_timestamp(LLVMInitializeAMDGPUTargetInfo, &llvm_timestamp))
56 return -1;
57
58 memcpy(uuid, &mesa_timestamp, 4);
59 memcpy((char*)uuid + 4, &llvm_timestamp, 4);
60 memcpy((char*)uuid + 8, &f, 2);
61 snprintf((char*)uuid + 10, VK_UUID_SIZE - 10, "radv");
62 return 0;
63 }
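
The bytes assembled here surface to applications as VkPhysicalDeviceProperties::pipelineCacheUUID, so a serialized pipeline cache produced by a different Mesa or LLVM build (or for a different chip) gets rejected. A minimal sketch, not radv code, of the check an application can run before trusting a cached blob; the 16-byte header prefix (length, header version, vendorID, deviceID) is fixed by the Vulkan spec, while the helper name is illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <vulkan/vulkan.h>

static bool
pipeline_cache_blob_usable(VkPhysicalDevice pdev, const uint8_t *blob, size_t size)
{
        VkPhysicalDeviceProperties props;
        uint32_t header_version, vendor_id, device_id;

        if (size < 16 + VK_UUID_SIZE)
                return false;

        vkGetPhysicalDeviceProperties(pdev, &props);

        /* VkPipelineCache data begins with four uint32s (length, header
         * version, vendorID, deviceID) followed by the 16-byte UUID. */
        memcpy(&header_version, blob + 4, 4);
        memcpy(&vendor_id, blob + 8, 4);
        memcpy(&device_id, blob + 12, 4);

        return header_version == VK_PIPELINE_CACHE_HEADER_VERSION_ONE &&
               vendor_id == props.vendorID &&
               device_id == props.deviceID &&
               memcmp(blob + 16, props.pipelineCacheUUID, VK_UUID_SIZE) == 0;
}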
64
65 static void
66 radv_get_device_uuid(drmDevicePtr device, void *uuid) {
67 memset(uuid, 0, VK_UUID_SIZE);
68 memcpy((char*)uuid + 0, &device->businfo.pci->domain, 2);
69 memcpy((char*)uuid + 2, &device->businfo.pci->bus, 1);
70 memcpy((char*)uuid + 3, &device->businfo.pci->dev, 1);
71 memcpy((char*)uuid + 4, &device->businfo.pci->func, 1);
72 }
73
74 static const VkExtensionProperties instance_extensions[] = {
75 {
76 .extensionName = VK_KHR_SURFACE_EXTENSION_NAME,
77 .specVersion = 25,
78 },
79 #ifdef VK_USE_PLATFORM_XCB_KHR
80 {
81 .extensionName = VK_KHR_XCB_SURFACE_EXTENSION_NAME,
82 .specVersion = 6,
83 },
84 #endif
85 #ifdef VK_USE_PLATFORM_XLIB_KHR
86 {
87 .extensionName = VK_KHR_XLIB_SURFACE_EXTENSION_NAME,
88 .specVersion = 6,
89 },
90 #endif
91 #ifdef VK_USE_PLATFORM_WAYLAND_KHR
92 {
93 .extensionName = VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME,
94 .specVersion = 6,
95 },
96 #endif
97 {
98 .extensionName = VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
99 .specVersion = 1,
100 },
101 {
102 .extensionName = VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME,
103 .specVersion = 1,
104 },
105 };
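
As the commit subject says, the Wayland entry above now reports specVersion 6. A hedged sketch of how a client can confirm the advertised version at run time with the standard two-call enumeration pattern (the helper name is illustrative; the API calls are core Vulkan):

#include <stdlib.h>
#include <string.h>
#include <vulkan/vulkan.h>

static uint32_t
instance_extension_version(const char *name)
{
        uint32_t count = 0, version = 0;

        vkEnumerateInstanceExtensionProperties(NULL, &count, NULL);
        VkExtensionProperties *props = malloc(count * sizeof(*props));
        if (!props)
                return 0;
        vkEnumerateInstanceExtensionProperties(NULL, &count, props);

        for (uint32_t i = 0; i < count; i++)
                if (strcmp(props[i].extensionName, name) == 0)
                        version = props[i].specVersion;

        free(props);
        return version; /* 0 when the extension is absent */
}

/* e.g. instance_extension_version("VK_KHR_wayland_surface") >= 6 */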
106
107 static const VkExtensionProperties common_device_extensions[] = {
108 {
109 .extensionName = VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME,
110 .specVersion = 1,
111 },
112 {
113 .extensionName = VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME,
114 .specVersion = 1,
115 },
116 {
117 .extensionName = VK_KHR_MAINTENANCE1_EXTENSION_NAME,
118 .specVersion = 1,
119 },
120 {
121 .extensionName = VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME,
122 .specVersion = 1,
123 },
124 {
125 .extensionName = VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME,
126 .specVersion = 1,
127 },
128 {
129 .extensionName = VK_KHR_SWAPCHAIN_EXTENSION_NAME,
130 .specVersion = 68,
131 },
132 {
133 .extensionName = VK_AMD_DRAW_INDIRECT_COUNT_EXTENSION_NAME,
134 .specVersion = 1,
135 },
136 {
137 .extensionName = VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME,
138 .specVersion = 1,
139 },
140 {
141 .extensionName = VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME,
142 .specVersion = 1,
143 },
144 {
145 .extensionName = VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME,
146 .specVersion = 1,
147 },
148 {
149 .extensionName = VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME,
150 .specVersion = 1,
151 },
152 {
153 .extensionName = VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME,
154 .specVersion = 1,
155 },
156 };
157
158 static VkResult
159 radv_extensions_register(struct radv_instance *instance,
160 struct radv_extensions *extensions,
161 const VkExtensionProperties *new_ext,
162 uint32_t num_ext)
163 {
164 size_t new_size;
165 VkExtensionProperties *new_ptr;
166
167 assert(new_ext && num_ext > 0);
168
169 if (!new_ext)
170 return VK_ERROR_INITIALIZATION_FAILED;
171
172 new_size = (extensions->num_ext + num_ext) * sizeof(VkExtensionProperties);
173 new_ptr = vk_realloc(&instance->alloc, extensions->ext_array,
174 new_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
175
176 /* Old array continues to be valid, update nothing */
177 if (!new_ptr)
178 return VK_ERROR_OUT_OF_HOST_MEMORY;
179
180 memcpy(&new_ptr[extensions->num_ext], new_ext,
181 num_ext * sizeof(VkExtensionProperties));
182 extensions->ext_array = new_ptr;
183 extensions->num_ext += num_ext;
184
185 return VK_SUCCESS;
186 }
187
188 static void
189 radv_extensions_finish(struct radv_instance *instance,
190 struct radv_extensions *extensions)
191 {
192 assert(extensions);
193
194 if (!extensions)
195 radv_loge("Attempted to free invalid extension struct\n");
196
197 if (extensions->ext_array)
198 vk_free(&instance->alloc, extensions->ext_array);
199 }
200
201 static bool
202 is_extension_enabled(const VkExtensionProperties *extensions,
203 size_t num_ext,
204 const char *name)
205 {
206 assert(extensions && name);
207
208 for (uint32_t i = 0; i < num_ext; i++) {
209 if (strcmp(name, extensions[i].extensionName) == 0)
210 return true;
211 }
212
213 return false;
214 }
215
216 static const char *
217 get_chip_name(enum radeon_family family)
218 {
219 switch (family) {
220 case CHIP_TAHITI: return "AMD RADV TAHITI";
221 case CHIP_PITCAIRN: return "AMD RADV PITCAIRN";
222 case CHIP_VERDE: return "AMD RADV CAPE VERDE";
223 case CHIP_OLAND: return "AMD RADV OLAND";
224 case CHIP_HAINAN: return "AMD RADV HAINAN";
225 case CHIP_BONAIRE: return "AMD RADV BONAIRE";
226 case CHIP_KAVERI: return "AMD RADV KAVERI";
227 case CHIP_KABINI: return "AMD RADV KABINI";
228 case CHIP_HAWAII: return "AMD RADV HAWAII";
229 case CHIP_MULLINS: return "AMD RADV MULLINS";
230 case CHIP_TONGA: return "AMD RADV TONGA";
231 case CHIP_ICELAND: return "AMD RADV ICELAND";
232 case CHIP_CARRIZO: return "AMD RADV CARRIZO";
233 case CHIP_FIJI: return "AMD RADV FIJI";
234 case CHIP_POLARIS10: return "AMD RADV POLARIS10";
235 case CHIP_POLARIS11: return "AMD RADV POLARIS11";
236 case CHIP_POLARIS12: return "AMD RADV POLARIS12";
237 case CHIP_STONEY: return "AMD RADV STONEY";
238 case CHIP_VEGA10: return "AMD RADV VEGA";
239 case CHIP_RAVEN: return "AMD RADV RAVEN";
240 default: return "AMD RADV unknown";
241 }
242 }
243
244 static VkResult
245 radv_physical_device_init(struct radv_physical_device *device,
246 struct radv_instance *instance,
247 drmDevicePtr drm_device)
248 {
249 const char *path = drm_device->nodes[DRM_NODE_RENDER];
250 VkResult result;
251 drmVersionPtr version;
252 int fd;
253
254 fd = open(path, O_RDWR | O_CLOEXEC);
255 if (fd < 0)
256 return VK_ERROR_INCOMPATIBLE_DRIVER;
257
258 version = drmGetVersion(fd);
259 if (!version) {
260 close(fd);
261 return vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
262 "failed to get version %s: %m", path);
263 }
264
265 if (strcmp(version->name, "amdgpu")) {
266 drmFreeVersion(version);
267 close(fd);
268 return VK_ERROR_INCOMPATIBLE_DRIVER;
269 }
270 drmFreeVersion(version);
271
272 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
273 device->instance = instance;
274 assert(strlen(path) < ARRAY_SIZE(device->path));
275 strncpy(device->path, path, ARRAY_SIZE(device->path));
276
277 device->ws = radv_amdgpu_winsys_create(fd, instance->debug_flags,
278 instance->perftest_flags);
279 if (!device->ws) {
280 result = VK_ERROR_INCOMPATIBLE_DRIVER;
281 goto fail;
282 }
283
284 device->local_fd = fd;
285 device->ws->query_info(device->ws, &device->rad_info);
286 result = radv_init_wsi(device);
287 if (result != VK_SUCCESS) {
288 device->ws->destroy(device->ws);
289 goto fail;
290 }
291
292 if (radv_device_get_cache_uuid(device->rad_info.family, device->uuid)) {
293 radv_finish_wsi(device);
294 device->ws->destroy(device->ws);
295 result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
296 "cannot generate UUID");
297 goto fail;
298 }
299
300 result = radv_extensions_register(instance,
301 &device->extensions,
302 common_device_extensions,
303 ARRAY_SIZE(common_device_extensions));
304 if (result != VK_SUCCESS)
305 goto fail;
306
307 fprintf(stderr, "WARNING: radv is not a conformant vulkan implementation, testing use only.\n");
308 device->name = get_chip_name(device->rad_info.family);
309
310 radv_get_device_uuid(drm_device, device->device_uuid);
311
312 if (device->rad_info.family == CHIP_STONEY ||
313 device->rad_info.chip_class >= GFX9) {
314 device->has_rbplus = true;
315 device->rbplus_allowed = device->rad_info.family == CHIP_STONEY;
316 }
317
318 return VK_SUCCESS;
319
320 fail:
321 close(fd);
322 return result;
323 }
324
325 static void
326 radv_physical_device_finish(struct radv_physical_device *device)
327 {
328 radv_extensions_finish(device->instance, &device->extensions);
329 radv_finish_wsi(device);
330 device->ws->destroy(device->ws);
331 close(device->local_fd);
332 }
333
334 static void *
335 default_alloc_func(void *pUserData, size_t size, size_t align,
336 VkSystemAllocationScope allocationScope)
337 {
338 return malloc(size);
339 }
340
341 static void *
342 default_realloc_func(void *pUserData, void *pOriginal, size_t size,
343 size_t align, VkSystemAllocationScope allocationScope)
344 {
345 return realloc(pOriginal, size);
346 }
347
348 static void
349 default_free_func(void *pUserData, void *pMemory)
350 {
351 free(pMemory);
352 }
353
354 static const VkAllocationCallbacks default_alloc = {
355 .pUserData = NULL,
356 .pfnAllocation = default_alloc_func,
357 .pfnReallocation = default_realloc_func,
358 .pfnFree = default_free_func,
359 };
360
361 static const struct debug_control radv_debug_options[] = {
362 {"nofastclears", RADV_DEBUG_NO_FAST_CLEARS},
363 {"nodcc", RADV_DEBUG_NO_DCC},
364 {"shaders", RADV_DEBUG_DUMP_SHADERS},
365 {"nocache", RADV_DEBUG_NO_CACHE},
366 {"shaderstats", RADV_DEBUG_DUMP_SHADER_STATS},
367 {"nohiz", RADV_DEBUG_NO_HIZ},
368 {"nocompute", RADV_DEBUG_NO_COMPUTE_QUEUE},
369 {"unsafemath", RADV_DEBUG_UNSAFE_MATH},
370 {"allbos", RADV_DEBUG_ALL_BOS},
371 {"noibs", RADV_DEBUG_NO_IBS},
372 {NULL, 0}
373 };
374
375 static const struct debug_control radv_perftest_options[] = {
376 {"batchchain", RADV_PERFTEST_BATCHCHAIN},
377 {"sisched", RADV_PERFTEST_SISCHED},
378 {NULL, 0}
379 };
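
Both tables are consumed in radv_CreateInstance below through parse_debug_string(getenv(...)), so the option names are given as comma-separated environment-variable lists, e.g. RADV_DEBUG=nocache,shaders RADV_PERFTEST=sisched ./app.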
380
381 VkResult radv_CreateInstance(
382 const VkInstanceCreateInfo* pCreateInfo,
383 const VkAllocationCallbacks* pAllocator,
384 VkInstance* pInstance)
385 {
386 struct radv_instance *instance;
387
388 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
389
390 uint32_t client_version;
391 if (pCreateInfo->pApplicationInfo &&
392 pCreateInfo->pApplicationInfo->apiVersion != 0) {
393 client_version = pCreateInfo->pApplicationInfo->apiVersion;
394 } else {
395 client_version = VK_MAKE_VERSION(1, 0, 0);
396 }
397
398 if (VK_MAKE_VERSION(1, 0, 0) > client_version ||
399 client_version > VK_MAKE_VERSION(1, 0, 0xfff)) {
400 return vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
401 "Client requested version %d.%d.%d",
402 VK_VERSION_MAJOR(client_version),
403 VK_VERSION_MINOR(client_version),
404 VK_VERSION_PATCH(client_version));
405 }
406
407 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
408 if (!is_extension_enabled(instance_extensions,
409 ARRAY_SIZE(instance_extensions),
410 pCreateInfo->ppEnabledExtensionNames[i]))
411 return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
412 }
413
414 instance = vk_alloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
415 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
416 if (!instance)
417 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
418
419 memset(instance, 0, sizeof(*instance));
420
421 instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
422
423 if (pAllocator)
424 instance->alloc = *pAllocator;
425 else
426 instance->alloc = default_alloc;
427
428 instance->apiVersion = client_version;
429 instance->physicalDeviceCount = -1;
430
431 _mesa_locale_init();
432
433 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
434
435 instance->debug_flags = parse_debug_string(getenv("RADV_DEBUG"),
436 radv_debug_options);
437
438 instance->perftest_flags = parse_debug_string(getenv("RADV_PERFTEST"),
439 radv_perftest_options);
440
441 *pInstance = radv_instance_to_handle(instance);
442
443 return VK_SUCCESS;
444 }
445
446 void radv_DestroyInstance(
447 VkInstance _instance,
448 const VkAllocationCallbacks* pAllocator)
449 {
450 RADV_FROM_HANDLE(radv_instance, instance, _instance);
451
452 if (!instance)
453 return;
454
455 for (int i = 0; i < instance->physicalDeviceCount; ++i) {
456 radv_physical_device_finish(instance->physicalDevices + i);
457 }
458
459 VG(VALGRIND_DESTROY_MEMPOOL(instance));
460
461 _mesa_locale_fini();
462
463 vk_free(&instance->alloc, instance);
464 }
465
466 static VkResult
467 radv_enumerate_devices(struct radv_instance *instance)
468 {
469 /* TODO: Check for more devices? */
470 drmDevicePtr devices[8];
471 VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
472 int max_devices;
473
474 instance->physicalDeviceCount = 0;
475
476 max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
477 if (max_devices < 1)
478 return VK_ERROR_INCOMPATIBLE_DRIVER;
479
480 for (unsigned i = 0; i < (unsigned)max_devices; i++) {
481 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
482 devices[i]->bustype == DRM_BUS_PCI &&
483 devices[i]->deviceinfo.pci->vendor_id == 0x1002) {
484
485 result = radv_physical_device_init(instance->physicalDevices +
486 instance->physicalDeviceCount,
487 instance,
488 devices[i]);
489 if (result == VK_SUCCESS)
490 ++instance->physicalDeviceCount;
491 else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
492 break;
493 }
494 }
495 drmFreeDevices(devices, max_devices);
496
497 return result;
498 }
499
500 VkResult radv_EnumeratePhysicalDevices(
501 VkInstance _instance,
502 uint32_t* pPhysicalDeviceCount,
503 VkPhysicalDevice* pPhysicalDevices)
504 {
505 RADV_FROM_HANDLE(radv_instance, instance, _instance);
506 VkResult result;
507
508 if (instance->physicalDeviceCount < 0) {
509 result = radv_enumerate_devices(instance);
510 if (result != VK_SUCCESS &&
511 result != VK_ERROR_INCOMPATIBLE_DRIVER)
512 return result;
513 }
514
515 if (!pPhysicalDevices) {
516 *pPhysicalDeviceCount = instance->physicalDeviceCount;
517 } else {
518 *pPhysicalDeviceCount = MIN2(*pPhysicalDeviceCount, instance->physicalDeviceCount);
519 for (unsigned i = 0; i < *pPhysicalDeviceCount; ++i)
520 pPhysicalDevices[i] = radv_physical_device_to_handle(instance->physicalDevices + i);
521 }
522
523 return *pPhysicalDeviceCount < instance->physicalDeviceCount ? VK_INCOMPLETE
524 : VK_SUCCESS;
525 }
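
From the application side this entry point is driven by the usual two-call idiom; a short illustrative sketch (the wrapper name is an assumption, the calls are core Vulkan):

#include <stdlib.h>
#include <vulkan/vulkan.h>

static VkPhysicalDevice *
enumerate_gpus(VkInstance instance, uint32_t *count)
{
        *count = 0;
        vkEnumeratePhysicalDevices(instance, count, NULL); /* first call: count only */

        VkPhysicalDevice *devs = malloc(*count * sizeof(*devs));
        if (!devs)
                return NULL;

        /* Second call fills the array; VK_INCOMPLETE (a positive code, as
         * computed above) only occurs when the caller's array is too small. */
        if (vkEnumeratePhysicalDevices(instance, count, devs) < VK_SUCCESS) {
                free(devs);
                return NULL;
        }
        return devs;
}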
526
527 void radv_GetPhysicalDeviceFeatures(
528 VkPhysicalDevice physicalDevice,
529 VkPhysicalDeviceFeatures* pFeatures)
530 {
531 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
532 bool is_gfx9 = pdevice->rad_info.chip_class >= GFX9;
533 memset(pFeatures, 0, sizeof(*pFeatures));
534
535 *pFeatures = (VkPhysicalDeviceFeatures) {
536 .robustBufferAccess = true,
537 .fullDrawIndexUint32 = true,
538 .imageCubeArray = true,
539 .independentBlend = true,
540 .geometryShader = !is_gfx9,
541 .tessellationShader = !is_gfx9,
542 .sampleRateShading = false,
543 .dualSrcBlend = true,
544 .logicOp = true,
545 .multiDrawIndirect = true,
546 .drawIndirectFirstInstance = true,
547 .depthClamp = true,
548 .depthBiasClamp = true,
549 .fillModeNonSolid = true,
550 .depthBounds = true,
551 .wideLines = true,
552 .largePoints = true,
553 .alphaToOne = true,
554 .multiViewport = true,
555 .samplerAnisotropy = true,
556 .textureCompressionETC2 = false,
557 .textureCompressionASTC_LDR = false,
558 .textureCompressionBC = true,
559 .occlusionQueryPrecise = true,
560 .pipelineStatisticsQuery = true,
561 .vertexPipelineStoresAndAtomics = true,
562 .fragmentStoresAndAtomics = true,
563 .shaderTessellationAndGeometryPointSize = true,
564 .shaderImageGatherExtended = true,
565 .shaderStorageImageExtendedFormats = true,
566 .shaderStorageImageMultisample = false,
567 .shaderUniformBufferArrayDynamicIndexing = true,
568 .shaderSampledImageArrayDynamicIndexing = true,
569 .shaderStorageBufferArrayDynamicIndexing = true,
570 .shaderStorageImageArrayDynamicIndexing = true,
571 .shaderStorageImageReadWithoutFormat = true,
572 .shaderStorageImageWriteWithoutFormat = true,
573 .shaderClipDistance = true,
574 .shaderCullDistance = true,
575 .shaderFloat64 = true,
576 .shaderInt64 = true,
577 .shaderInt16 = false,
578 .sparseBinding = true,
579 .variableMultisampleRate = true,
580 .inheritedQueries = true,
581 };
582 }
583
584 void radv_GetPhysicalDeviceFeatures2KHR(
585 VkPhysicalDevice physicalDevice,
586 VkPhysicalDeviceFeatures2KHR *pFeatures)
587 {
588 radv_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
589 }
590
591 void radv_GetPhysicalDeviceProperties(
592 VkPhysicalDevice physicalDevice,
593 VkPhysicalDeviceProperties* pProperties)
594 {
595 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
596 VkSampleCountFlags sample_counts = 0xf;
597
598 /* Make sure that the entire descriptor set is addressable with a signed
599 * 32-bit int. So the sum of all limits, scaled by descriptor size, has to
600 * be at most 2 GiB. A combined image & sampler object counts as one of
601 * both. This limit is for the pipeline layout, not for the set layout, but
602 * there is no per-set limit, so we just set a pipeline limit. I don't think
603 * any app is going to hit this soon. */
604 size_t max_descriptor_set_size = ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
605 (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
606 32 /* storage buffer, 32 due to potential space wasted on alignment */ +
607 32 /* sampler, largest when combined with image */ +
608 64 /* sampled image */ +
609 64 /* storage image */);
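/* Illustration: the divisor is 32 + 32 + 32 + 64 + 64 = 224 bytes for one
 * descriptor of each type, so the limit comes out to roughly
 * (1 << 31) / 224, i.e. about 9.6 million descriptors per stage. */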
610
611 VkPhysicalDeviceLimits limits = {
612 .maxImageDimension1D = (1 << 14),
613 .maxImageDimension2D = (1 << 14),
614 .maxImageDimension3D = (1 << 11),
615 .maxImageDimensionCube = (1 << 14),
616 .maxImageArrayLayers = (1 << 11),
617 .maxTexelBufferElements = 128 * 1024 * 1024,
618 .maxUniformBufferRange = UINT32_MAX,
619 .maxStorageBufferRange = UINT32_MAX,
620 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
621 .maxMemoryAllocationCount = UINT32_MAX,
622 .maxSamplerAllocationCount = 64 * 1024,
623 .bufferImageGranularity = 64, /* A cache line */
624 .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
625 .maxBoundDescriptorSets = MAX_SETS,
626 .maxPerStageDescriptorSamplers = max_descriptor_set_size,
627 .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
628 .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
629 .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
630 .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
631 .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
632 .maxPerStageResources = max_descriptor_set_size,
633 .maxDescriptorSetSamplers = max_descriptor_set_size,
634 .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
635 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2,
636 .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
637 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2,
638 .maxDescriptorSetSampledImages = max_descriptor_set_size,
639 .maxDescriptorSetStorageImages = max_descriptor_set_size,
640 .maxDescriptorSetInputAttachments = max_descriptor_set_size,
641 .maxVertexInputAttributes = 32,
642 .maxVertexInputBindings = 32,
643 .maxVertexInputAttributeOffset = 2047,
644 .maxVertexInputBindingStride = 2048,
645 .maxVertexOutputComponents = 128,
646 .maxTessellationGenerationLevel = 64,
647 .maxTessellationPatchSize = 32,
648 .maxTessellationControlPerVertexInputComponents = 128,
649 .maxTessellationControlPerVertexOutputComponents = 128,
650 .maxTessellationControlPerPatchOutputComponents = 120,
651 .maxTessellationControlTotalOutputComponents = 4096,
652 .maxTessellationEvaluationInputComponents = 128,
653 .maxTessellationEvaluationOutputComponents = 128,
654 .maxGeometryShaderInvocations = 127,
655 .maxGeometryInputComponents = 64,
656 .maxGeometryOutputComponents = 128,
657 .maxGeometryOutputVertices = 256,
658 .maxGeometryTotalOutputComponents = 1024,
659 .maxFragmentInputComponents = 128,
660 .maxFragmentOutputAttachments = 8,
661 .maxFragmentDualSrcAttachments = 1,
662 .maxFragmentCombinedOutputResources = 8,
663 .maxComputeSharedMemorySize = 32768,
664 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
665 .maxComputeWorkGroupInvocations = 2048,
666 .maxComputeWorkGroupSize = {
667 2048,
668 2048,
669 2048
670 },
671 .subPixelPrecisionBits = 4 /* FIXME */,
672 .subTexelPrecisionBits = 4 /* FIXME */,
673 .mipmapPrecisionBits = 4 /* FIXME */,
674 .maxDrawIndexedIndexValue = UINT32_MAX,
675 .maxDrawIndirectCount = UINT32_MAX,
676 .maxSamplerLodBias = 16,
677 .maxSamplerAnisotropy = 16,
678 .maxViewports = MAX_VIEWPORTS,
679 .maxViewportDimensions = { (1 << 14), (1 << 14) },
680 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
681 .viewportSubPixelBits = 13, /* We take a float? */
682 .minMemoryMapAlignment = 4096, /* A page */
683 .minTexelBufferOffsetAlignment = 1,
684 .minUniformBufferOffsetAlignment = 4,
685 .minStorageBufferOffsetAlignment = 4,
686 .minTexelOffset = -32,
687 .maxTexelOffset = 31,
688 .minTexelGatherOffset = -32,
689 .maxTexelGatherOffset = 31,
690 .minInterpolationOffset = -2,
691 .maxInterpolationOffset = 2,
692 .subPixelInterpolationOffsetBits = 8,
693 .maxFramebufferWidth = (1 << 14),
694 .maxFramebufferHeight = (1 << 14),
695 .maxFramebufferLayers = (1 << 10),
696 .framebufferColorSampleCounts = sample_counts,
697 .framebufferDepthSampleCounts = sample_counts,
698 .framebufferStencilSampleCounts = sample_counts,
699 .framebufferNoAttachmentsSampleCounts = sample_counts,
700 .maxColorAttachments = MAX_RTS,
701 .sampledImageColorSampleCounts = sample_counts,
702 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
703 .sampledImageDepthSampleCounts = sample_counts,
704 .sampledImageStencilSampleCounts = sample_counts,
705 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
706 .maxSampleMaskWords = 1,
707 .timestampComputeAndGraphics = true,
708 .timestampPeriod = 1000000.0 / pdevice->rad_info.clock_crystal_freq,
709 .maxClipDistances = 8,
710 .maxCullDistances = 8,
711 .maxCombinedClipAndCullDistances = 8,
712 .discreteQueuePriorities = 1,
713 .pointSizeRange = { 0.125, 255.875 },
714 .lineWidthRange = { 0.0, 7.9921875 },
715 .pointSizeGranularity = (1.0 / 8.0),
716 .lineWidthGranularity = (1.0 / 128.0),
717 .strictLines = false, /* FINISHME */
718 .standardSampleLocations = true,
719 .optimalBufferCopyOffsetAlignment = 128,
720 .optimalBufferCopyRowPitchAlignment = 128,
721 .nonCoherentAtomSize = 64,
722 };
723
724 *pProperties = (VkPhysicalDeviceProperties) {
725 .apiVersion = VK_MAKE_VERSION(1, 0, 42),
726 .driverVersion = vk_get_driver_version(),
727 .vendorID = 0x1002,
728 .deviceID = pdevice->rad_info.pci_id,
729 .deviceType = pdevice->rad_info.has_dedicated_vram ? VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU : VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
730 .limits = limits,
731 .sparseProperties = {0},
732 };
733
734 strcpy(pProperties->deviceName, pdevice->name);
735 memcpy(pProperties->pipelineCacheUUID, pdevice->uuid, VK_UUID_SIZE);
736 }
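
One unit worth spelling out from the limits above: the kernel reports clock_crystal_freq in kHz, so 1000000.0 / clock_crystal_freq is the timestamp tick period in nanoseconds; a 100 MHz (100000 kHz) reference clock, for example, yields timestampPeriod = 10 ns.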
737
738 void radv_GetPhysicalDeviceProperties2KHR(
739 VkPhysicalDevice physicalDevice,
740 VkPhysicalDeviceProperties2KHR *pProperties)
741 {
742 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
743 radv_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
744
745 vk_foreach_struct(ext, pProperties->pNext) {
746 switch (ext->sType) {
747 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
748 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
749 (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
750 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
751 break;
752 }
753 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
754 VkPhysicalDeviceIDPropertiesKHR *properties = (VkPhysicalDeviceIDPropertiesKHR*)ext;
755 radv_device_get_cache_uuid(0, properties->driverUUID);
756 memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
757 properties->deviceLUIDValid = false;
758 break;
759 }
760 default:
761 break;
762 }
763 }
764 }
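
A sketch of the application-side query that the pNext walk above services. vkGetPhysicalDeviceProperties2KHR is an extension entry point, so it is resolved through vkGetInstanceProcAddr; the helper name is illustrative:

#include <vulkan/vulkan.h>

static uint32_t
query_max_push_descriptors(VkInstance instance, VkPhysicalDevice pdev)
{
        PFN_vkGetPhysicalDeviceProperties2KHR get_props2 =
                (PFN_vkGetPhysicalDeviceProperties2KHR)
                vkGetInstanceProcAddr(instance, "vkGetPhysicalDeviceProperties2KHR");

        VkPhysicalDevicePushDescriptorPropertiesKHR push_props = {
                .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR,
        };
        VkPhysicalDeviceProperties2KHR props2 = {
                .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR,
                .pNext = &push_props,
        };

        get_props2(pdev, &props2);
        return push_props.maxPushDescriptors; /* radv's MAX_PUSH_DESCRIPTORS */
}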
765
766 static void radv_get_physical_device_queue_family_properties(
767 struct radv_physical_device* pdevice,
768 uint32_t* pCount,
769 VkQueueFamilyProperties** pQueueFamilyProperties)
770 {
771 int num_queue_families = 1;
772 int idx;
773 if (pdevice->rad_info.num_compute_rings > 0 &&
774 pdevice->rad_info.chip_class >= CIK &&
775 !(pdevice->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE))
776 num_queue_families++;
777
778 if (pQueueFamilyProperties == NULL) {
779 *pCount = num_queue_families;
780 return;
781 }
782
783 if (!*pCount)
784 return;
785
786 idx = 0;
787 if (*pCount >= 1) {
788 *pQueueFamilyProperties[idx] = (VkQueueFamilyProperties) {
789 .queueFlags = VK_QUEUE_GRAPHICS_BIT |
790 VK_QUEUE_COMPUTE_BIT |
791 VK_QUEUE_TRANSFER_BIT |
792 VK_QUEUE_SPARSE_BINDING_BIT,
793 .queueCount = 1,
794 .timestampValidBits = 64,
795 .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
796 };
797 idx++;
798 }
799
800 if (pdevice->rad_info.num_compute_rings > 0 &&
801 pdevice->rad_info.chip_class >= CIK &&
802 !(pdevice->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE)) {
803 if (*pCount > idx) {
804 *pQueueFamilyProperties[idx] = (VkQueueFamilyProperties) {
805 .queueFlags = VK_QUEUE_COMPUTE_BIT |
806 VK_QUEUE_TRANSFER_BIT |
807 VK_QUEUE_SPARSE_BINDING_BIT,
808 .queueCount = pdevice->rad_info.num_compute_rings,
809 .timestampValidBits = 64,
810 .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
811 };
812 idx++;
813 }
814 }
815 *pCount = idx;
816 }
817
818 void radv_GetPhysicalDeviceQueueFamilyProperties(
819 VkPhysicalDevice physicalDevice,
820 uint32_t* pCount,
821 VkQueueFamilyProperties* pQueueFamilyProperties)
822 {
823 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
824 if (!pQueueFamilyProperties) {
825 radv_get_physical_device_queue_family_properties(pdevice, pCount, NULL);
826 return;
827 }
828 VkQueueFamilyProperties *properties[] = {
829 pQueueFamilyProperties + 0,
830 pQueueFamilyProperties + 1,
831 pQueueFamilyProperties + 2,
832 };
833 radv_get_physical_device_queue_family_properties(pdevice, pCount, properties);
834 assert(*pCount <= 3);
835 }
836
837 void radv_GetPhysicalDeviceQueueFamilyProperties2KHR(
838 VkPhysicalDevice physicalDevice,
839 uint32_t* pCount,
840 VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
841 {
842 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
843 if (!pQueueFamilyProperties) {
844 radv_get_physical_device_queue_family_properties(pdevice, pCount, NULL);
845 return;
846 }
847 VkQueueFamilyProperties *properties[] = {
848 &pQueueFamilyProperties[0].queueFamilyProperties,
849 &pQueueFamilyProperties[1].queueFamilyProperties,
850 &pQueueFamilyProperties[2].queueFamilyProperties,
851 };
852 radv_get_physical_device_queue_family_properties(pdevice, pCount, properties);
853 assert(*pCount <= 3);
854 }
855
856 void radv_GetPhysicalDeviceMemoryProperties(
857 VkPhysicalDevice physicalDevice,
858 VkPhysicalDeviceMemoryProperties *pMemoryProperties)
859 {
860 RADV_FROM_HANDLE(radv_physical_device, physical_device, physicalDevice);
861
862 STATIC_ASSERT(RADV_MEM_TYPE_COUNT <= VK_MAX_MEMORY_TYPES);
863
864 pMemoryProperties->memoryTypeCount = RADV_MEM_TYPE_COUNT;
865 pMemoryProperties->memoryTypes[RADV_MEM_TYPE_VRAM] = (VkMemoryType) {
866 .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
867 .heapIndex = RADV_MEM_HEAP_VRAM,
868 };
869 pMemoryProperties->memoryTypes[RADV_MEM_TYPE_GTT_WRITE_COMBINE] = (VkMemoryType) {
870 .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
871 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
872 .heapIndex = RADV_MEM_HEAP_GTT,
873 };
874 pMemoryProperties->memoryTypes[RADV_MEM_TYPE_VRAM_CPU_ACCESS] = (VkMemoryType) {
875 .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
876 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
877 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
878 .heapIndex = RADV_MEM_HEAP_VRAM_CPU_ACCESS,
879 };
880 pMemoryProperties->memoryTypes[RADV_MEM_TYPE_GTT_CACHED] = (VkMemoryType) {
881 .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
882 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
883 VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
884 .heapIndex = RADV_MEM_HEAP_GTT,
885 };
886
887 STATIC_ASSERT(RADV_MEM_HEAP_COUNT <= VK_MAX_MEMORY_HEAPS);
888
889 pMemoryProperties->memoryHeapCount = RADV_MEM_HEAP_COUNT;
890 pMemoryProperties->memoryHeaps[RADV_MEM_HEAP_VRAM] = (VkMemoryHeap) {
891 .size = physical_device->rad_info.vram_size -
892 physical_device->rad_info.vram_vis_size,
893 .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
894 };
895 pMemoryProperties->memoryHeaps[RADV_MEM_HEAP_VRAM_CPU_ACCESS] = (VkMemoryHeap) {
896 .size = physical_device->rad_info.vram_vis_size,
897 .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
898 };
899 pMemoryProperties->memoryHeaps[RADV_MEM_HEAP_GTT] = (VkMemoryHeap) {
900 .size = physical_device->rad_info.gart_size,
901 .flags = 0,
902 };
903 }
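
Allocators walk the table above with the canonical type-selection loop; a minimal sketch, assuming type_bits comes from VkMemoryRequirements::memoryTypeBits:

#include <stdint.h>
#include <vulkan/vulkan.h>

static int32_t
find_memory_type(const VkPhysicalDeviceMemoryProperties *mem,
                 uint32_t type_bits, VkMemoryPropertyFlags required)
{
        for (uint32_t i = 0; i < mem->memoryTypeCount; i++) {
                if ((type_bits & (1u << i)) &&
                    (mem->memoryTypes[i].propertyFlags & required) == required)
                        return (int32_t)i;
        }
        return -1; /* no compatible type */
}

Asking for HOST_VISIBLE | HOST_COHERENT, for instance, first matches RADV_MEM_TYPE_GTT_WRITE_COMBINE here, since the pure VRAM type is not host-visible.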
904
905 void radv_GetPhysicalDeviceMemoryProperties2KHR(
906 VkPhysicalDevice physicalDevice,
907 VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
908 {
909 radv_GetPhysicalDeviceMemoryProperties(physicalDevice,
910 &pMemoryProperties->memoryProperties);
911 }
912
913 static int
914 radv_queue_init(struct radv_device *device, struct radv_queue *queue,
915 int queue_family_index, int idx)
916 {
917 queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
918 queue->device = device;
919 queue->queue_family_index = queue_family_index;
920 queue->queue_idx = idx;
921
922 queue->hw_ctx = device->ws->ctx_create(device->ws);
923 if (!queue->hw_ctx)
924 return VK_ERROR_OUT_OF_HOST_MEMORY;
925
926 return VK_SUCCESS;
927 }
928
929 static void
930 radv_queue_finish(struct radv_queue *queue)
931 {
932 if (queue->hw_ctx)
933 queue->device->ws->ctx_destroy(queue->hw_ctx);
934
935 if (queue->initial_preamble_cs)
936 queue->device->ws->cs_destroy(queue->initial_preamble_cs);
937 if (queue->continue_preamble_cs)
938 queue->device->ws->cs_destroy(queue->continue_preamble_cs);
939 if (queue->descriptor_bo)
940 queue->device->ws->buffer_destroy(queue->descriptor_bo);
941 if (queue->scratch_bo)
942 queue->device->ws->buffer_destroy(queue->scratch_bo);
943 if (queue->esgs_ring_bo)
944 queue->device->ws->buffer_destroy(queue->esgs_ring_bo);
945 if (queue->gsvs_ring_bo)
946 queue->device->ws->buffer_destroy(queue->gsvs_ring_bo);
947 if (queue->tess_factor_ring_bo)
948 queue->device->ws->buffer_destroy(queue->tess_factor_ring_bo);
949 if (queue->tess_offchip_ring_bo)
950 queue->device->ws->buffer_destroy(queue->tess_offchip_ring_bo);
951 if (queue->compute_scratch_bo)
952 queue->device->ws->buffer_destroy(queue->compute_scratch_bo);
953 }
954
955 static void
956 radv_device_init_gs_info(struct radv_device *device)
957 {
958 switch (device->physical_device->rad_info.family) {
959 case CHIP_OLAND:
960 case CHIP_HAINAN:
961 case CHIP_KAVERI:
962 case CHIP_KABINI:
963 case CHIP_MULLINS:
964 case CHIP_ICELAND:
965 case CHIP_CARRIZO:
966 case CHIP_STONEY:
967 device->gs_table_depth = 16;
968 return;
969 case CHIP_TAHITI:
970 case CHIP_PITCAIRN:
971 case CHIP_VERDE:
972 case CHIP_BONAIRE:
973 case CHIP_HAWAII:
974 case CHIP_TONGA:
975 case CHIP_FIJI:
976 case CHIP_POLARIS10:
977 case CHIP_POLARIS11:
978 case CHIP_POLARIS12:
979 case CHIP_VEGA10:
980 case CHIP_RAVEN:
981 device->gs_table_depth = 32;
982 return;
983 default:
984 unreachable("unknown GPU");
985 }
986 }
987
988 VkResult radv_CreateDevice(
989 VkPhysicalDevice physicalDevice,
990 const VkDeviceCreateInfo* pCreateInfo,
991 const VkAllocationCallbacks* pAllocator,
992 VkDevice* pDevice)
993 {
994 RADV_FROM_HANDLE(radv_physical_device, physical_device, physicalDevice);
995 VkResult result;
996 struct radv_device *device;
997
998 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
999 if (!is_extension_enabled(physical_device->extensions.ext_array,
1000 physical_device->extensions.num_ext,
1001 pCreateInfo->ppEnabledExtensionNames[i]))
1002 return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
1003 }
1004
1005 device = vk_alloc2(&physical_device->instance->alloc, pAllocator,
1006 sizeof(*device), 8,
1007 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1008 if (!device)
1009 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1010
1011 memset(device, 0, sizeof(*device));
1012
1013 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1014 device->instance = physical_device->instance;
1015 device->physical_device = physical_device;
1016
1017 device->debug_flags = device->instance->debug_flags;
1018
1019 device->ws = physical_device->ws;
1020 if (pAllocator)
1021 device->alloc = *pAllocator;
1022 else
1023 device->alloc = physical_device->instance->alloc;
1024
1025 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
1026 const VkDeviceQueueCreateInfo *queue_create = &pCreateInfo->pQueueCreateInfos[i];
1027 uint32_t qfi = queue_create->queueFamilyIndex;
1028
1029 device->queues[qfi] = vk_alloc(&device->alloc,
1030 queue_create->queueCount * sizeof(struct radv_queue), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1031 if (!device->queues[qfi]) {
1032 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1033 goto fail;
1034 }
1035
1036 memset(device->queues[qfi], 0, queue_create->queueCount * sizeof(struct radv_queue));
1037
1038 device->queue_count[qfi] = queue_create->queueCount;
1039
1040 for (unsigned q = 0; q < queue_create->queueCount; q++) {
1041 result = radv_queue_init(device, &device->queues[qfi][q], qfi, q);
1042 if (result != VK_SUCCESS)
1043 goto fail;
1044 }
1045 }
1046
1047 #if HAVE_LLVM < 0x0400
1048 device->llvm_supports_spill = false;
1049 #else
1050 device->llvm_supports_spill = true;
1051 #endif
1052
1053 /* The maximum number of scratch waves. Scratch space isn't divided
1054 * evenly between CUs. The number is only a function of the number of CUs.
1055 * We can decrease the constant to decrease the scratch buffer size.
1056 *
1057 * sctx->scratch_waves must be >= the maximum possible size of
1058 * 1 threadgroup, so that the hw doesn't hang from being unable
1059 * to start any.
1060 *
1061 * The recommended value is 4 per CU at most. Higher numbers don't
1062 * bring much benefit, but they still occupy chip resources (think
1063 * async compute). I've seen ~2% performance difference between 4 and 32.
1064 */
1065 uint32_t max_threads_per_block = 2048;
1066 device->scratch_waves = MAX2(32 * physical_device->rad_info.num_good_compute_units,
1067 max_threads_per_block / 64);
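/* Illustration: a 64-CU part gets MAX2(32 * 64, 2048 / 64) = 2048 scratch
 * waves; the second operand only guarantees that one maximum-size
 * (2048-thread, i.e. 32-wave) threadgroup can always start. */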
1068
1069 radv_device_init_gs_info(device);
1070
1071 device->tess_offchip_block_dw_size =
1072 device->physical_device->rad_info.family == CHIP_HAWAII ? 4096 : 8192;
1073 device->has_distributed_tess =
1074 device->physical_device->rad_info.chip_class >= VI &&
1075 device->physical_device->rad_info.max_se >= 2;
1076
1077 result = radv_device_init_meta(device);
1078 if (result != VK_SUCCESS)
1079 goto fail;
1080
1081 radv_device_init_msaa(device);
1082
1083 for (int family = 0; family < RADV_MAX_QUEUE_FAMILIES; ++family) {
1084 device->empty_cs[family] = device->ws->cs_create(device->ws, family);
1085 switch (family) {
1086 case RADV_QUEUE_GENERAL:
1087 radeon_emit(device->empty_cs[family], PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
1088 radeon_emit(device->empty_cs[family], CONTEXT_CONTROL_LOAD_ENABLE(1));
1089 radeon_emit(device->empty_cs[family], CONTEXT_CONTROL_SHADOW_ENABLE(1));
1090 break;
1091 case RADV_QUEUE_COMPUTE:
1092 radeon_emit(device->empty_cs[family], PKT3(PKT3_NOP, 0, 0));
1093 radeon_emit(device->empty_cs[family], 0);
1094 break;
1095 }
1096 device->ws->cs_finalize(device->empty_cs[family]);
1097
1098 device->flush_cs[family] = device->ws->cs_create(device->ws, family);
1099 switch (family) {
1100 case RADV_QUEUE_GENERAL:
1101 case RADV_QUEUE_COMPUTE:
1102 si_cs_emit_cache_flush(device->flush_cs[family],
1103 false,
1104 device->physical_device->rad_info.chip_class,
1105 NULL, 0,
1106 family == RADV_QUEUE_COMPUTE && device->physical_device->rad_info.chip_class >= CIK,
1107 RADV_CMD_FLAG_INV_ICACHE |
1108 RADV_CMD_FLAG_INV_SMEM_L1 |
1109 RADV_CMD_FLAG_INV_VMEM_L1 |
1110 RADV_CMD_FLAG_INV_GLOBAL_L2);
1111 break;
1112 }
1113 device->ws->cs_finalize(device->flush_cs[family]);
1114
1115 device->flush_shader_cs[family] = device->ws->cs_create(device->ws, family);
1116 switch (family) {
1117 case RADV_QUEUE_GENERAL:
1118 case RADV_QUEUE_COMPUTE:
1119 si_cs_emit_cache_flush(device->flush_shader_cs[family],
1120 false,
1121 device->physical_device->rad_info.chip_class,
1122 NULL, 0,
1123 family == RADV_QUEUE_COMPUTE && device->physical_device->rad_info.chip_class >= CIK,
1124 (family == RADV_QUEUE_COMPUTE ? RADV_CMD_FLAG_CS_PARTIAL_FLUSH : (RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH)) |
1125 RADV_CMD_FLAG_INV_ICACHE |
1126 RADV_CMD_FLAG_INV_SMEM_L1 |
1127 RADV_CMD_FLAG_INV_VMEM_L1 |
1128 RADV_CMD_FLAG_INV_GLOBAL_L2);
1129 break;
1130 }
1131 device->ws->cs_finalize(device->flush_shader_cs[family]);
1132 }
1133
1134 if (getenv("RADV_TRACE_FILE")) {
1135 device->trace_bo = device->ws->buffer_create(device->ws, 4096, 8,
1136 RADEON_DOMAIN_VRAM, RADEON_FLAG_CPU_ACCESS);
1137 if (!device->trace_bo) {
1138 result = VK_ERROR_OUT_OF_HOST_MEMORY; goto fail; }
1139 
1140 device->trace_id_ptr = device->ws->buffer_map(device->trace_bo);
1141 if (!device->trace_id_ptr) {
1142 result = VK_ERROR_OUT_OF_HOST_MEMORY; goto fail; }
1143 }
1144
1145 if (device->physical_device->rad_info.chip_class >= CIK)
1146 cik_create_gfx_config(device);
1147
1148 VkPipelineCacheCreateInfo ci;
1149 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
1150 ci.pNext = NULL;
1151 ci.flags = 0;
1152 ci.pInitialData = NULL;
1153 ci.initialDataSize = 0;
1154 VkPipelineCache pc;
1155 result = radv_CreatePipelineCache(radv_device_to_handle(device),
1156 &ci, NULL, &pc);
1157 if (result != VK_SUCCESS)
1158 goto fail;
1159
1160 device->mem_cache = radv_pipeline_cache_from_handle(pc);
1161
1162 *pDevice = radv_device_to_handle(device);
1163 return VK_SUCCESS;
1164
1165 fail:
1166 if (device->trace_bo)
1167 device->ws->buffer_destroy(device->trace_bo);
1168
1169 if (device->gfx_init)
1170 device->ws->buffer_destroy(device->gfx_init);
1171
1172 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
1173 for (unsigned q = 0; q < device->queue_count[i]; q++)
1174 radv_queue_finish(&device->queues[i][q]);
1175 if (device->queue_count[i])
1176 vk_free(&device->alloc, device->queues[i]);
1177 }
1178
1179 vk_free(&device->alloc, device);
1180 return result;
1181 }
1182
1183 void radv_DestroyDevice(
1184 VkDevice _device,
1185 const VkAllocationCallbacks* pAllocator)
1186 {
1187 RADV_FROM_HANDLE(radv_device, device, _device);
1188
1189 if (!device)
1190 return;
1191
1192 if (device->trace_bo)
1193 device->ws->buffer_destroy(device->trace_bo);
1194
1195 if (device->gfx_init)
1196 device->ws->buffer_destroy(device->gfx_init);
1197
1198 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
1199 for (unsigned q = 0; q < device->queue_count[i]; q++)
1200 radv_queue_finish(&device->queues[i][q]);
1201 if (device->queue_count[i])
1202 vk_free(&device->alloc, device->queues[i]);
1203 if (device->empty_cs[i])
1204 device->ws->cs_destroy(device->empty_cs[i]);
1205 if (device->flush_cs[i])
1206 device->ws->cs_destroy(device->flush_cs[i]);
1207 if (device->flush_shader_cs[i])
1208 device->ws->cs_destroy(device->flush_shader_cs[i]);
1209 }
1210 radv_device_finish_meta(device);
1211
1212 VkPipelineCache pc = radv_pipeline_cache_to_handle(device->mem_cache);
1213 radv_DestroyPipelineCache(radv_device_to_handle(device), pc, NULL);
1214
1215 vk_free(&device->alloc, device);
1216 }
1217
1218 VkResult radv_EnumerateInstanceExtensionProperties(
1219 const char* pLayerName,
1220 uint32_t* pPropertyCount,
1221 VkExtensionProperties* pProperties)
1222 {
1223 if (pProperties == NULL) {
1224 *pPropertyCount = ARRAY_SIZE(instance_extensions);
1225 return VK_SUCCESS;
1226 }
1227
1228 *pPropertyCount = MIN2(*pPropertyCount, ARRAY_SIZE(instance_extensions));
1229 typed_memcpy(pProperties, instance_extensions, *pPropertyCount);
1230
1231 if (*pPropertyCount < ARRAY_SIZE(instance_extensions))
1232 return VK_INCOMPLETE;
1233
1234 return VK_SUCCESS;
1235 }
1236
1237 VkResult radv_EnumerateDeviceExtensionProperties(
1238 VkPhysicalDevice physicalDevice,
1239 const char* pLayerName,
1240 uint32_t* pPropertyCount,
1241 VkExtensionProperties* pProperties)
1242 {
1243 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
1244
1245 if (pProperties == NULL) {
1246 *pPropertyCount = pdevice->extensions.num_ext;
1247 return VK_SUCCESS;
1248 }
1249
1250 *pPropertyCount = MIN2(*pPropertyCount, pdevice->extensions.num_ext);
1251 typed_memcpy(pProperties, pdevice->extensions.ext_array, *pPropertyCount);
1252
1253 if (*pPropertyCount < pdevice->extensions.num_ext)
1254 return VK_INCOMPLETE;
1255
1256 return VK_SUCCESS;
1257 }
1258
1259 VkResult radv_EnumerateInstanceLayerProperties(
1260 uint32_t* pPropertyCount,
1261 VkLayerProperties* pProperties)
1262 {
1263 if (pProperties == NULL) {
1264 *pPropertyCount = 0;
1265 return VK_SUCCESS;
1266 }
1267
1268 /* None supported at this time */
1269 return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
1270 }
1271
1272 VkResult radv_EnumerateDeviceLayerProperties(
1273 VkPhysicalDevice physicalDevice,
1274 uint32_t* pPropertyCount,
1275 VkLayerProperties* pProperties)
1276 {
1277 if (pProperties == NULL) {
1278 *pPropertyCount = 0;
1279 return VK_SUCCESS;
1280 }
1281
1282 /* None supported at this time */
1283 return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
1284 }
1285
1286 void radv_GetDeviceQueue(
1287 VkDevice _device,
1288 uint32_t queueFamilyIndex,
1289 uint32_t queueIndex,
1290 VkQueue* pQueue)
1291 {
1292 RADV_FROM_HANDLE(radv_device, device, _device);
1293
1294 *pQueue = radv_queue_to_handle(&device->queues[queueFamilyIndex][queueIndex]);
1295 }
1296
1297 static void radv_dump_trace(struct radv_device *device,
1298 struct radeon_winsys_cs *cs)
1299 {
1300 const char *filename = getenv("RADV_TRACE_FILE");
1301 FILE *f = fopen(filename, "w");
1302 if (!f) {
1303 fprintf(stderr, "Failed to write trace dump to %s\n", filename);
1304 return;
1305 }
1306
1307 fprintf(f, "Trace ID: %x\n", *device->trace_id_ptr);
1308 device->ws->cs_dump(cs, f, *device->trace_id_ptr);
1309 fclose(f);
1310 }
1311
1312 static void
1313 fill_geom_tess_rings(struct radv_queue *queue,
1314 uint32_t *map,
1315 bool add_sample_positions,
1316 uint32_t esgs_ring_size,
1317 struct radeon_winsys_bo *esgs_ring_bo,
1318 uint32_t gsvs_ring_size,
1319 struct radeon_winsys_bo *gsvs_ring_bo,
1320 uint32_t tess_factor_ring_size,
1321 struct radeon_winsys_bo *tess_factor_ring_bo,
1322 uint32_t tess_offchip_ring_size,
1323 struct radeon_winsys_bo *tess_offchip_ring_bo)
1324 {
1325 uint64_t esgs_va = 0, gsvs_va = 0;
1326 uint64_t tess_factor_va = 0, tess_offchip_va = 0;
1327 uint32_t *desc = &map[4];
1328
1329 if (esgs_ring_bo)
1330 esgs_va = queue->device->ws->buffer_get_va(esgs_ring_bo);
1331 if (gsvs_ring_bo)
1332 gsvs_va = queue->device->ws->buffer_get_va(gsvs_ring_bo);
1333 if (tess_factor_ring_bo)
1334 tess_factor_va = queue->device->ws->buffer_get_va(tess_factor_ring_bo);
1335 if (tess_offchip_ring_bo)
1336 tess_offchip_va = queue->device->ws->buffer_get_va(tess_offchip_ring_bo);
1337
1338 /* stride 0, num records - size, add tid, swizzle, elsize4,
1339 index stride 64 */
1340 desc[0] = esgs_va;
1341 desc[1] = S_008F04_BASE_ADDRESS_HI(esgs_va >> 32) |
1342 S_008F04_STRIDE(0) |
1343 S_008F04_SWIZZLE_ENABLE(true);
1344 desc[2] = esgs_ring_size;
1345 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1346 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1347 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1348 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1349 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1350 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1351 S_008F0C_ELEMENT_SIZE(1) |
1352 S_008F0C_INDEX_STRIDE(3) |
1353 S_008F0C_ADD_TID_ENABLE(true);
1354
1355 desc += 4;
1356 /* GS entry for ES->GS ring */
1357 /* stride 0, num records - size, elsize0,
1358 index stride 0 */
1359 desc[0] = esgs_va;
1360 desc[1] = S_008F04_BASE_ADDRESS_HI(esgs_va >> 32)|
1361 S_008F04_STRIDE(0) |
1362 S_008F04_SWIZZLE_ENABLE(false);
1363 desc[2] = esgs_ring_size;
1364 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1365 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1366 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1367 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1368 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1369 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1370 S_008F0C_ELEMENT_SIZE(0) |
1371 S_008F0C_INDEX_STRIDE(0) |
1372 S_008F0C_ADD_TID_ENABLE(false);
1373
1374 desc += 4;
1375 /* VS entry for GS->VS ring */
1376 /* stride 0, num records - size, elsize0,
1377 index stride 0 */
1378 desc[0] = gsvs_va;
1379 desc[1] = S_008F04_BASE_ADDRESS_HI(gsvs_va >> 32)|
1380 S_008F04_STRIDE(0) |
1381 S_008F04_SWIZZLE_ENABLE(false);
1382 desc[2] = gsvs_ring_size;
1383 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1384 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1385 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1386 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1387 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1388 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1389 S_008F0C_ELEMENT_SIZE(0) |
1390 S_008F0C_INDEX_STRIDE(0) |
1391 S_008F0C_ADD_TID_ENABLE(false);
1392 desc += 4;
1393
1394 /* stride gsvs_itemsize, num records 64
1395 elsize 4, index stride 16 */
1396 /* shader will patch stride and desc[2] */
1397 desc[0] = gsvs_va;
1398 desc[1] = S_008F04_BASE_ADDRESS_HI(gsvs_va >> 32)|
1399 S_008F04_STRIDE(0) |
1400 S_008F04_SWIZZLE_ENABLE(true);
1401 desc[2] = 0;
1402 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1403 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1404 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1405 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1406 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1407 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1408 S_008F0C_ELEMENT_SIZE(1) |
1409 S_008F0C_INDEX_STRIDE(1) |
1410 S_008F0C_ADD_TID_ENABLE(true);
1411 desc += 4;
1412
1413 desc[0] = tess_factor_va;
1414 desc[1] = S_008F04_BASE_ADDRESS_HI(tess_factor_va >> 32) |
1415 S_008F04_STRIDE(0) |
1416 S_008F04_SWIZZLE_ENABLE(false);
1417 desc[2] = tess_factor_ring_size;
1418 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1419 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1420 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1421 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1422 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1423 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1424 S_008F0C_ELEMENT_SIZE(0) |
1425 S_008F0C_INDEX_STRIDE(0) |
1426 S_008F0C_ADD_TID_ENABLE(false);
1427 desc += 4;
1428
1429 desc[0] = tess_offchip_va;
1430 desc[1] = S_008F04_BASE_ADDRESS_HI(tess_offchip_va >> 32) |
1431 S_008F04_STRIDE(0) |
1432 S_008F04_SWIZZLE_ENABLE(false);
1433 desc[2] = tess_offchip_ring_size;
1434 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1435 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1436 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1437 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1438 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1439 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1440 S_008F0C_ELEMENT_SIZE(0) |
1441 S_008F0C_INDEX_STRIDE(0) |
1442 S_008F0C_ADD_TID_ENABLE(false);
1443 desc += 4;
1444
1445 /* add sample positions after all rings */
1446 memcpy(desc, queue->device->sample_locations_1x, 8);
1447 desc += 2;
1448 memcpy(desc, queue->device->sample_locations_2x, 16);
1449 desc += 4;
1450 memcpy(desc, queue->device->sample_locations_4x, 32);
1451 desc += 8;
1452 memcpy(desc, queue->device->sample_locations_8x, 64);
1453 desc += 16;
1454 memcpy(desc, queue->device->sample_locations_16x, 128);
1455 }
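
All six ring entries above follow one 4-dword pattern with a handful of varying fields; a condensed equivalent, offered as a sketch rather than radv code (the S_008F04/S_008F0C macros are the sid.h ones already used above):

#include <stdbool.h>
#include <stdint.h>

static void
fill_buffer_desc(uint32_t *desc, uint64_t va, uint32_t num_records,
                 bool swizzle, unsigned element_size, unsigned index_stride,
                 bool add_tid)
{
        desc[0] = va;
        desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
                  S_008F04_STRIDE(0) |
                  S_008F04_SWIZZLE_ENABLE(swizzle);
        desc[2] = num_records;
        desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
                  S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
                  S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
                  S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
                  S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
                  S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
                  S_008F0C_ELEMENT_SIZE(element_size) |
                  S_008F0C_INDEX_STRIDE(index_stride) |
                  S_008F0C_ADD_TID_ENABLE(add_tid);
}

With it, the ES entry for the ES->GS ring above is fill_buffer_desc(desc, esgs_va, esgs_ring_size, true, 1, 3, true) and the GS entry is the same call with (false, 0, 0, false).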
1456
1457 static unsigned
1458 radv_get_hs_offchip_param(struct radv_device *device, uint32_t *max_offchip_buffers_p)
1459 {
1460 bool double_offchip_buffers = device->physical_device->rad_info.chip_class >= CIK &&
1461 device->physical_device->rad_info.family != CHIP_CARRIZO &&
1462 device->physical_device->rad_info.family != CHIP_STONEY;
1463 unsigned max_offchip_buffers_per_se = double_offchip_buffers ? 128 : 64;
1464 unsigned max_offchip_buffers = max_offchip_buffers_per_se *
1465 device->physical_device->rad_info.max_se;
1466 unsigned offchip_granularity;
1467 unsigned hs_offchip_param;
1468 switch (device->tess_offchip_block_dw_size) {
1469 default:
1470 assert(0);
1471 /* fall through */
1472 case 8192:
1473 offchip_granularity = V_03093C_X_8K_DWORDS;
1474 break;
1475 case 4096:
1476 offchip_granularity = V_03093C_X_4K_DWORDS;
1477 break;
1478 }
1479
1480 switch (device->physical_device->rad_info.chip_class) {
1481 case SI:
1482 max_offchip_buffers = MIN2(max_offchip_buffers, 126);
1483 break;
1484 case CIK:
1485 case VI:
1486 case GFX9:
1487 default:
1488 max_offchip_buffers = MIN2(max_offchip_buffers, 508);
1489 break;
1490 }
1491
1492 *max_offchip_buffers_p = max_offchip_buffers;
1493 if (device->physical_device->rad_info.chip_class >= CIK) {
1494 if (device->physical_device->rad_info.chip_class >= VI)
1495 --max_offchip_buffers;
1496 hs_offchip_param =
1497 S_03093C_OFFCHIP_BUFFERING(max_offchip_buffers) |
1498 S_03093C_OFFCHIP_GRANULARITY(offchip_granularity);
1499 } else {
1500 hs_offchip_param =
1501 S_0089B0_OFFCHIP_BUFFERING(max_offchip_buffers);
1502 }
1503 return hs_offchip_param;
1504 }
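
Worked example: on Vega10 (GFX9, max_se = 4) double_offchip_buffers holds, so 128 * 4 = 512 buffers are clamped to 508; *max_offchip_buffers_p reports 508, the VI-and-newer path encodes 507 into OFFCHIP_BUFFERING, and offchip_granularity stays at the 8K-dword setting.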
1505
1506 static VkResult
1507 radv_get_preamble_cs(struct radv_queue *queue,
1508 uint32_t scratch_size,
1509 uint32_t compute_scratch_size,
1510 uint32_t esgs_ring_size,
1511 uint32_t gsvs_ring_size,
1512 bool needs_tess_rings,
1513 bool needs_sample_positions,
1514 struct radeon_winsys_cs **initial_preamble_cs,
1515 struct radeon_winsys_cs **continue_preamble_cs)
1516 {
1517 struct radeon_winsys_bo *scratch_bo = NULL;
1518 struct radeon_winsys_bo *descriptor_bo = NULL;
1519 struct radeon_winsys_bo *compute_scratch_bo = NULL;
1520 struct radeon_winsys_bo *esgs_ring_bo = NULL;
1521 struct radeon_winsys_bo *gsvs_ring_bo = NULL;
1522 struct radeon_winsys_bo *tess_factor_ring_bo = NULL;
1523 struct radeon_winsys_bo *tess_offchip_ring_bo = NULL;
1524 struct radeon_winsys_cs *dest_cs[2] = {0};
1525 bool add_tess_rings = false, add_sample_positions = false;
1526 unsigned tess_factor_ring_size = 0, tess_offchip_ring_size = 0;
1527 unsigned max_offchip_buffers;
1528 unsigned hs_offchip_param = 0;
1529 if (!queue->has_tess_rings) {
1530 if (needs_tess_rings)
1531 add_tess_rings = true;
1532 }
1533 if (!queue->has_sample_positions) {
1534 if (needs_sample_positions)
1535 add_sample_positions = true;
1536 }
1537 tess_factor_ring_size = 32768 * queue->device->physical_device->rad_info.max_se;
1538 hs_offchip_param = radv_get_hs_offchip_param(queue->device,
1539 &max_offchip_buffers);
1540 tess_offchip_ring_size = max_offchip_buffers *
1541 queue->device->tess_offchip_block_dw_size * 4;
1542
1543 if (scratch_size <= queue->scratch_size &&
1544 compute_scratch_size <= queue->compute_scratch_size &&
1545 esgs_ring_size <= queue->esgs_ring_size &&
1546 gsvs_ring_size <= queue->gsvs_ring_size &&
1547 !add_tess_rings && !add_sample_positions &&
1548 queue->initial_preamble_cs) {
1549 *initial_preamble_cs = queue->initial_preamble_cs;
1550 *continue_preamble_cs = queue->continue_preamble_cs;
1551 if (!scratch_size && !compute_scratch_size && !esgs_ring_size && !gsvs_ring_size)
1552 *continue_preamble_cs = NULL;
1553 return VK_SUCCESS;
1554 }
1555
1556 if (scratch_size > queue->scratch_size) {
1557 scratch_bo = queue->device->ws->buffer_create(queue->device->ws,
1558 scratch_size,
1559 4096,
1560 RADEON_DOMAIN_VRAM,
1561 RADEON_FLAG_NO_CPU_ACCESS);
1562 if (!scratch_bo)
1563 goto fail;
1564 } else
1565 scratch_bo = queue->scratch_bo;
1566
1567 if (compute_scratch_size > queue->compute_scratch_size) {
1568 compute_scratch_bo = queue->device->ws->buffer_create(queue->device->ws,
1569 compute_scratch_size,
1570 4096,
1571 RADEON_DOMAIN_VRAM,
1572 RADEON_FLAG_NO_CPU_ACCESS);
1573 if (!compute_scratch_bo)
1574 goto fail;
1575
1576 } else
1577 compute_scratch_bo = queue->compute_scratch_bo;
1578
1579 if (esgs_ring_size > queue->esgs_ring_size) {
1580 esgs_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
1581 esgs_ring_size,
1582 4096,
1583 RADEON_DOMAIN_VRAM,
1584 RADEON_FLAG_NO_CPU_ACCESS);
1585 if (!esgs_ring_bo)
1586 goto fail;
1587 } else {
1588 esgs_ring_bo = queue->esgs_ring_bo;
1589 esgs_ring_size = queue->esgs_ring_size;
1590 }
1591
1592 if (gsvs_ring_size > queue->gsvs_ring_size) {
1593 gsvs_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
1594 gsvs_ring_size,
1595 4096,
1596 RADEON_DOMAIN_VRAM,
1597 RADEON_FLAG_NO_CPU_ACCESS);
1598 if (!gsvs_ring_bo)
1599 goto fail;
1600 } else {
1601 gsvs_ring_bo = queue->gsvs_ring_bo;
1602 gsvs_ring_size = queue->gsvs_ring_size;
1603 }
1604
1605 if (add_tess_rings) {
1606 tess_factor_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
1607 tess_factor_ring_size,
1608 256,
1609 RADEON_DOMAIN_VRAM,
1610 RADEON_FLAG_NO_CPU_ACCESS);
1611 if (!tess_factor_ring_bo)
1612 goto fail;
1613 tess_offchip_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
1614 tess_offchip_ring_size,
1615 256,
1616 RADEON_DOMAIN_VRAM,
1617 RADEON_FLAG_NO_CPU_ACCESS);
1618 if (!tess_offchip_ring_bo)
1619 goto fail;
1620 } else {
1621 tess_factor_ring_bo = queue->tess_factor_ring_bo;
1622 tess_offchip_ring_bo = queue->tess_offchip_ring_bo;
1623 }
1624
1625 if (scratch_bo != queue->scratch_bo ||
1626 esgs_ring_bo != queue->esgs_ring_bo ||
1627 gsvs_ring_bo != queue->gsvs_ring_bo ||
1628 tess_factor_ring_bo != queue->tess_factor_ring_bo ||
1629 tess_offchip_ring_bo != queue->tess_offchip_ring_bo || add_sample_positions) {
1630 uint32_t size = 0;
1631 if (gsvs_ring_bo || esgs_ring_bo ||
1632 tess_factor_ring_bo || tess_offchip_ring_bo || add_sample_positions) {
1633 size = 112; /* 2 dword + 2 padding + 4 dword * 6 */
1634 if (add_sample_positions)
1635 size += 256; /* (1+2+4+8+16) sample positions * 2 floats * 4 bytes = 248 bytes, padded. */
1636 }
1637 else if (scratch_bo)
1638 size = 8; /* 2 dword */
1639
1640 descriptor_bo = queue->device->ws->buffer_create(queue->device->ws,
1641 size,
1642 4096,
1643 RADEON_DOMAIN_VRAM,
1644 RADEON_FLAG_CPU_ACCESS);
1645 if (!descriptor_bo)
1646 goto fail;
1647 } else
1648 descriptor_bo = queue->descriptor_bo;
1649
1650 for(int i = 0; i < 2; ++i) {
1651 struct radeon_winsys_cs *cs = NULL;
1652 cs = queue->device->ws->cs_create(queue->device->ws,
1653 queue->queue_family_index ? RING_COMPUTE : RING_GFX);
1654 if (!cs)
1655 goto fail;
1656
1657 dest_cs[i] = cs;
1658
1659 if (scratch_bo)
1660 queue->device->ws->cs_add_buffer(cs, scratch_bo, 8);
1661
1662 if (esgs_ring_bo)
1663 queue->device->ws->cs_add_buffer(cs, esgs_ring_bo, 8);
1664
1665 if (gsvs_ring_bo)
1666 queue->device->ws->cs_add_buffer(cs, gsvs_ring_bo, 8);
1667
1668 if (tess_factor_ring_bo)
1669 queue->device->ws->cs_add_buffer(cs, tess_factor_ring_bo, 8);
1670
1671 if (tess_offchip_ring_bo)
1672 queue->device->ws->cs_add_buffer(cs, tess_offchip_ring_bo, 8);
1673
1674 if (descriptor_bo)
1675 queue->device->ws->cs_add_buffer(cs, descriptor_bo, 8);
1676
1677 if (descriptor_bo != queue->descriptor_bo) {
1678 uint32_t *map = (uint32_t*)queue->device->ws->buffer_map(descriptor_bo);
1679
1680 if (scratch_bo) {
1681 uint64_t scratch_va = queue->device->ws->buffer_get_va(scratch_bo);
1682 uint32_t rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
1683 S_008F04_SWIZZLE_ENABLE(1);
1684 map[0] = scratch_va;
1685 map[1] = rsrc1;
1686 }
1687
1688 if (esgs_ring_bo || gsvs_ring_bo || tess_factor_ring_bo || tess_offchip_ring_bo ||
1689 add_sample_positions)
1690 fill_geom_tess_rings(queue, map, add_sample_positions,
1691 esgs_ring_size, esgs_ring_bo,
1692 gsvs_ring_size, gsvs_ring_bo,
1693 tess_factor_ring_size, tess_factor_ring_bo,
1694 tess_offchip_ring_size, tess_offchip_ring_bo);
1695
1696 queue->device->ws->buffer_unmap(descriptor_bo);
1697 }
1698
1699 if (esgs_ring_bo || gsvs_ring_bo || tess_factor_ring_bo || tess_offchip_ring_bo) {
1700 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1701 radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1702 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1703 radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
1704 }
1705
1706 if (esgs_ring_bo || gsvs_ring_bo) {
1707 if (queue->device->physical_device->rad_info.chip_class >= CIK) {
1708 radeon_set_uconfig_reg_seq(cs, R_030900_VGT_ESGS_RING_SIZE, 2);
1709 radeon_emit(cs, esgs_ring_size >> 8);
1710 radeon_emit(cs, gsvs_ring_size >> 8);
1711 } else {
1712 radeon_set_config_reg_seq(cs, R_0088C8_VGT_ESGS_RING_SIZE, 2);
1713 radeon_emit(cs, esgs_ring_size >> 8);
1714 radeon_emit(cs, gsvs_ring_size >> 8);
1715 }
1716 }
1717
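/* Program the tessellation-factor ring: the SIZE field counts dwords
 * (hence / 4) and the base address is in 256-byte units (hence >> 8);
 * GFX9 takes the address bits above 40 via a separate HI register. */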
1718 if (tess_factor_ring_bo) {
1719 uint64_t tf_va = queue->device->ws->buffer_get_va(tess_factor_ring_bo);
1720 if (queue->device->physical_device->rad_info.chip_class >= CIK) {
1721 radeon_set_uconfig_reg(cs, R_030938_VGT_TF_RING_SIZE,
1722 S_030938_SIZE(tess_factor_ring_size / 4));
1723 radeon_set_uconfig_reg(cs, R_030940_VGT_TF_MEMORY_BASE,
1724 tf_va >> 8);
1725 if (queue->device->physical_device->rad_info.chip_class >= GFX9) {
1726 radeon_set_uconfig_reg(cs, R_030944_VGT_TF_MEMORY_BASE_HI,
1727 tf_va >> 40);
1728 }
1729 radeon_set_uconfig_reg(cs, R_03093C_VGT_HS_OFFCHIP_PARAM, hs_offchip_param);
1730 } else {
1731 radeon_set_config_reg(cs, R_008988_VGT_TF_RING_SIZE,
1732 S_008988_SIZE(tess_factor_ring_size / 4));
1733 radeon_set_config_reg(cs, R_0089B8_VGT_TF_MEMORY_BASE,
1734 tf_va >> 8);
1735 radeon_set_config_reg(cs, R_0089B0_VGT_HS_OFFCHIP_PARAM,
1736 hs_offchip_param);
1737 }
1738 }
1739
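/* Point the first USER_DATA SGPR pair of every graphics shader stage at
 * the descriptor BO, so shaders can locate the ring descriptors above. */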
1740 if (descriptor_bo) {
1741 uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0,
1742 R_00B130_SPI_SHADER_USER_DATA_VS_0,
1743 R_00B230_SPI_SHADER_USER_DATA_GS_0,
1744 R_00B330_SPI_SHADER_USER_DATA_ES_0,
1745 R_00B430_SPI_SHADER_USER_DATA_HS_0,
1746 R_00B530_SPI_SHADER_USER_DATA_LS_0};
1747
1748 uint64_t va = queue->device->ws->buffer_get_va(descriptor_bo);
1749
1750 for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
1751 radeon_set_sh_reg_seq(cs, regs[i], 2);
1752 radeon_emit(cs, va);
1753 radeon_emit(cs, va >> 32);
1754 }
1755 }
1756
1757 if (compute_scratch_bo) {
1758 uint64_t scratch_va = queue->device->ws->buffer_get_va(compute_scratch_bo);
1759 uint32_t rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
1760 S_008F04_SWIZZLE_ENABLE(1);
1761
1762 queue->device->ws->cs_add_buffer(cs, compute_scratch_bo, 8);
1763
1764 radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
1765 radeon_emit(cs, scratch_va);
1766 radeon_emit(cs, rsrc1);
1767 }
1768
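/* Only the initial preamble (i == 0) invalidates caches; the continue
 * preamble, replayed for follow-up chunks of the same submission, skips
 * the flush. */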
1769 if (!i) {
1770 si_cs_emit_cache_flush(cs,
1771 false,
1772 queue->device->physical_device->rad_info.chip_class,
1773 NULL, 0,
1774 queue->queue_family_index == RING_COMPUTE &&
1775 queue->device->physical_device->rad_info.chip_class >= CIK,
1776 RADV_CMD_FLAG_INV_ICACHE |
1777 RADV_CMD_FLAG_INV_SMEM_L1 |
1778 RADV_CMD_FLAG_INV_VMEM_L1 |
1779 RADV_CMD_FLAG_INV_GLOBAL_L2);
1780 }
1781
1782 if (!queue->device->ws->cs_finalize(cs))
1783 goto fail;
1784 }
1785
1786 if (queue->initial_preamble_cs)
1787 queue->device->ws->cs_destroy(queue->initial_preamble_cs);
1788
1789 if (queue->continue_preamble_cs)
1790 queue->device->ws->cs_destroy(queue->continue_preamble_cs);
1791
1792 queue->initial_preamble_cs = dest_cs[0];
1793 queue->continue_preamble_cs = dest_cs[1];
1794
1795 if (scratch_bo != queue->scratch_bo) {
1796 if (queue->scratch_bo)
1797 queue->device->ws->buffer_destroy(queue->scratch_bo);
1798 queue->scratch_bo = scratch_bo;
1799 queue->scratch_size = scratch_size;
1800 }
1801
1802 if (compute_scratch_bo != queue->compute_scratch_bo) {
1803 if (queue->compute_scratch_bo)
1804 queue->device->ws->buffer_destroy(queue->compute_scratch_bo);
1805 queue->compute_scratch_bo = compute_scratch_bo;
1806 queue->compute_scratch_size = compute_scratch_size;
1807 }
1808
1809 if (esgs_ring_bo != queue->esgs_ring_bo) {
1810 if (queue->esgs_ring_bo)
1811 queue->device->ws->buffer_destroy(queue->esgs_ring_bo);
1812 queue->esgs_ring_bo = esgs_ring_bo;
1813 queue->esgs_ring_size = esgs_ring_size;
1814 }
1815
1816 if (gsvs_ring_bo != queue->gsvs_ring_bo) {
1817 if (queue->gsvs_ring_bo)
1818 queue->device->ws->buffer_destroy(queue->gsvs_ring_bo);
1819 queue->gsvs_ring_bo = gsvs_ring_bo;
1820 queue->gsvs_ring_size = gsvs_ring_size;
1821 }
1822
1823 if (tess_factor_ring_bo != queue->tess_factor_ring_bo) {
1824 queue->tess_factor_ring_bo = tess_factor_ring_bo;
1825 }
1826
1827 if (tess_offchip_ring_bo != queue->tess_offchip_ring_bo) {
1828 queue->tess_offchip_ring_bo = tess_offchip_ring_bo;
1829 queue->has_tess_rings = true;
1830 }
1831
1832 if (descriptor_bo != queue->descriptor_bo) {
1833 if (queue->descriptor_bo)
1834 queue->device->ws->buffer_destroy(queue->descriptor_bo);
1835
1836 queue->descriptor_bo = descriptor_bo;
1837 }
1838
1839 if (add_sample_positions)
1840 queue->has_sample_positions = true;
1841
1842 *initial_preamble_cs = queue->initial_preamble_cs;
1843 *continue_preamble_cs = queue->continue_preamble_cs;
1844 if (!scratch_size && !compute_scratch_size && !esgs_ring_size && !gsvs_ring_size)
1845 *continue_preamble_cs = NULL;
1846 return VK_SUCCESS;
1847 fail:
1848 for (int i = 0; i < ARRAY_SIZE(dest_cs); ++i)
1849 if (dest_cs[i])
1850 queue->device->ws->cs_destroy(dest_cs[i]);
1851 if (descriptor_bo && descriptor_bo != queue->descriptor_bo)
1852 queue->device->ws->buffer_destroy(descriptor_bo);
1853 if (scratch_bo && scratch_bo != queue->scratch_bo)
1854 queue->device->ws->buffer_destroy(scratch_bo);
1855 if (compute_scratch_bo && compute_scratch_bo != queue->compute_scratch_bo)
1856 queue->device->ws->buffer_destroy(compute_scratch_bo);
1857 if (esgs_ring_bo && esgs_ring_bo != queue->esgs_ring_bo)
1858 queue->device->ws->buffer_destroy(esgs_ring_bo);
1859 if (gsvs_ring_bo && gsvs_ring_bo != queue->gsvs_ring_bo)
1860 queue->device->ws->buffer_destroy(gsvs_ring_bo);
1861 if (tess_factor_ring_bo && tess_factor_ring_bo != queue->tess_factor_ring_bo)
1862 queue->device->ws->buffer_destroy(tess_factor_ring_bo);
1863 if (tess_offchip_ring_bo && tess_offchip_ring_bo != queue->tess_offchip_ring_bo)
1864 queue->device->ws->buffer_destroy(tess_offchip_ring_bo);
1865 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
1866 }
1867
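/* Submission entry point: compute the worst-case scratch and ring sizes
 * across all command buffers, rebuild the preambles if they grew, then
 * submit the command streams in bounded chunks (chunk size 1 when a
 * trace BO is active, so a hang can be pinned to a single CS). */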
1868 VkResult radv_QueueSubmit(
1869 VkQueue _queue,
1870 uint32_t submitCount,
1871 const VkSubmitInfo* pSubmits,
1872 VkFence _fence)
1873 {
1874 RADV_FROM_HANDLE(radv_queue, queue, _queue);
1875 RADV_FROM_HANDLE(radv_fence, fence, _fence);
1876 struct radeon_winsys_fence *base_fence = fence ? fence->fence : NULL;
1877 struct radeon_winsys_ctx *ctx = queue->hw_ctx;
1878 int ret;
1879 uint32_t max_cs_submission = queue->device->trace_bo ? 1 : UINT32_MAX;
1880 uint32_t scratch_size = 0;
1881 uint32_t compute_scratch_size = 0;
1882 uint32_t esgs_ring_size = 0, gsvs_ring_size = 0;
1883 struct radeon_winsys_cs *initial_preamble_cs = NULL, *continue_preamble_cs = NULL;
1884 VkResult result;
1885 bool fence_emitted = false;
1886 bool tess_rings_needed = false;
1887 bool sample_positions_needed = false;
1888
1889 /* Do this first so failing to allocate scratch buffers can't result in
1890 * partially executed submissions. */
1891 for (uint32_t i = 0; i < submitCount; i++) {
1892 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
1893 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer,
1894 pSubmits[i].pCommandBuffers[j]);
1895
1896 scratch_size = MAX2(scratch_size, cmd_buffer->scratch_size_needed);
1897 compute_scratch_size = MAX2(compute_scratch_size,
1898 cmd_buffer->compute_scratch_size_needed);
1899 esgs_ring_size = MAX2(esgs_ring_size, cmd_buffer->esgs_ring_size_needed);
1900 gsvs_ring_size = MAX2(gsvs_ring_size, cmd_buffer->gsvs_ring_size_needed);
1901 tess_rings_needed |= cmd_buffer->tess_rings_needed;
1902 sample_positions_needed |= cmd_buffer->sample_positions_needed;
1903 }
1904 }
1905
1906 result = radv_get_preamble_cs(queue, scratch_size, compute_scratch_size,
1907 esgs_ring_size, gsvs_ring_size, tess_rings_needed,
1908 sample_positions_needed,
1909 &initial_preamble_cs, &continue_preamble_cs);
1910 if (result != VK_SUCCESS)
1911 return result;
1912
1913 for (uint32_t i = 0; i < submitCount; i++) {
1914 struct radeon_winsys_cs **cs_array;
1915 bool do_flush = !i || pSubmits[i].pWaitDstStageMask;
1916 bool can_patch = !do_flush;
1917 uint32_t advance;
1918
1919 if (!pSubmits[i].commandBufferCount) {
1920 if (pSubmits[i].waitSemaphoreCount || pSubmits[i].signalSemaphoreCount) {
1921 ret = queue->device->ws->cs_submit(ctx, queue->queue_idx,
1922 &queue->device->empty_cs[queue->queue_family_index],
1923 1, NULL, NULL,
1924 (struct radeon_winsys_sem **)pSubmits[i].pWaitSemaphores,
1925 pSubmits[i].waitSemaphoreCount,
1926 (struct radeon_winsys_sem **)pSubmits[i].pSignalSemaphores,
1927 pSubmits[i].signalSemaphoreCount,
1928 false, base_fence);
1929 if (ret) {
1930 radv_loge("failed to submit CS %d\n", i);
1931 abort();
1932 }
1933 fence_emitted = true;
1934 }
1935 continue;
1936 }
1937
1938 cs_array = malloc(sizeof(struct radeon_winsys_cs *) *
1939 (pSubmits[i].commandBufferCount + do_flush));
if (!cs_array)
return VK_ERROR_OUT_OF_HOST_MEMORY;
1940 
1941 if (do_flush)
1942 cs_array[0] = pSubmits[i].waitSemaphoreCount ?
1943 queue->device->flush_shader_cs[queue->queue_family_index] :
1944 queue->device->flush_cs[queue->queue_family_index];
1945
1946 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
1947 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer,
1948 pSubmits[i].pCommandBuffers[j]);
1949 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1950
1951 cs_array[j + do_flush] = cmd_buffer->cs;
1952 if ((cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT))
1953 can_patch = false;
1954 }
1955
1956 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount + do_flush; j += advance) {
1957 advance = MIN2(max_cs_submission,
1958 pSubmits[i].commandBufferCount + do_flush - j);
1959 bool b = j == 0;
1960 bool e = j + advance == pSubmits[i].commandBufferCount + do_flush;
1961
1962 if (queue->device->trace_bo)
1963 *queue->device->trace_id_ptr = 0;
1964
1965 ret = queue->device->ws->cs_submit(ctx, queue->queue_idx, cs_array + j,
1966 advance, initial_preamble_cs, continue_preamble_cs,
1967 (struct radeon_winsys_sem **)pSubmits[i].pWaitSemaphores,
1968 b ? pSubmits[i].waitSemaphoreCount : 0,
1969 (struct radeon_winsys_sem **)pSubmits[i].pSignalSemaphores,
1970 e ? pSubmits[i].signalSemaphoreCount : 0,
1971 can_patch, base_fence);
1972
1973 if (ret) {
1974 radv_loge("failed to submit CS %d\n", i);
1975 abort();
1976 }
1977 fence_emitted = true;
1978 if (queue->device->trace_bo) {
1979 bool success = queue->device->ws->ctx_wait_idle(
1980 queue->hw_ctx,
1981 radv_queue_family_to_ring(
1982 queue->queue_family_index),
1983 queue->queue_idx);
1984
1985 if (!success) { /* Hang */
1986 radv_dump_trace(queue->device, cs_array[j]);
1987 abort();
1988 }
1989 }
1990 }
1991 free(cs_array);
1992 }
1993
1994 if (fence) {
1995 if (!fence_emitted)
1996 ret = queue->device->ws->cs_submit(ctx, queue->queue_idx,
1997 &queue->device->empty_cs[queue->queue_family_index],
1998 1, NULL, NULL, NULL, 0, NULL, 0,
1999 false, base_fence);
2000
2001 fence->submitted = true;
2002 }
2003
2004 return VK_SUCCESS;
2005 }
2006
2007 VkResult radv_QueueWaitIdle(
2008 VkQueue _queue)
2009 {
2010 RADV_FROM_HANDLE(radv_queue, queue, _queue);
2011
2012 queue->device->ws->ctx_wait_idle(queue->hw_ctx,
2013 radv_queue_family_to_ring(queue->queue_family_index),
2014 queue->queue_idx);
2015 return VK_SUCCESS;
2016 }
2017
2018 VkResult radv_DeviceWaitIdle(
2019 VkDevice _device)
2020 {
2021 RADV_FROM_HANDLE(radv_device, device, _device);
2022
2023 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
2024 for (unsigned q = 0; q < device->queue_count[i]; q++) {
2025 radv_QueueWaitIdle(radv_queue_to_handle(&device->queues[i][q]));
2026 }
2027 }
2028 return VK_SUCCESS;
2029 }
2030
2031 PFN_vkVoidFunction radv_GetInstanceProcAddr(
2032 VkInstance instance,
2033 const char* pName)
2034 {
2035 return radv_lookup_entrypoint(pName);
2036 }
2037
2038 /* The loader wants us to expose a second GetInstanceProcAddr function
2039 * to work around certain LD_PRELOAD issues seen in apps.
2040 */
2041 PUBLIC
2042 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
2043 VkInstance instance,
2044 const char* pName);
2045
2046 PUBLIC
2047 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
2048 VkInstance instance,
2049 const char* pName)
2050 {
2051 return radv_GetInstanceProcAddr(instance, pName);
2052 }
2053
2054 PFN_vkVoidFunction radv_GetDeviceProcAddr(
2055 VkDevice device,
2056 const char* pName)
2057 {
2058 return radv_lookup_entrypoint(pName);
2059 }
2060
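/* Export a BO as an opaque fd. For image-backed allocations, write the
 * current tiling metadata to the BO first so the importing side can
 * reconstruct the layout. */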
2061 bool radv_get_memory_fd(struct radv_device *device,
2062 struct radv_device_memory *memory,
2063 int *pFD)
2064 {
2065 struct radeon_bo_metadata metadata;
2066
2067 if (memory->image) {
2068 radv_init_metadata(device, memory->image, &metadata);
2069 device->ws->buffer_set_metadata(memory->bo, &metadata);
2070 }
2071
2072 return device->ws->buffer_get_fd(device->ws, memory->bo,
2073 pFD);
2074 }
2075
2076 VkResult radv_AllocateMemory(
2077 VkDevice _device,
2078 const VkMemoryAllocateInfo* pAllocateInfo,
2079 const VkAllocationCallbacks* pAllocator,
2080 VkDeviceMemory* pMem)
2081 {
2082 RADV_FROM_HANDLE(radv_device, device, _device);
2083 struct radv_device_memory *mem;
2084 VkResult result;
2085 enum radeon_bo_domain domain;
2086 uint32_t flags = 0;
2087
2088 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
2089
2090 if (pAllocateInfo->allocationSize == 0) {
2091 /* Apparently, this is allowed */
2092 *pMem = VK_NULL_HANDLE;
2093 return VK_SUCCESS;
2094 }
2095
2096 const VkImportMemoryFdInfoKHR *import_info =
2097 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
2098 const VkMemoryDedicatedAllocateInfoKHR *dedicate_info =
2099 vk_find_struct_const(pAllocateInfo->pNext, MEMORY_DEDICATED_ALLOCATE_INFO_KHR);
2100
2101 mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
2102 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2103 if (mem == NULL)
2104 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2105
2106 if (dedicate_info) {
2107 mem->image = radv_image_from_handle(dedicate_info->image);
2108 mem->buffer = radv_buffer_from_handle(dedicate_info->buffer);
2109 } else {
2110 mem->image = NULL;
2111 mem->buffer = NULL;
2112 }
2113
2114 if (import_info) {
2115 assert(import_info->handleType ==
2116 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
2117 mem->bo = device->ws->buffer_from_fd(device->ws, import_info->fd,
2118 NULL, NULL);
2119 if (!mem->bo) {
2120 result = VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR;
2121 goto fail;
2122 } else
2123 goto out_success;
2124 }
2125
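/* Translate the memory type index into a winsys domain and flags: the
 * GTT types allocate in system memory (write-combined or cached); the
 * rest go to VRAM, with CPU access kept only for the host-visible type. */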
2126 uint64_t alloc_size = align_u64(pAllocateInfo->allocationSize, 4096);
2127 if (pAllocateInfo->memoryTypeIndex == RADV_MEM_TYPE_GTT_WRITE_COMBINE ||
2128 pAllocateInfo->memoryTypeIndex == RADV_MEM_TYPE_GTT_CACHED)
2129 domain = RADEON_DOMAIN_GTT;
2130 else
2131 domain = RADEON_DOMAIN_VRAM;
2132
2133 if (pAllocateInfo->memoryTypeIndex == RADV_MEM_TYPE_VRAM)
2134 flags |= RADEON_FLAG_NO_CPU_ACCESS;
2135 else
2136 flags |= RADEON_FLAG_CPU_ACCESS;
2137
2138 if (pAllocateInfo->memoryTypeIndex == RADV_MEM_TYPE_GTT_WRITE_COMBINE)
2139 flags |= RADEON_FLAG_GTT_WC;
2140
2141 mem->bo = device->ws->buffer_create(device->ws, alloc_size, device->physical_device->rad_info.max_alignment,
2142 domain, flags);
2143
2144 if (!mem->bo) {
2145 result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
2146 goto fail;
2147 }
2148 mem->type_index = pAllocateInfo->memoryTypeIndex;
2149 out_success:
2150 *pMem = radv_device_memory_to_handle(mem);
2151
2152 return VK_SUCCESS;
2153
2154 fail:
2155 vk_free2(&device->alloc, pAllocator, mem);
2156
2157 return result;
2158 }
2159
2160 void radv_FreeMemory(
2161 VkDevice _device,
2162 VkDeviceMemory _mem,
2163 const VkAllocationCallbacks* pAllocator)
2164 {
2165 RADV_FROM_HANDLE(radv_device, device, _device);
2166 RADV_FROM_HANDLE(radv_device_memory, mem, _mem);
2167
2168 if (mem == NULL)
2169 return;
2170
2171 device->ws->buffer_destroy(mem->bo);
2172 mem->bo = NULL;
2173
2174 vk_free2(&device->alloc, pAllocator, mem);
2175 }
2176
2177 VkResult radv_MapMemory(
2178 VkDevice _device,
2179 VkDeviceMemory _memory,
2180 VkDeviceSize offset,
2181 VkDeviceSize size,
2182 VkMemoryMapFlags flags,
2183 void** ppData)
2184 {
2185 RADV_FROM_HANDLE(radv_device, device, _device);
2186 RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
2187
2188 if (mem == NULL) {
2189 *ppData = NULL;
2190 return VK_SUCCESS;
2191 }
2192
2193 *ppData = device->ws->buffer_map(mem->bo);
2194 if (*ppData) {
2195 *ppData += offset;
2196 return VK_SUCCESS;
2197 }
2198
2199 return VK_ERROR_MEMORY_MAP_FAILED;
2200 }
2201
2202 void radv_UnmapMemory(
2203 VkDevice _device,
2204 VkDeviceMemory _memory)
2205 {
2206 RADV_FROM_HANDLE(radv_device, device, _device);
2207 RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
2208
2209 if (mem == NULL)
2210 return;
2211
2212 device->ws->buffer_unmap(mem->bo);
2213 }
2214
2215 VkResult radv_FlushMappedMemoryRanges(
2216 VkDevice _device,
2217 uint32_t memoryRangeCount,
2218 const VkMappedMemoryRange* pMemoryRanges)
2219 {
2220 return VK_SUCCESS;
2221 }
2222
2223 VkResult radv_InvalidateMappedMemoryRanges(
2224 VkDevice _device,
2225 uint32_t memoryRangeCount,
2226 const VkMappedMemoryRange* pMemoryRanges)
2227 {
2228 return VK_SUCCESS;
2229 }
2230
2231 void radv_GetBufferMemoryRequirements(
2232 VkDevice device,
2233 VkBuffer _buffer,
2234 VkMemoryRequirements* pMemoryRequirements)
2235 {
2236 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
2237
2238 pMemoryRequirements->memoryTypeBits = (1u << RADV_MEM_TYPE_COUNT) - 1;
2239
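/* Sparse-capable buffers are page-aligned so ranges can be rebound;
 * plain buffers only need 16-byte alignment. */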
2240 if (buffer->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)
2241 pMemoryRequirements->alignment = 4096;
2242 else
2243 pMemoryRequirements->alignment = 16;
2244
2245 pMemoryRequirements->size = align64(buffer->size, pMemoryRequirements->alignment);
2246 }
2247
2248 void radv_GetBufferMemoryRequirements2KHR(
2249 VkDevice device,
2250 const VkBufferMemoryRequirementsInfo2KHR* pInfo,
2251 VkMemoryRequirements2KHR* pMemoryRequirements)
2252 {
2253 radv_GetBufferMemoryRequirements(device, pInfo->buffer,
2254 &pMemoryRequirements->memoryRequirements);
2255
2256 vk_foreach_struct(ext, pMemoryRequirements->pNext) {
2257 switch (ext->sType) {
2258 case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR: {
2259 VkMemoryDedicatedRequirementsKHR *req =
2260 (VkMemoryDedicatedRequirementsKHR *) ext;
2261 req->requiresDedicatedAllocation = false;
2262 req->prefersDedicatedAllocation = req->requiresDedicatedAllocation;
2263 break;
2264 }
2265 default:
2266 break;
2267 }
2268 }
2269 }
2270
2271 void radv_GetImageMemoryRequirements(
2272 VkDevice device,
2273 VkImage _image,
2274 VkMemoryRequirements* pMemoryRequirements)
2275 {
2276 RADV_FROM_HANDLE(radv_image, image, _image);
2277
2278 pMemoryRequirements->memoryTypeBits = (1u << RADV_MEM_TYPE_COUNT) - 1;
2279
2280 pMemoryRequirements->size = image->size;
2281 pMemoryRequirements->alignment = image->alignment;
2282 }
2283
2284 void radv_GetImageMemoryRequirements2KHR(
2285 VkDevice device,
2286 const VkImageMemoryRequirementsInfo2KHR* pInfo,
2287 VkMemoryRequirements2KHR* pMemoryRequirements)
2288 {
2289 radv_GetImageMemoryRequirements(device, pInfo->image,
2290 &pMemoryRequirements->memoryRequirements);
2291
2292 RADV_FROM_HANDLE(radv_image, image, pInfo->image);
2293
2294 vk_foreach_struct(ext, pMemoryRequirements->pNext) {
2295 switch (ext->sType) {
2296 case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR: {
2297 VkMemoryDedicatedRequirementsKHR *req =
2298 (VkMemoryDedicatedRequirementsKHR *) ext;
2299 req->requiresDedicatedAllocation = image->shareable;
2300 req->prefersDedicatedAllocation = req->requiresDedicatedAllocation;
2301 break;
2302 }
2303 default:
2304 break;
2305 }
2306 }
2307 }
2308
2309 void radv_GetImageSparseMemoryRequirements(
2310 VkDevice device,
2311 VkImage image,
2312 uint32_t* pSparseMemoryRequirementCount,
2313 VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
2314 {
2315 stub();
2316 }
2317
2318 void radv_GetImageSparseMemoryRequirements2KHR(
2319 VkDevice device,
2320 const VkImageSparseMemoryRequirementsInfo2KHR* pInfo,
2321 uint32_t* pSparseMemoryRequirementCount,
2322 VkSparseImageMemoryRequirements2KHR* pSparseMemoryRequirements)
2323 {
2324 stub();
2325 }
2326
2327 void radv_GetDeviceMemoryCommitment(
2328 VkDevice device,
2329 VkDeviceMemory memory,
2330 VkDeviceSize* pCommittedMemoryInBytes)
2331 {
2332 *pCommittedMemoryInBytes = 0;
2333 }
2334
2335 VkResult radv_BindBufferMemory(
2336 VkDevice device,
2337 VkBuffer _buffer,
2338 VkDeviceMemory _memory,
2339 VkDeviceSize memoryOffset)
2340 {
2341 RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
2342 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
2343
2344 if (mem) {
2345 buffer->bo = mem->bo;
2346 buffer->offset = memoryOffset;
2347 } else {
2348 buffer->bo = NULL;
2349 buffer->offset = 0;
2350 }
2351
2352 return VK_SUCCESS;
2353 }
2354
2355 VkResult radv_BindImageMemory(
2356 VkDevice device,
2357 VkImage _image,
2358 VkDeviceMemory _memory,
2359 VkDeviceSize memoryOffset)
2360 {
2361 RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
2362 RADV_FROM_HANDLE(radv_image, image, _image);
2363
2364 if (mem) {
2365 image->bo = mem->bo;
2366 image->offset = memoryOffset;
2367 } else {
2368 image->bo = NULL;
2369 image->offset = 0;
2370 }
2371
2372 return VK_SUCCESS;
2373 }
2374
2375
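/* Sparse binding: buffer and opaque-image binds both boil down to
 * remapping page ranges of a virtual BO; a VK_NULL_HANDLE memory
 * simply unbinds the range. */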
2376 static void
2377 radv_sparse_buffer_bind_memory(struct radv_device *device,
2378 const VkSparseBufferMemoryBindInfo *bind)
2379 {
2380 RADV_FROM_HANDLE(radv_buffer, buffer, bind->buffer);
2381
2382 for (uint32_t i = 0; i < bind->bindCount; ++i) {
2383 struct radv_device_memory *mem = NULL;
2384
2385 if (bind->pBinds[i].memory != VK_NULL_HANDLE)
2386 mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
2387
2388 device->ws->buffer_virtual_bind(buffer->bo,
2389 bind->pBinds[i].resourceOffset,
2390 bind->pBinds[i].size,
2391 mem ? mem->bo : NULL,
2392 bind->pBinds[i].memoryOffset);
2393 }
2394 }
2395
2396 static void
2397 radv_sparse_image_opaque_bind_memory(struct radv_device *device,
2398 const VkSparseImageOpaqueMemoryBindInfo *bind)
2399 {
2400 RADV_FROM_HANDLE(radv_image, image, bind->image);
2401
2402 for (uint32_t i = 0; i < bind->bindCount; ++i) {
2403 struct radv_device_memory *mem = NULL;
2404
2405 if (bind->pBinds[i].memory != VK_NULL_HANDLE)
2406 mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
2407
2408 device->ws->buffer_virtual_bind(image->bo,
2409 bind->pBinds[i].resourceOffset,
2410 bind->pBinds[i].size,
2411 mem ? mem->bo : NULL,
2412 bind->pBinds[i].memoryOffset);
2413 }
2414 }
2415
2416 VkResult radv_QueueBindSparse(
2417 VkQueue _queue,
2418 uint32_t bindInfoCount,
2419 const VkBindSparseInfo* pBindInfo,
2420 VkFence _fence)
2421 {
2422 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2423 RADV_FROM_HANDLE(radv_queue, queue, _queue);
2424 struct radeon_winsys_fence *base_fence = fence ? fence->fence : NULL;
2425 bool fence_emitted = false;
2426
2427 for (uint32_t i = 0; i < bindInfoCount; ++i) {
2428 for (uint32_t j = 0; j < pBindInfo[i].bufferBindCount; ++j) {
2429 radv_sparse_buffer_bind_memory(queue->device,
2430 pBindInfo[i].pBufferBinds + j);
2431 }
2432
2433 for (uint32_t j = 0; j < pBindInfo[i].imageOpaqueBindCount; ++j) {
2434 radv_sparse_image_opaque_bind_memory(queue->device,
2435 pBindInfo[i].pImageOpaqueBinds + j);
2436 }
2437
2438 if (pBindInfo[i].waitSemaphoreCount || pBindInfo[i].signalSemaphoreCount) {
2439 queue->device->ws->cs_submit(queue->hw_ctx, queue->queue_idx,
2440 &queue->device->empty_cs[queue->queue_family_index],
2441 1, NULL, NULL,
2442 (struct radeon_winsys_sem **)pBindInfo[i].pWaitSemaphores,
2443 pBindInfo[i].waitSemaphoreCount,
2444 (struct radeon_winsys_sem **)pBindInfo[i].pSignalSemaphores,
2445 pBindInfo[i].signalSemaphoreCount,
2446 false, base_fence);
2447 fence_emitted = true;
2448 if (fence)
2449 fence->submitted = true;
2450 }
2451 }
2452
2453 if (fence && !fence_emitted) {
2454 fence->signalled = true;
2455 }
2456
2457 return VK_SUCCESS;
2458 }
2459
2460 VkResult radv_CreateFence(
2461 VkDevice _device,
2462 const VkFenceCreateInfo* pCreateInfo,
2463 const VkAllocationCallbacks* pAllocator,
2464 VkFence* pFence)
2465 {
2466 RADV_FROM_HANDLE(radv_device, device, _device);
2467 struct radv_fence *fence = vk_alloc2(&device->alloc, pAllocator,
2468 sizeof(*fence), 8,
2469 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2470
2471 if (!fence)
2472 return VK_ERROR_OUT_OF_HOST_MEMORY;
2473
2474 memset(fence, 0, sizeof(*fence));
2475 fence->submitted = false;
2476 fence->signalled = !!(pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT);
2477 fence->fence = device->ws->create_fence();
2478 if (!fence->fence) {
2479 vk_free2(&device->alloc, pAllocator, fence);
2480 return VK_ERROR_OUT_OF_HOST_MEMORY;
2481 }
2482
2483 *pFence = radv_fence_to_handle(fence);
2484
2485 return VK_SUCCESS;
2486 }
2487
2488 void radv_DestroyFence(
2489 VkDevice _device,
2490 VkFence _fence,
2491 const VkAllocationCallbacks* pAllocator)
2492 {
2493 RADV_FROM_HANDLE(radv_device, device, _device);
2494 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2495
2496 if (!fence)
2497 return;
2498 device->ws->destroy_fence(fence->fence);
2499 vk_free2(&device->alloc, pAllocator, fence);
2500 }
2501
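/* Turn a caller-relative timeout into an absolute CLOCK_MONOTONIC time.
 * The MIN2 clamp keeps the addition from wrapping: e.g. for
 * timeout == UINT64_MAX the result saturates at UINT64_MAX. */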
2502 static uint64_t radv_get_absolute_timeout(uint64_t timeout)
2503 {
2504 uint64_t current_time;
2505 struct timespec tv;
2506
2507 clock_gettime(CLOCK_MONOTONIC, &tv);
2508 current_time = tv.tv_nsec + tv.tv_sec*1000000000ull;
2509
2510 timeout = MIN2(UINT64_MAX - current_time, timeout);
2511
2512 return current_time + timeout;
2513 }
2514
2515 VkResult radv_WaitForFences(
2516 VkDevice _device,
2517 uint32_t fenceCount,
2518 const VkFence* pFences,
2519 VkBool32 waitAll,
2520 uint64_t timeout)
2521 {
2522 RADV_FROM_HANDLE(radv_device, device, _device);
2523 timeout = radv_get_absolute_timeout(timeout);
2524
2525 if (!waitAll && fenceCount > 1) {
2526 fprintf(stderr, "radv: WaitForFences without waitAll not implemented yet\n");
2527 }
2528
2529 for (uint32_t i = 0; i < fenceCount; ++i) {
2530 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
2531 bool expired = false;
2532
2533 if (fence->signalled)
2534 continue;
2535
2536 if (!fence->submitted)
2537 return VK_TIMEOUT;
2538
2539 expired = device->ws->fence_wait(device->ws, fence->fence, true, timeout);
2540 if (!expired)
2541 return VK_TIMEOUT;
2542
2543 fence->signalled = true;
2544 }
2545
2546 return VK_SUCCESS;
2547 }
2548
2549 VkResult radv_ResetFences(VkDevice device,
2550 uint32_t fenceCount,
2551 const VkFence *pFences)
2552 {
2553 for (unsigned i = 0; i < fenceCount; ++i) {
2554 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
2555 fence->submitted = fence->signalled = false;
2556 }
2557
2558 return VK_SUCCESS;
2559 }
2560
2561 VkResult radv_GetFenceStatus(VkDevice _device, VkFence _fence)
2562 {
2563 RADV_FROM_HANDLE(radv_device, device, _device);
2564 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2565
2566 if (fence->signalled)
2567 return VK_SUCCESS;
2568 if (!fence->submitted)
2569 return VK_NOT_READY;
2570
2571 if (!device->ws->fence_wait(device->ws, fence->fence, false, 0))
2572 return VK_NOT_READY;
2573
2574 return VK_SUCCESS;
2575 }
2576
2577
2578 // Queue semaphore functions
2579
2580 VkResult radv_CreateSemaphore(
2581 VkDevice _device,
2582 const VkSemaphoreCreateInfo* pCreateInfo,
2583 const VkAllocationCallbacks* pAllocator,
2584 VkSemaphore* pSemaphore)
2585 {
2586 RADV_FROM_HANDLE(radv_device, device, _device);
2587 struct radeon_winsys_sem *sem;
2588
2589 sem = device->ws->create_sem(device->ws);
2590 if (!sem)
2591 return VK_ERROR_OUT_OF_HOST_MEMORY;
2592
2593 *pSemaphore = radeon_winsys_sem_to_handle(sem);
2594 return VK_SUCCESS;
2595 }
2596
2597 void radv_DestroySemaphore(
2598 VkDevice _device,
2599 VkSemaphore _semaphore,
2600 const VkAllocationCallbacks* pAllocator)
2601 {
2602 RADV_FROM_HANDLE(radv_device, device, _device);
2603 RADV_FROM_HANDLE(radeon_winsys_sem, sem, _semaphore);
2604 if (!_semaphore)
2605 return;
2606
2607 device->ws->destroy_sem(sem);
2608 }
2609
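/* Events are backed by an 8-byte, permanently CPU-mapped GTT buffer; a
 * value of 1 means "set" and 0 means "reset", readable and writable from
 * both the host and the command stream. */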
2610 VkResult radv_CreateEvent(
2611 VkDevice _device,
2612 const VkEventCreateInfo* pCreateInfo,
2613 const VkAllocationCallbacks* pAllocator,
2614 VkEvent* pEvent)
2615 {
2616 RADV_FROM_HANDLE(radv_device, device, _device);
2617 struct radv_event *event = vk_alloc2(&device->alloc, pAllocator,
2618 sizeof(*event), 8,
2619 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2620
2621 if (!event)
2622 return VK_ERROR_OUT_OF_HOST_MEMORY;
2623
2624 event->bo = device->ws->buffer_create(device->ws, 8, 8,
2625 RADEON_DOMAIN_GTT,
2626 RADEON_FLAG_CPU_ACCESS);
2627 if (!event->bo) {
2628 vk_free2(&device->alloc, pAllocator, event);
2629 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2630 }
2631
2632 event->map = (uint64_t*)device->ws->buffer_map(event->bo);
if (!event->map) {
device->ws->buffer_destroy(event->bo);
vk_free2(&device->alloc, pAllocator, event);
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
2633
2634 *pEvent = radv_event_to_handle(event);
2635
2636 return VK_SUCCESS;
2637 }
2638
2639 void radv_DestroyEvent(
2640 VkDevice _device,
2641 VkEvent _event,
2642 const VkAllocationCallbacks* pAllocator)
2643 {
2644 RADV_FROM_HANDLE(radv_device, device, _device);
2645 RADV_FROM_HANDLE(radv_event, event, _event);
2646
2647 if (!event)
2648 return;
2649 device->ws->buffer_destroy(event->bo);
2650 vk_free2(&device->alloc, pAllocator, event);
2651 }
2652
2653 VkResult radv_GetEventStatus(
2654 VkDevice _device,
2655 VkEvent _event)
2656 {
2657 RADV_FROM_HANDLE(radv_event, event, _event);
2658
2659 if (*event->map == 1)
2660 return VK_EVENT_SET;
2661 return VK_EVENT_RESET;
2662 }
2663
2664 VkResult radv_SetEvent(
2665 VkDevice _device,
2666 VkEvent _event)
2667 {
2668 RADV_FROM_HANDLE(radv_event, event, _event);
2669 *event->map = 1;
2670
2671 return VK_SUCCESS;
2672 }
2673
2674 VkResult radv_ResetEvent(
2675 VkDevice _device,
2676 VkEvent _event)
2677 {
2678 RADV_FROM_HANDLE(radv_event, event, _event);
2679 *event->map = 0;
2680
2681 return VK_SUCCESS;
2682 }
2683
2684 VkResult radv_CreateBuffer(
2685 VkDevice _device,
2686 const VkBufferCreateInfo* pCreateInfo,
2687 const VkAllocationCallbacks* pAllocator,
2688 VkBuffer* pBuffer)
2689 {
2690 RADV_FROM_HANDLE(radv_device, device, _device);
2691 struct radv_buffer *buffer;
2692
2693 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
2694
2695 buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
2696 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2697 if (buffer == NULL)
2698 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2699
2700 buffer->size = pCreateInfo->size;
2701 buffer->usage = pCreateInfo->usage;
2702 buffer->bo = NULL;
2703 buffer->offset = 0;
2704 buffer->flags = pCreateInfo->flags;
2705
2706 if (pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) {
2707 buffer->bo = device->ws->buffer_create(device->ws,
2708 align64(buffer->size, 4096),
2709 4096, 0, RADEON_FLAG_VIRTUAL);
2710 if (!buffer->bo) {
2711 vk_free2(&device->alloc, pAllocator, buffer);
2712 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
2713 }
2714 }
2715
2716 *pBuffer = radv_buffer_to_handle(buffer);
2717
2718 return VK_SUCCESS;
2719 }
2720
2721 void radv_DestroyBuffer(
2722 VkDevice _device,
2723 VkBuffer _buffer,
2724 const VkAllocationCallbacks* pAllocator)
2725 {
2726 RADV_FROM_HANDLE(radv_device, device, _device);
2727 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
2728
2729 if (!buffer)
2730 return;
2731
2732 if (buffer->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)
2733 device->ws->buffer_destroy(buffer->bo);
2734
2735 vk_free2(&device->alloc, pAllocator, buffer);
2736 }
2737
2738 static inline unsigned
2739 si_tile_mode_index(const struct radv_image *image, unsigned level, bool stencil)
2740 {
2741 if (stencil)
2742 return image->surface.u.legacy.stencil_tiling_index[level];
2743 else
2744 return image->surface.u.legacy.tiling_index[level];
2745 }
2746
2747 static uint32_t radv_surface_layer_count(struct radv_image_view *iview)
2748 {
2749 return iview->type == VK_IMAGE_VIEW_TYPE_3D ? iview->extent.depth : iview->layer_count;
2750 }
2751
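/* Build the CB_* register state for one color attachment. On GFX9 the
 * surface is described by swizzle modes plus an epitch; older chips use
 * legacy tile-mode indices with explicit pitch/slice tile maxima. */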
2752 static void
2753 radv_initialise_color_surface(struct radv_device *device,
2754 struct radv_color_buffer_info *cb,
2755 struct radv_image_view *iview)
2756 {
2757 const struct vk_format_description *desc;
2758 unsigned ntype, format, swap, endian;
2759 unsigned blend_clamp = 0, blend_bypass = 0;
2760 uint64_t va;
2761 const struct radeon_surf *surf = &iview->image->surface;
2762
2763 desc = vk_format_description(iview->vk_format);
2764
2765 memset(cb, 0, sizeof(*cb));
2766
2767 /* Intensity is implemented as Red, so treat it that way. */
2768 cb->cb_color_attrib = S_028C74_FORCE_DST_ALPHA_1(desc->swizzle[3] == VK_SWIZZLE_1);
2769
2770 va = device->ws->buffer_get_va(iview->bo) + iview->image->offset;
2771
2772 if (device->physical_device->rad_info.chip_class >= GFX9) {
2773 struct gfx9_surf_meta_flags meta;
2774 if (iview->image->dcc_offset)
2775 meta = iview->image->surface.u.gfx9.dcc;
2776 else
2777 meta = iview->image->surface.u.gfx9.cmask;
2778
2779 cb->cb_color_attrib |= S_028C74_COLOR_SW_MODE(iview->image->surface.u.gfx9.surf.swizzle_mode) |
2780 S_028C74_FMASK_SW_MODE(iview->image->surface.u.gfx9.fmask.swizzle_mode) |
2781 S_028C74_RB_ALIGNED(meta.rb_aligned) |
2782 S_028C74_PIPE_ALIGNED(meta.pipe_aligned);
2783
2784 va += iview->image->surface.u.gfx9.surf_offset >> 8;
2785 } else {
2786 const struct legacy_surf_level *level_info = &surf->u.legacy.level[iview->base_mip];
2787 unsigned pitch_tile_max, slice_tile_max, tile_mode_index;
2788
2789 va += level_info->offset;
2790
2791 pitch_tile_max = level_info->nblk_x / 8 - 1;
2792 slice_tile_max = (level_info->nblk_x * level_info->nblk_y) / 64 - 1;
2793 tile_mode_index = si_tile_mode_index(iview->image, iview->base_mip, false);
2794
2795 cb->cb_color_pitch = S_028C64_TILE_MAX(pitch_tile_max);
2796 cb->cb_color_slice = S_028C68_TILE_MAX(slice_tile_max);
2797 cb->cb_color_cmask_slice = iview->image->cmask.slice_tile_max;
2798
2799 cb->cb_color_attrib |= S_028C74_TILE_MODE_INDEX(tile_mode_index);
2800 cb->micro_tile_mode = iview->image->surface.micro_tile_mode;
2801
2802 if (iview->image->fmask.size) {
2803 if (device->physical_device->rad_info.chip_class >= CIK)
2804 cb->cb_color_pitch |= S_028C64_FMASK_TILE_MAX(iview->image->fmask.pitch_in_pixels / 8 - 1);
2805 cb->cb_color_attrib |= S_028C74_FMASK_TILE_MODE_INDEX(iview->image->fmask.tile_mode_index);
2806 cb->cb_color_fmask_slice = S_028C88_TILE_MAX(iview->image->fmask.slice_tile_max);
2807 } else {
2808 /* This must be set for fast clear to work without FMASK. */
2809 if (device->physical_device->rad_info.chip_class >= CIK)
2810 cb->cb_color_pitch |= S_028C64_FMASK_TILE_MAX(pitch_tile_max);
2811 cb->cb_color_attrib |= S_028C74_FMASK_TILE_MODE_INDEX(tile_mode_index);
2812 cb->cb_color_fmask_slice = S_028C88_TILE_MAX(slice_tile_max);
2813 }
2814 }
2815
2816 cb->cb_color_base = va >> 8;
2817 if (device->physical_device->rad_info.chip_class < GFX9)
2818 cb->cb_color_base |= iview->image->surface.u.legacy.tile_swizzle;
2819 /* CMASK variables */
2820 va = device->ws->buffer_get_va(iview->bo) + iview->image->offset;
2821 va += iview->image->cmask.offset;
2822 cb->cb_color_cmask = va >> 8;
2823
2824 va = device->ws->buffer_get_va(iview->bo) + iview->image->offset;
2825 va += iview->image->dcc_offset;
2826 cb->cb_dcc_base = va >> 8;
2827 if (device->physical_device->rad_info.chip_class < GFX9)
2828 cb->cb_dcc_base |= iview->image->surface.u.legacy.tile_swizzle;
2829
2830 uint32_t max_slice = radv_surface_layer_count(iview);
2831 cb->cb_color_view = S_028C6C_SLICE_START(iview->base_layer) |
2832 S_028C6C_SLICE_MAX(iview->base_layer + max_slice - 1);
2833
2834 if (iview->image->info.samples > 1) {
2835 unsigned log_samples = util_logbase2(iview->image->info.samples);
2836
2837 cb->cb_color_attrib |= S_028C74_NUM_SAMPLES(log_samples) |
2838 S_028C74_NUM_FRAGMENTS(log_samples);
2839 }
2840
2841 if (iview->image->fmask.size) {
2842 va = device->ws->buffer_get_va(iview->bo) + iview->image->offset + iview->image->fmask.offset;
2843 cb->cb_color_fmask = va >> 8;
2844 if (device->physical_device->rad_info.chip_class < GFX9)
2845 cb->cb_color_fmask |= iview->image->surface.u.legacy.tile_swizzle;
2846 } else {
2847 cb->cb_color_fmask = cb->cb_color_base;
2848 }
2849
2850 ntype = radv_translate_color_numformat(iview->vk_format,
2851 desc,
2852 vk_format_get_first_non_void_channel(iview->vk_format));
2853 format = radv_translate_colorformat(iview->vk_format);
2854 if (format == V_028C70_COLOR_INVALID || ntype == ~0u)
2855 radv_finishme("Illegal color\n");
2856 swap = radv_translate_colorswap(iview->vk_format, FALSE);
2857 endian = radv_colorformat_endian_swap(format);
2858
2859 /* blend clamp should be set for all NORM/SRGB types */
2860 if (ntype == V_028C70_NUMBER_UNORM ||
2861 ntype == V_028C70_NUMBER_SNORM ||
2862 ntype == V_028C70_NUMBER_SRGB)
2863 blend_clamp = 1;
2864
2865 /* set blend bypass according to the docs for SINT/UINT formats and
2866 the 8_24/24_8 COLOR variants */
2867 if (ntype == V_028C70_NUMBER_UINT || ntype == V_028C70_NUMBER_SINT ||
2868 format == V_028C70_COLOR_8_24 || format == V_028C70_COLOR_24_8 ||
2869 format == V_028C70_COLOR_X24_8_32_FLOAT) {
2870 blend_clamp = 0;
2871 blend_bypass = 1;
2872 }
2873 #if 0
2874 if ((ntype == V_028C70_NUMBER_UINT || ntype == V_028C70_NUMBER_SINT) &&
2875 (format == V_028C70_COLOR_8 ||
2876 format == V_028C70_COLOR_8_8 ||
2877 format == V_028C70_COLOR_8_8_8_8))
2878 ->color_is_int8 = true;
2879 #endif
2880 cb->cb_color_info = S_028C70_FORMAT(format) |
2881 S_028C70_COMP_SWAP(swap) |
2882 S_028C70_BLEND_CLAMP(blend_clamp) |
2883 S_028C70_BLEND_BYPASS(blend_bypass) |
2884 S_028C70_SIMPLE_FLOAT(1) |
2885 S_028C70_ROUND_MODE(ntype != V_028C70_NUMBER_UNORM &&
2886 ntype != V_028C70_NUMBER_SNORM &&
2887 ntype != V_028C70_NUMBER_SRGB &&
2888 format != V_028C70_COLOR_8_24 &&
2889 format != V_028C70_COLOR_24_8) |
2890 S_028C70_NUMBER_TYPE(ntype) |
2891 S_028C70_ENDIAN(endian);
2892 if (iview->image->info.samples > 1)
2893 if (iview->image->fmask.size)
2894 cb->cb_color_info |= S_028C70_COMPRESSION(1);
2895
2896 if (iview->image->cmask.size &&
2897 !(device->debug_flags & RADV_DEBUG_NO_FAST_CLEARS))
2898 cb->cb_color_info |= S_028C70_FAST_CLEAR(1);
2899
2900 if (iview->image->surface.dcc_size && iview->base_mip < surf->num_dcc_levels)
2901 cb->cb_color_info |= S_028C70_DCC_ENABLE(1);
2902
2903 if (device->physical_device->rad_info.chip_class >= VI) {
2904 unsigned max_uncompressed_block_size = 2;
2905 if (iview->image->info.samples > 1) {
2906 if (iview->image->surface.bpe == 1)
2907 max_uncompressed_block_size = 0;
2908 else if (iview->image->surface.bpe == 2)
2909 max_uncompressed_block_size = 1;
2910 }
2911
2912 cb->cb_dcc_control = S_028C78_MAX_UNCOMPRESSED_BLOCK_SIZE(max_uncompressed_block_size) |
2913 S_028C78_INDEPENDENT_64B_BLOCKS(1);
2914 }
2915
2916 /* This must be set for fast clear to work without FMASK. */
2917 if (!iview->image->fmask.size &&
2918 device->physical_device->rad_info.chip_class == SI) {
2919 unsigned bankh = util_logbase2(iview->image->surface.u.legacy.bankh);
2920 cb->cb_color_attrib |= S_028C74_FMASK_BANK_HEIGHT(bankh);
2921 }
2922
2923 if (device->physical_device->rad_info.chip_class >= GFX9) {
2924 uint32_t max_slice = radv_surface_layer_count(iview);
2925 unsigned mip0_depth = iview->base_layer + max_slice - 1;
2926
2927 cb->cb_color_view |= S_028C6C_MIP_LEVEL(iview->base_mip);
2928 cb->cb_color_attrib |= S_028C74_MIP0_DEPTH(mip0_depth) |
2929 S_028C74_RESOURCE_TYPE(iview->image->surface.u.gfx9.resource_type);
2930 cb->cb_color_attrib2 = S_028C68_MIP0_WIDTH(iview->image->info.width - 1) |
2931 S_028C68_MIP0_HEIGHT(iview->image->info.height - 1) |
2932 S_028C68_MAX_MIP(iview->image->info.levels);
2933
2934 cb->gfx9_epitch = S_0287A0_EPITCH(iview->image->surface.u.gfx9.surf.epitch);
2935
2936 }
2937 }
2938
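/* Build the DB_* register state for a depth/stencil attachment: the
 * polygon-offset scale depends on the depth format, and HTILE is only
 * enabled for the base mip level. */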
2939 static void
2940 radv_initialise_ds_surface(struct radv_device *device,
2941 struct radv_ds_buffer_info *ds,
2942 struct radv_image_view *iview)
2943 {
2944 unsigned level = iview->base_mip;
2945 unsigned format, stencil_format;
2946 uint64_t va, s_offs, z_offs;
2947 bool stencil_only = false;
2948 memset(ds, 0, sizeof(*ds));
2949 switch (iview->image->vk_format) {
2950 case VK_FORMAT_D24_UNORM_S8_UINT:
2951 case VK_FORMAT_X8_D24_UNORM_PACK32:
2952 ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-24);
2953 ds->offset_scale = 2.0f;
2954 break;
2955 case VK_FORMAT_D16_UNORM:
2956 case VK_FORMAT_D16_UNORM_S8_UINT:
2957 ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-16);
2958 ds->offset_scale = 4.0f;
2959 break;
2960 case VK_FORMAT_D32_SFLOAT:
2961 case VK_FORMAT_D32_SFLOAT_S8_UINT:
2962 ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-23) |
2963 S_028B78_POLY_OFFSET_DB_IS_FLOAT_FMT(1);
2964 ds->offset_scale = 1.0f;
2965 break;
2966 case VK_FORMAT_S8_UINT:
2967 stencil_only = true;
2968 break;
2969 default:
2970 break;
2971 }
2972
2973 format = radv_translate_dbformat(iview->image->vk_format);
2974 stencil_format = iview->image->surface.flags & RADEON_SURF_SBUFFER ?
2975 V_028044_STENCIL_8 : V_028044_STENCIL_INVALID;
2976
2977 uint32_t max_slice = radv_surface_layer_count(iview);
2978 ds->db_depth_view = S_028008_SLICE_START(iview->base_layer) |
2979 S_028008_SLICE_MAX(iview->base_layer + max_slice - 1);
2980
2981 ds->db_htile_data_base = 0;
2982 ds->db_htile_surface = 0;
2983
2984 va = device->ws->buffer_get_va(iview->bo) + iview->image->offset;
2985 s_offs = z_offs = va;
2986
2987 if (device->physical_device->rad_info.chip_class >= GFX9) {
2988 assert(iview->image->surface.u.gfx9.surf_offset == 0);
2989 s_offs += iview->image->surface.u.gfx9.stencil_offset;
2990
2991 ds->db_z_info = S_028038_FORMAT(format) |
2992 S_028038_NUM_SAMPLES(util_logbase2(iview->image->info.samples)) |
2993 S_028038_SW_MODE(iview->image->surface.u.gfx9.surf.swizzle_mode) |
2994 S_028038_MAXMIP(iview->image->info.levels - 1);
2995 ds->db_stencil_info = S_02803C_FORMAT(stencil_format) |
2996 S_02803C_SW_MODE(iview->image->surface.u.gfx9.stencil.swizzle_mode);
2997
2998 ds->db_z_info2 = S_028068_EPITCH(iview->image->surface.u.gfx9.surf.epitch);
2999 ds->db_stencil_info2 = S_02806C_EPITCH(iview->image->surface.u.gfx9.stencil.epitch);
3000 ds->db_depth_view |= S_028008_MIPID(level);
3001
3002 ds->db_depth_size = S_02801C_X_MAX(iview->image->info.width - 1) |
3003 S_02801C_Y_MAX(iview->image->info.height - 1);
3004
3005 /* Only use HTILE for the first level. */
3006 if (iview->image->surface.htile_size && !level) {
3007 ds->db_z_info |= S_028038_TILE_SURFACE_ENABLE(1);
3008
3009 if (!(iview->image->surface.flags & RADEON_SURF_SBUFFER))
3010 /* Use all of the htile_buffer for depth if there's no stencil. */
3011 ds->db_stencil_info |= S_02803C_TILE_STENCIL_DISABLE(1);
3012 va = device->ws->buffer_get_va(iview->bo) + iview->image->offset +
3013 iview->image->htile_offset;
3014 ds->db_htile_data_base = va >> 8;
3015 ds->db_htile_surface = S_028ABC_FULL_CACHE(1) |
3016 S_028ABC_PIPE_ALIGNED(iview->image->surface.u.gfx9.htile.pipe_aligned) |
3017 S_028ABC_RB_ALIGNED(iview->image->surface.u.gfx9.htile.rb_aligned);
3018 }
3019 } else {
3020 const struct legacy_surf_level *level_info = &iview->image->surface.u.legacy.level[level];
3021
3022 if (stencil_only)
3023 level_info = &iview->image->surface.u.legacy.stencil_level[level];
3024
3025 z_offs += iview->image->surface.u.legacy.level[level].offset;
3026 s_offs += iview->image->surface.u.legacy.stencil_level[level].offset;
3027
3028 ds->db_depth_info = S_02803C_ADDR5_SWIZZLE_MASK(1);
3029 ds->db_z_info = S_028040_FORMAT(format) | S_028040_ZRANGE_PRECISION(1);
3030 ds->db_stencil_info = S_028044_FORMAT(stencil_format);
3031
3032 if (iview->image->info.samples > 1)
3033 ds->db_z_info |= S_028040_NUM_SAMPLES(util_logbase2(iview->image->info.samples));
3034
3035 if (device->physical_device->rad_info.chip_class >= CIK) {
3036 struct radeon_info *info = &device->physical_device->rad_info;
3037 unsigned tiling_index = iview->image->surface.u.legacy.tiling_index[level];
3038 unsigned stencil_index = iview->image->surface.u.legacy.stencil_tiling_index[level];
3039 unsigned macro_index = iview->image->surface.u.legacy.macro_tile_index;
3040 unsigned tile_mode = info->si_tile_mode_array[tiling_index];
3041 unsigned stencil_tile_mode = info->si_tile_mode_array[stencil_index];
3042 unsigned macro_mode = info->cik_macrotile_mode_array[macro_index];
3043
3044 if (stencil_only)
3045 tile_mode = stencil_tile_mode;
3046
3047 ds->db_depth_info |=
3048 S_02803C_ARRAY_MODE(G_009910_ARRAY_MODE(tile_mode)) |
3049 S_02803C_PIPE_CONFIG(G_009910_PIPE_CONFIG(tile_mode)) |
3050 S_02803C_BANK_WIDTH(G_009990_BANK_WIDTH(macro_mode)) |
3051 S_02803C_BANK_HEIGHT(G_009990_BANK_HEIGHT(macro_mode)) |
3052 S_02803C_MACRO_TILE_ASPECT(G_009990_MACRO_TILE_ASPECT(macro_mode)) |
3053 S_02803C_NUM_BANKS(G_009990_NUM_BANKS(macro_mode));
3054 ds->db_z_info |= S_028040_TILE_SPLIT(G_009910_TILE_SPLIT(tile_mode));
3055 ds->db_stencil_info |= S_028044_TILE_SPLIT(G_009910_TILE_SPLIT(stencil_tile_mode));
3056 } else {
3057 unsigned tile_mode_index = si_tile_mode_index(iview->image, level, false);
3058 ds->db_z_info |= S_028040_TILE_MODE_INDEX(tile_mode_index);
3059 tile_mode_index = si_tile_mode_index(iview->image, level, true);
3060 ds->db_stencil_info |= S_028044_TILE_MODE_INDEX(tile_mode_index);
3061 }
3062
3063 ds->db_depth_size = S_028058_PITCH_TILE_MAX((level_info->nblk_x / 8) - 1) |
3064 S_028058_HEIGHT_TILE_MAX((level_info->nblk_y / 8) - 1);
3065 ds->db_depth_slice = S_02805C_SLICE_TILE_MAX((level_info->nblk_x * level_info->nblk_y) / 64 - 1);
3066
3067 if (iview->image->surface.htile_size && !level) {
3068 ds->db_z_info |= S_028040_TILE_SURFACE_ENABLE(1);
3069
3070 if (!(iview->image->surface.flags & RADEON_SURF_SBUFFER))
3071 /* Use all of the htile_buffer for depth if there's no stencil. */
3072 ds->db_stencil_info |= S_028044_TILE_STENCIL_DISABLE(1);
3073
3074 va = device->ws->buffer_get_va(iview->bo) + iview->image->offset +
3075 iview->image->htile_offset;
3076 ds->db_htile_data_base = va >> 8;
3077 ds->db_htile_surface = S_028ABC_FULL_CACHE(1);
3078 }
3079 }
3080
3081 ds->db_z_read_base = ds->db_z_write_base = z_offs >> 8;
3082 ds->db_stencil_read_base = ds->db_stencil_write_base = s_offs >> 8;
3083 }
3084
3085 VkResult radv_CreateFramebuffer(
3086 VkDevice _device,
3087 const VkFramebufferCreateInfo* pCreateInfo,
3088 const VkAllocationCallbacks* pAllocator,
3089 VkFramebuffer* pFramebuffer)
3090 {
3091 RADV_FROM_HANDLE(radv_device, device, _device);
3092 struct radv_framebuffer *framebuffer;
3093
3094 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
3095
3096 size_t size = sizeof(*framebuffer) +
3097 sizeof(struct radv_attachment_info) * pCreateInfo->attachmentCount;
3098 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
3099 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3100 if (framebuffer == NULL)
3101 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3102
3103 framebuffer->attachment_count = pCreateInfo->attachmentCount;
3104 framebuffer->width = pCreateInfo->width;
3105 framebuffer->height = pCreateInfo->height;
3106 framebuffer->layers = pCreateInfo->layers;
3107 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
3108 VkImageView _iview = pCreateInfo->pAttachments[i];
3109 struct radv_image_view *iview = radv_image_view_from_handle(_iview);
3110 framebuffer->attachments[i].attachment = iview;
3111 if (iview->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) {
3112 radv_initialise_color_surface(device, &framebuffer->attachments[i].cb, iview);
3113 } else if (iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
3114 radv_initialise_ds_surface(device, &framebuffer->attachments[i].ds, iview);
3115 }
3116 framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
3117 framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
3118 framebuffer->layers = MIN2(framebuffer->layers, radv_surface_layer_count(iview));
3119 }
3120
3121 *pFramebuffer = radv_framebuffer_to_handle(framebuffer);
3122 return VK_SUCCESS;
3123 }
3124
3125 void radv_DestroyFramebuffer(
3126 VkDevice _device,
3127 VkFramebuffer _fb,
3128 const VkAllocationCallbacks* pAllocator)
3129 {
3130 RADV_FROM_HANDLE(radv_device, device, _device);
3131 RADV_FROM_HANDLE(radv_framebuffer, fb, _fb);
3132
3133 if (!fb)
3134 return;
3135 vk_free2(&device->alloc, pAllocator, fb);
3136 }
3137
3138 static unsigned radv_tex_wrap(VkSamplerAddressMode address_mode)
3139 {
3140 switch (address_mode) {
3141 case VK_SAMPLER_ADDRESS_MODE_REPEAT:
3142 return V_008F30_SQ_TEX_WRAP;
3143 case VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT:
3144 return V_008F30_SQ_TEX_MIRROR;
3145 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE:
3146 return V_008F30_SQ_TEX_CLAMP_LAST_TEXEL;
3147 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER:
3148 return V_008F30_SQ_TEX_CLAMP_BORDER;
3149 case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE:
3150 return V_008F30_SQ_TEX_MIRROR_ONCE_LAST_TEXEL;
3151 default:
3152 unreachable("illegal tex wrap mode");
3153 break;
3154 }
3155 }
3156
3157 static unsigned
3158 radv_tex_compare(VkCompareOp op)
3159 {
3160 switch (op) {
3161 case VK_COMPARE_OP_NEVER:
3162 return V_008F30_SQ_TEX_DEPTH_COMPARE_NEVER;
3163 case VK_COMPARE_OP_LESS:
3164 return V_008F30_SQ_TEX_DEPTH_COMPARE_LESS;
3165 case VK_COMPARE_OP_EQUAL:
3166 return V_008F30_SQ_TEX_DEPTH_COMPARE_EQUAL;
3167 case VK_COMPARE_OP_LESS_OR_EQUAL:
3168 return V_008F30_SQ_TEX_DEPTH_COMPARE_LESSEQUAL;
3169 case VK_COMPARE_OP_GREATER:
3170 return V_008F30_SQ_TEX_DEPTH_COMPARE_GREATER;
3171 case VK_COMPARE_OP_NOT_EQUAL:
3172 return V_008F30_SQ_TEX_DEPTH_COMPARE_NOTEQUAL;
3173 case VK_COMPARE_OP_GREATER_OR_EQUAL:
3174 return V_008F30_SQ_TEX_DEPTH_COMPARE_GREATEREQUAL;
3175 case VK_COMPARE_OP_ALWAYS:
3176 return V_008F30_SQ_TEX_DEPTH_COMPARE_ALWAYS;
3177 default:
3178 unreachable("illegal compare mode");
3179 break;
3180 }
3181 }
3182
3183 static unsigned
3184 radv_tex_filter(VkFilter filter, unsigned max_aniso)
3185 {
3186 switch (filter) {
3187 case VK_FILTER_NEAREST:
3188 return (max_aniso > 1 ? V_008F38_SQ_TEX_XY_FILTER_ANISO_POINT :
3189 V_008F38_SQ_TEX_XY_FILTER_POINT);
3190 case VK_FILTER_LINEAR:
3191 return (max_aniso > 1 ? V_008F38_SQ_TEX_XY_FILTER_ANISO_BILINEAR :
3192 V_008F38_SQ_TEX_XY_FILTER_BILINEAR);
3193 case VK_FILTER_CUBIC_IMG:
3194 default:
3195 fprintf(stderr, "illegal texture filter\n");
3196 return 0;
3197 }
3198 }
3199
3200 static unsigned
3201 radv_tex_mipfilter(VkSamplerMipmapMode mode)
3202 {
3203 switch (mode) {
3204 case VK_SAMPLER_MIPMAP_MODE_NEAREST:
3205 return V_008F38_SQ_TEX_Z_FILTER_POINT;
3206 case VK_SAMPLER_MIPMAP_MODE_LINEAR:
3207 return V_008F38_SQ_TEX_Z_FILTER_LINEAR;
3208 default:
3209 return V_008F38_SQ_TEX_Z_FILTER_NONE;
3210 }
3211 }
3212
3213 static unsigned
3214 radv_tex_bordercolor(VkBorderColor bcolor)
3215 {
3216 switch (bcolor) {
3217 case VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK:
3218 case VK_BORDER_COLOR_INT_TRANSPARENT_BLACK:
3219 return V_008F3C_SQ_TEX_BORDER_COLOR_TRANS_BLACK;
3220 case VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK:
3221 case VK_BORDER_COLOR_INT_OPAQUE_BLACK:
3222 return V_008F3C_SQ_TEX_BORDER_COLOR_OPAQUE_BLACK;
3223 case VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE:
3224 case VK_BORDER_COLOR_INT_OPAQUE_WHITE:
3225 return V_008F3C_SQ_TEX_BORDER_COLOR_OPAQUE_WHITE;
3226 default:
3227 break;
3228 }
3229 return 0;
3230 }
3231
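/* Map maxAnisotropy (a sample-count ratio) onto the hardware's log2
 * encoding: 1x -> 0, 2x -> 1, 4x -> 2, 8x -> 3, 16x and up -> 4. */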
3232 static unsigned
3233 radv_tex_aniso_filter(unsigned filter)
3234 {
3235 if (filter < 2)
3236 return 0;
3237 if (filter < 4)
3238 return 1;
3239 if (filter < 8)
3240 return 2;
3241 if (filter < 16)
3242 return 3;
3243 return 4;
3244 }
3245
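/* Pack the VkSamplerCreateInfo into the four hardware sampler dwords:
 * wrap modes, compare func and aniso setup in word 0; LOD clamps in
 * word 1; LOD bias and filters in word 2; border color in word 3. */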
3246 static void
3247 radv_init_sampler(struct radv_device *device,
3248 struct radv_sampler *sampler,
3249 const VkSamplerCreateInfo *pCreateInfo)
3250 {
3251 uint32_t max_aniso = pCreateInfo->anisotropyEnable && pCreateInfo->maxAnisotropy > 1.0 ?
3252 (uint32_t) pCreateInfo->maxAnisotropy : 0;
3253 uint32_t max_aniso_ratio = radv_tex_aniso_filter(max_aniso);
3254 bool is_vi = (device->physical_device->rad_info.chip_class >= VI);
3255
3256 sampler->state[0] = (S_008F30_CLAMP_X(radv_tex_wrap(pCreateInfo->addressModeU)) |
3257 S_008F30_CLAMP_Y(radv_tex_wrap(pCreateInfo->addressModeV)) |
3258 S_008F30_CLAMP_Z(radv_tex_wrap(pCreateInfo->addressModeW)) |
3259 S_008F30_MAX_ANISO_RATIO(max_aniso_ratio) |
3260 S_008F30_DEPTH_COMPARE_FUNC(radv_tex_compare(pCreateInfo->compareOp)) |
3261 S_008F30_FORCE_UNNORMALIZED(pCreateInfo->unnormalizedCoordinates ? 1 : 0) |
3262 S_008F30_ANISO_THRESHOLD(max_aniso_ratio >> 1) |
3263 S_008F30_ANISO_BIAS(max_aniso_ratio) |
3264 S_008F30_DISABLE_CUBE_WRAP(0) |
3265 S_008F30_COMPAT_MODE(is_vi));
3266 sampler->state[1] = (S_008F34_MIN_LOD(S_FIXED(CLAMP(pCreateInfo->minLod, 0, 15), 8)) |
3267 S_008F34_MAX_LOD(S_FIXED(CLAMP(pCreateInfo->maxLod, 0, 15), 8)) |
3268 S_008F34_PERF_MIP(max_aniso_ratio ? max_aniso_ratio + 6 : 0));
3269 sampler->state[2] = (S_008F38_LOD_BIAS(S_FIXED(CLAMP(pCreateInfo->mipLodBias, -16, 16), 8)) |
3270 S_008F38_XY_MAG_FILTER(radv_tex_filter(pCreateInfo->magFilter, max_aniso)) |
3271 S_008F38_XY_MIN_FILTER(radv_tex_filter(pCreateInfo->minFilter, max_aniso)) |
3272 S_008F38_MIP_FILTER(radv_tex_mipfilter(pCreateInfo->mipmapMode)) |
3273 S_008F38_MIP_POINT_PRECLAMP(0) |
3274 S_008F38_DISABLE_LSB_CEIL(1) |
3275 S_008F38_FILTER_PREC_FIX(1) |
3276 S_008F38_ANISO_OVERRIDE(is_vi));
3277 sampler->state[3] = (S_008F3C_BORDER_COLOR_PTR(0) |
3278 S_008F3C_BORDER_COLOR_TYPE(radv_tex_bordercolor(pCreateInfo->borderColor)));
3279 }
3280
3281 VkResult radv_CreateSampler(
3282 VkDevice _device,
3283 const VkSamplerCreateInfo* pCreateInfo,
3284 const VkAllocationCallbacks* pAllocator,
3285 VkSampler* pSampler)
3286 {
3287 RADV_FROM_HANDLE(radv_device, device, _device);
3288 struct radv_sampler *sampler;
3289
3290 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
3291
3292 sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
3293 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3294 if (!sampler)
3295 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3296
3297 radv_init_sampler(device, sampler, pCreateInfo);
3298 *pSampler = radv_sampler_to_handle(sampler);
3299
3300 return VK_SUCCESS;
3301 }
3302
3303 void radv_DestroySampler(
3304 VkDevice _device,
3305 VkSampler _sampler,
3306 const VkAllocationCallbacks* pAllocator)
3307 {
3308 RADV_FROM_HANDLE(radv_device, device, _device);
3309 RADV_FROM_HANDLE(radv_sampler, sampler, _sampler);
3310
3311 if (!sampler)
3312 return;
3313 vk_free2(&device->alloc, pAllocator, sampler);
3314 }
3315
3316 /* vk_icd.h does not declare this function, so we declare it here to
3317 * suppress Wmissing-prototypes.
3318 */
3319 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
3320 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
3321
3322 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
3323 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
3324 {
3325 /* For the full details on loader interface versioning, see
3326 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
3327 * What follows is a condensed summary, to help you navigate the large and
3328 * confusing official doc.
3329 *
3330 * - Loader interface v0 is incompatible with later versions. We don't
3331 * support it.
3332 *
3333 * - In loader interface v1:
3334 * - The first ICD entrypoint called by the loader is
3335 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
3336 * entrypoint.
3337 * - The ICD must statically expose no other Vulkan symbol unless it is
3338 * linked with -Bsymbolic.
3339 * - Each dispatchable Vulkan handle created by the ICD must be
3340 * a pointer to a struct whose first member is VK_LOADER_DATA. The
3341 * ICD must initialize VK_LOADER_DATA.loadMagic to ICD_LOADER_MAGIC.
3342 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
3343 * vkDestroySurfaceKHR(). The ICD must be capable of working with
3344 * such loader-managed surfaces.
3345 *
3346 * - Loader interface v2 differs from v1 in:
3347 * - The first ICD entrypoint called by the loader is
3348 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
3349 * statically expose this entrypoint.
3350 *
3351 * - Loader interface v3 differs from v2 in:
3352 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
3353 * vkDestroySurfaceKHR(), and the other APIs that use VkSurfaceKHR,
3354 * because the loader no longer does so.
3355 */
3356 *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
3357 return VK_SUCCESS;
3358 }
3359
3360 VkResult radv_GetMemoryFdKHR(VkDevice _device,
3361 const VkMemoryGetFdInfoKHR *pGetFdInfo,
3362 int *pFD)
3363 {
3364 RADV_FROM_HANDLE(radv_device, device, _device);
3365 RADV_FROM_HANDLE(radv_device_memory, memory, pGetFdInfo->memory);
3366
3367 assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
3368
3369 /* We support only one handle type. */
3370 assert(pGetFdInfo->handleType ==
3371 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
3372
3373 bool ret = radv_get_memory_fd(device, memory, pFD);
3374 if (!ret)
3375 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3376 return VK_SUCCESS;
3377 }
3378
3379 VkResult radv_GetMemoryFdPropertiesKHR(VkDevice _device,
3380 VkExternalMemoryHandleTypeFlagBitsKHR handleType,
3381 int fd,
3382 VkMemoryFdPropertiesKHR *pMemoryFdProperties)
3383 {
3384 /* The valid usage section for this function says:
3385 *
3386 * "handleType must not be one of the handle types defined as opaque."
3387 *
3388 * Since we only handle opaque handles for now, there are no FD properties.
3389 */
3390 return VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR;
3391 }