radv: Free temporary syncobj after waiting on it.
[mesa.git] / src / amd / vulkan / radv_device.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 */
27
28 #include <stdbool.h>
29 #include <string.h>
30 #include <unistd.h>
31 #include <fcntl.h>
32 #include "radv_debug.h"
33 #include "radv_private.h"
34 #include "radv_shader.h"
35 #include "radv_cs.h"
36 #include "util/disk_cache.h"
37 #include "util/strtod.h"
38 #include "vk_util.h"
39 #include <xf86drm.h>
40 #include <amdgpu.h>
41 #include <amdgpu_drm.h>
42 #include "winsys/amdgpu/radv_amdgpu_winsys_public.h"
43 #include "ac_llvm_util.h"
44 #include "vk_format.h"
45 #include "sid.h"
46 #include "gfx9d.h"
47 #include "util/debug.h"
48
49 static int
50 radv_device_get_cache_uuid(enum radeon_family family, void *uuid)
51 {
52 uint32_t mesa_timestamp, llvm_timestamp;
53 uint16_t f = family;
54 memset(uuid, 0, VK_UUID_SIZE);
55 if (!disk_cache_get_function_timestamp(radv_device_get_cache_uuid, &mesa_timestamp) ||
56 !disk_cache_get_function_timestamp(LLVMInitializeAMDGPUTargetInfo, &llvm_timestamp))
57 return -1;
58
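/* Resulting UUID layout: bytes 0-3 Mesa build timestamp, bytes 4-7 LLVM
 * build timestamp, bytes 8-9 chip family, bytes 10+ the literal "radv". */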
59 memcpy(uuid, &mesa_timestamp, 4);
60 memcpy((char*)uuid + 4, &llvm_timestamp, 4);
61 memcpy((char*)uuid + 8, &f, 2);
62 snprintf((char*)uuid + 10, VK_UUID_SIZE - 10, "radv");
63 return 0;
64 }
65
66 static void
67 radv_get_driver_uuid(void *uuid)
68 {
69 ac_compute_driver_uuid(uuid, VK_UUID_SIZE);
70 }
71
72 static void
73 radv_get_device_uuid(struct radeon_info *info, void *uuid)
74 {
75 ac_compute_device_uuid(info, uuid, VK_UUID_SIZE);
76 }
77
78 static const char *
79 get_chip_name(enum radeon_family family)
80 {
81 switch (family) {
82 case CHIP_TAHITI: return "AMD RADV TAHITI";
83 case CHIP_PITCAIRN: return "AMD RADV PITCAIRN";
84 case CHIP_VERDE: return "AMD RADV CAPE VERDE";
85 case CHIP_OLAND: return "AMD RADV OLAND";
86 case CHIP_HAINAN: return "AMD RADV HAINAN";
87 case CHIP_BONAIRE: return "AMD RADV BONAIRE";
88 case CHIP_KAVERI: return "AMD RADV KAVERI";
89 case CHIP_KABINI: return "AMD RADV KABINI";
90 case CHIP_HAWAII: return "AMD RADV HAWAII";
91 case CHIP_MULLINS: return "AMD RADV MULLINS";
92 case CHIP_TONGA: return "AMD RADV TONGA";
93 case CHIP_ICELAND: return "AMD RADV ICELAND";
94 case CHIP_CARRIZO: return "AMD RADV CARRIZO";
95 case CHIP_FIJI: return "AMD RADV FIJI";
96 case CHIP_POLARIS10: return "AMD RADV POLARIS10";
97 case CHIP_POLARIS11: return "AMD RADV POLARIS11";
98 case CHIP_POLARIS12: return "AMD RADV POLARIS12";
99 case CHIP_STONEY: return "AMD RADV STONEY";
100 case CHIP_VEGA10: return "AMD RADV VEGA";
101 case CHIP_RAVEN: return "AMD RADV RAVEN";
102 default: return "AMD RADV unknown";
103 }
104 }
105
106 static void
107 radv_physical_device_init_mem_types(struct radv_physical_device *device)
108 {
109 STATIC_ASSERT(RADV_MEM_HEAP_COUNT <= VK_MAX_MEMORY_HEAPS);
110 uint64_t visible_vram_size = MIN2(device->rad_info.vram_size,
111 device->rad_info.vram_vis_size);
112
113 int vram_index = -1, visible_vram_index = -1, gart_index = -1;
114 device->memory_properties.memoryHeapCount = 0;
115 if (device->rad_info.vram_size - visible_vram_size > 0) {
116 vram_index = device->memory_properties.memoryHeapCount++;
117 device->memory_properties.memoryHeaps[vram_index] = (VkMemoryHeap) {
118 .size = device->rad_info.vram_size - visible_vram_size,
119 .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
120 };
121 }
122 if (visible_vram_size) {
123 visible_vram_index = device->memory_properties.memoryHeapCount++;
124 device->memory_properties.memoryHeaps[visible_vram_index] = (VkMemoryHeap) {
125 .size = visible_vram_size,
126 .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
127 };
128 }
129 if (device->rad_info.gart_size > 0) {
130 gart_index = device->memory_properties.memoryHeapCount++;
131 device->memory_properties.memoryHeaps[gart_index] = (VkMemoryHeap) {
132 .size = device->rad_info.gart_size,
133 .flags = 0,
134 };
135 }
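/* A discrete GPU typically ends up with up to three heaps here:
 * CPU-invisible VRAM, CPU-visible VRAM and GART; an APU whose VRAM is
 * entirely CPU-visible exposes fewer. */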
136
137 STATIC_ASSERT(RADV_MEM_TYPE_COUNT <= VK_MAX_MEMORY_TYPES);
138 unsigned type_count = 0;
139 if (vram_index >= 0) {
140 device->mem_type_indices[type_count] = RADV_MEM_TYPE_VRAM;
141 device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
142 .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
143 .heapIndex = vram_index,
144 };
145 }
146 if (gart_index >= 0) {
147 device->mem_type_indices[type_count] = RADV_MEM_TYPE_GTT_WRITE_COMBINE;
148 device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
149 .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
150 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
151 .heapIndex = gart_index,
152 };
153 }
154 if (visible_vram_index >= 0) {
155 device->mem_type_indices[type_count] = RADV_MEM_TYPE_VRAM_CPU_ACCESS;
156 device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
157 .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
158 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
159 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
160 .heapIndex = visible_vram_index,
161 };
162 }
163 if (gart_index >= 0) {
164 device->mem_type_indices[type_count] = RADV_MEM_TYPE_GTT_CACHED;
165 device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
166 .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
167 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
168 VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
169 .heapIndex = gart_index,
170 };
171 }
172 device->memory_properties.memoryTypeCount = type_count;
173 }
174
175 static VkResult
176 radv_physical_device_init(struct radv_physical_device *device,
177 struct radv_instance *instance,
178 drmDevicePtr drm_device)
179 {
180 const char *path = drm_device->nodes[DRM_NODE_RENDER];
181 VkResult result;
182 drmVersionPtr version;
183 int fd;
184
185 fd = open(path, O_RDWR | O_CLOEXEC);
186 if (fd < 0)
187 return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
188
189 version = drmGetVersion(fd);
190 if (!version) {
191 close(fd);
192 return vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
193 "failed to get version %s: %m", path);
194 }
195
196 if (strcmp(version->name, "amdgpu")) {
197 drmFreeVersion(version);
198 close(fd);
199 return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
200 }
201 drmFreeVersion(version);
202
203 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
204 device->instance = instance;
205 assert(strlen(path) < ARRAY_SIZE(device->path));
206 strncpy(device->path, path, ARRAY_SIZE(device->path));
207
208 device->ws = radv_amdgpu_winsys_create(fd, instance->debug_flags,
209 instance->perftest_flags);
210 if (!device->ws) {
211 result = VK_ERROR_INCOMPATIBLE_DRIVER;
212 goto fail;
213 }
214
215 device->local_fd = fd;
216 device->ws->query_info(device->ws, &device->rad_info);
217 result = radv_init_wsi(device);
218 if (result != VK_SUCCESS) {
219 device->ws->destroy(device->ws);
220 goto fail;
221 }
222
223 device->name = get_chip_name(device->rad_info.family);
224
225 if (radv_device_get_cache_uuid(device->rad_info.family, device->cache_uuid)) {
226 radv_finish_wsi(device);
227 device->ws->destroy(device->ws);
228 result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
229 "cannot generate UUID");
230 goto fail;
231 }
232
233 /* These flags affect shader compilation. */
234 uint64_t shader_env_flags =
235 (device->instance->perftest_flags & RADV_PERFTEST_SISCHED ? 0x1 : 0) |
236 (device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH ? 0x2 : 0);
237
238 /* The gpu id is already embedded in the uuid so we just pass "radv"
239 * when creating the cache.
240 */
241 char buf[VK_UUID_SIZE * 2 + 1];
242 disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
243 device->disk_cache = disk_cache_create(device->name, buf, shader_env_flags);
244
245 fprintf(stderr, "WARNING: radv is not a conformant vulkan implementation, testing use only.\n");
246
247 radv_get_driver_uuid(&device->driver_uuid);
248 radv_get_device_uuid(&device->rad_info, &device->device_uuid);
249
250 if (device->rad_info.family == CHIP_STONEY ||
251 device->rad_info.chip_class >= GFX9) {
252 device->has_rbplus = true;
253 device->rbplus_allowed = device->rad_info.family == CHIP_STONEY;
254 }
255
256 /* The mere presence of CLEAR_STATE in the IB causes random GPU hangs
257 * on SI.
258 */
259 device->has_clear_state = device->rad_info.chip_class >= CIK;
260
261 radv_physical_device_init_mem_types(device);
262 return VK_SUCCESS;
263
264 fail:
265 close(fd);
266 return result;
267 }
268
269 static void
270 radv_physical_device_finish(struct radv_physical_device *device)
271 {
272 radv_finish_wsi(device);
273 device->ws->destroy(device->ws);
274 disk_cache_destroy(device->disk_cache);
275 close(device->local_fd);
276 }
277
278 static void *
279 default_alloc_func(void *pUserData, size_t size, size_t align,
280 VkSystemAllocationScope allocationScope)
281 {
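/* align is ignored here; this default allocator assumes the driver only
 * requests alignments that malloc already satisfies. */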
282 return malloc(size);
283 }
284
285 static void *
286 default_realloc_func(void *pUserData, void *pOriginal, size_t size,
287 size_t align, VkSystemAllocationScope allocationScope)
288 {
289 return realloc(pOriginal, size);
290 }
291
292 static void
293 default_free_func(void *pUserData, void *pMemory)
294 {
295 free(pMemory);
296 }
297
298 static const VkAllocationCallbacks default_alloc = {
299 .pUserData = NULL,
300 .pfnAllocation = default_alloc_func,
301 .pfnReallocation = default_realloc_func,
302 .pfnFree = default_free_func,
303 };
304
305 static const struct debug_control radv_debug_options[] = {
306 {"nofastclears", RADV_DEBUG_NO_FAST_CLEARS},
307 {"nodcc", RADV_DEBUG_NO_DCC},
308 {"shaders", RADV_DEBUG_DUMP_SHADERS},
309 {"nocache", RADV_DEBUG_NO_CACHE},
310 {"shaderstats", RADV_DEBUG_DUMP_SHADER_STATS},
311 {"nohiz", RADV_DEBUG_NO_HIZ},
312 {"nocompute", RADV_DEBUG_NO_COMPUTE_QUEUE},
313 {"unsafemath", RADV_DEBUG_UNSAFE_MATH},
314 {"allbos", RADV_DEBUG_ALL_BOS},
315 {"noibs", RADV_DEBUG_NO_IBS},
316 {"spirv", RADV_DEBUG_DUMP_SPIRV},
317 {"vmfaults", RADV_DEBUG_VM_FAULTS},
318 {"zerovram", RADV_DEBUG_ZERO_VRAM},
319 {"syncshaders", RADV_DEBUG_SYNC_SHADERS},
320 {NULL, 0}
321 };
322
323 const char *
324 radv_get_debug_option_name(int id)
325 {
326 assert(id < ARRAY_SIZE(radv_debug_options) - 1);
327 return radv_debug_options[id].string;
328 }
329
330 static const struct debug_control radv_perftest_options[] = {
331 {"nobatchchain", RADV_PERFTEST_NO_BATCHCHAIN},
332 {"sisched", RADV_PERFTEST_SISCHED},
333 {NULL, 0}
334 };
335
336 const char *
337 radv_get_perftest_option_name(int id)
338 {
339 assert(id < ARRAY_SIZE(radv_perftest_options) - 1);
340 return radv_perftest_options[id].string;
341 }
342
343 VkResult radv_CreateInstance(
344 const VkInstanceCreateInfo* pCreateInfo,
345 const VkAllocationCallbacks* pAllocator,
346 VkInstance* pInstance)
347 {
348 struct radv_instance *instance;
349
350 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
351
352 uint32_t client_version;
353 if (pCreateInfo->pApplicationInfo &&
354 pCreateInfo->pApplicationInfo->apiVersion != 0) {
355 client_version = pCreateInfo->pApplicationInfo->apiVersion;
356 } else {
357 client_version = VK_MAKE_VERSION(1, 0, 0);
358 }
359
360 if (VK_MAKE_VERSION(1, 0, 0) > client_version ||
361 client_version > VK_MAKE_VERSION(1, 0, 0xfff)) {
362 return vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
363 "Client requested version %d.%d.%d",
364 VK_VERSION_MAJOR(client_version),
365 VK_VERSION_MINOR(client_version),
366 VK_VERSION_PATCH(client_version));
367 }
368
369 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
370 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
371 if (!radv_instance_extension_supported(ext_name))
372 return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
373 }
374
375 instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
376 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
377 if (!instance)
378 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
379
380 instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
381
382 if (pAllocator)
383 instance->alloc = *pAllocator;
384 else
385 instance->alloc = default_alloc;
386
387 instance->apiVersion = client_version;
388 instance->physicalDeviceCount = -1;
389
390 _mesa_locale_init();
391
392 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
393
394 instance->debug_flags = parse_debug_string(getenv("RADV_DEBUG"),
395 radv_debug_options);
396
397 instance->perftest_flags = parse_debug_string(getenv("RADV_PERFTEST"),
398 radv_perftest_options);
399
400 *pInstance = radv_instance_to_handle(instance);
401
402 return VK_SUCCESS;
403 }
404
405 void radv_DestroyInstance(
406 VkInstance _instance,
407 const VkAllocationCallbacks* pAllocator)
408 {
409 RADV_FROM_HANDLE(radv_instance, instance, _instance);
410
411 if (!instance)
412 return;
413
414 for (int i = 0; i < instance->physicalDeviceCount; ++i) {
415 radv_physical_device_finish(instance->physicalDevices + i);
416 }
417
418 VG(VALGRIND_DESTROY_MEMPOOL(instance));
419
420 _mesa_locale_fini();
421
422 vk_free(&instance->alloc, instance);
423 }
424
425 static VkResult
426 radv_enumerate_devices(struct radv_instance *instance)
427 {
428 /* TODO: Check for more devices ? */
429 drmDevicePtr devices[8];
430 VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
431 int max_devices;
432
433 instance->physicalDeviceCount = 0;
434
435 max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
436 if (max_devices < 1)
437 return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
438
439 for (unsigned i = 0; i < (unsigned)max_devices; i++) {
440 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
441 devices[i]->bustype == DRM_BUS_PCI &&
442 devices[i]->deviceinfo.pci->vendor_id == ATI_VENDOR_ID) {
443
444 result = radv_physical_device_init(instance->physicalDevices +
445 instance->physicalDeviceCount,
446 instance,
447 devices[i]);
448 if (result == VK_SUCCESS)
449 ++instance->physicalDeviceCount;
450 else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
451 break;
452 }
453 }
454 drmFreeDevices(devices, max_devices);
455
456 return result;
457 }
458
459 VkResult radv_EnumeratePhysicalDevices(
460 VkInstance _instance,
461 uint32_t* pPhysicalDeviceCount,
462 VkPhysicalDevice* pPhysicalDevices)
463 {
464 RADV_FROM_HANDLE(radv_instance, instance, _instance);
465 VkResult result;
466
467 if (instance->physicalDeviceCount < 0) {
468 result = radv_enumerate_devices(instance);
469 if (result != VK_SUCCESS &&
470 result != VK_ERROR_INCOMPATIBLE_DRIVER)
471 return result;
472 }
473
474 if (!pPhysicalDevices) {
475 *pPhysicalDeviceCount = instance->physicalDeviceCount;
476 } else {
477 *pPhysicalDeviceCount = MIN2(*pPhysicalDeviceCount, instance->physicalDeviceCount);
478 for (unsigned i = 0; i < *pPhysicalDeviceCount; ++i)
479 pPhysicalDevices[i] = radv_physical_device_to_handle(instance->physicalDevices + i);
480 }
481
482 return *pPhysicalDeviceCount < instance->physicalDeviceCount ? VK_INCOMPLETE
483 : VK_SUCCESS;
484 }
485
486 void radv_GetPhysicalDeviceFeatures(
487 VkPhysicalDevice physicalDevice,
488 VkPhysicalDeviceFeatures* pFeatures)
489 {
490 memset(pFeatures, 0, sizeof(*pFeatures));
491
492 *pFeatures = (VkPhysicalDeviceFeatures) {
493 .robustBufferAccess = true,
494 .fullDrawIndexUint32 = true,
495 .imageCubeArray = true,
496 .independentBlend = true,
497 .geometryShader = true,
498 .tessellationShader = true,
499 .sampleRateShading = true,
500 .dualSrcBlend = true,
501 .logicOp = true,
502 .multiDrawIndirect = true,
503 .drawIndirectFirstInstance = true,
504 .depthClamp = true,
505 .depthBiasClamp = true,
506 .fillModeNonSolid = true,
507 .depthBounds = true,
508 .wideLines = true,
509 .largePoints = true,
510 .alphaToOne = true,
511 .multiViewport = true,
512 .samplerAnisotropy = true,
513 .textureCompressionETC2 = false,
514 .textureCompressionASTC_LDR = false,
515 .textureCompressionBC = true,
516 .occlusionQueryPrecise = true,
517 .pipelineStatisticsQuery = true,
518 .vertexPipelineStoresAndAtomics = true,
519 .fragmentStoresAndAtomics = true,
520 .shaderTessellationAndGeometryPointSize = true,
521 .shaderImageGatherExtended = true,
522 .shaderStorageImageExtendedFormats = true,
523 .shaderStorageImageMultisample = false,
524 .shaderUniformBufferArrayDynamicIndexing = true,
525 .shaderSampledImageArrayDynamicIndexing = true,
526 .shaderStorageBufferArrayDynamicIndexing = true,
527 .shaderStorageImageArrayDynamicIndexing = true,
528 .shaderStorageImageReadWithoutFormat = true,
529 .shaderStorageImageWriteWithoutFormat = true,
530 .shaderClipDistance = true,
531 .shaderCullDistance = true,
532 .shaderFloat64 = true,
533 .shaderInt64 = true,
534 .shaderInt16 = false,
535 .sparseBinding = true,
536 .variableMultisampleRate = true,
537 .inheritedQueries = true,
538 };
539 }
540
541 void radv_GetPhysicalDeviceFeatures2KHR(
542 VkPhysicalDevice physicalDevice,
543 VkPhysicalDeviceFeatures2KHR *pFeatures)
544 {
545 vk_foreach_struct(ext, pFeatures->pNext) {
546 switch (ext->sType) {
547 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
548 VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *)ext;
549 features->variablePointersStorageBuffer = true;
550 features->variablePointers = false;
551 break;
552 }
553 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHX: {
554 VkPhysicalDeviceMultiviewFeaturesKHX *features = (VkPhysicalDeviceMultiviewFeaturesKHX*)ext;
555 features->multiview = true;
556 features->multiviewGeometryShader = true;
557 features->multiviewTessellationShader = true;
558 break;
559 }
560 default:
561 break;
562 }
563 }
564 radv_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
565 }
566
567 void radv_GetPhysicalDeviceProperties(
568 VkPhysicalDevice physicalDevice,
569 VkPhysicalDeviceProperties* pProperties)
570 {
571 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
572 VkSampleCountFlags sample_counts = 0xf;
573
574 /* Make sure that the entire descriptor set is addressable with a signed
575 * 32-bit int. So the sum of all limits scaled by descriptor size has to
576 * be at most 2 GiB. A combined image & sampler object counts as one of
577 * each. This limit is for the pipeline layout, not for the set layout, but
578 * there is no set limit, so we just set a pipeline limit. I don't think
579 * any app is going to hit this soon. */
580 size_t max_descriptor_set_size = ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
581 (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
582 32 /* storage buffer, 32 due to potential space wasted on alignment */ +
583 32 /* sampler, largest when combined with image */ +
584 64 /* sampled image */ +
585 64 /* storage image */);
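/* With the sizes above each descriptor "slot" costs 224 bytes, so this
 * works out to roughly 9.5 million descriptors per type (2^31 / 224,
 * minus the small dynamic-buffer reservation). */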
586
587 VkPhysicalDeviceLimits limits = {
588 .maxImageDimension1D = (1 << 14),
589 .maxImageDimension2D = (1 << 14),
590 .maxImageDimension3D = (1 << 11),
591 .maxImageDimensionCube = (1 << 14),
592 .maxImageArrayLayers = (1 << 11),
593 .maxTexelBufferElements = 128 * 1024 * 1024,
594 .maxUniformBufferRange = UINT32_MAX,
595 .maxStorageBufferRange = UINT32_MAX,
596 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
597 .maxMemoryAllocationCount = UINT32_MAX,
598 .maxSamplerAllocationCount = 64 * 1024,
599 .bufferImageGranularity = 64, /* A cache line */
600 .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
601 .maxBoundDescriptorSets = MAX_SETS,
602 .maxPerStageDescriptorSamplers = max_descriptor_set_size,
603 .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
604 .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
605 .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
606 .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
607 .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
608 .maxPerStageResources = max_descriptor_set_size,
609 .maxDescriptorSetSamplers = max_descriptor_set_size,
610 .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
611 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2,
612 .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
613 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2,
614 .maxDescriptorSetSampledImages = max_descriptor_set_size,
615 .maxDescriptorSetStorageImages = max_descriptor_set_size,
616 .maxDescriptorSetInputAttachments = max_descriptor_set_size,
617 .maxVertexInputAttributes = 32,
618 .maxVertexInputBindings = 32,
619 .maxVertexInputAttributeOffset = 2047,
620 .maxVertexInputBindingStride = 2048,
621 .maxVertexOutputComponents = 128,
622 .maxTessellationGenerationLevel = 64,
623 .maxTessellationPatchSize = 32,
624 .maxTessellationControlPerVertexInputComponents = 128,
625 .maxTessellationControlPerVertexOutputComponents = 128,
626 .maxTessellationControlPerPatchOutputComponents = 120,
627 .maxTessellationControlTotalOutputComponents = 4096,
628 .maxTessellationEvaluationInputComponents = 128,
629 .maxTessellationEvaluationOutputComponents = 128,
630 .maxGeometryShaderInvocations = 127,
631 .maxGeometryInputComponents = 64,
632 .maxGeometryOutputComponents = 128,
633 .maxGeometryOutputVertices = 256,
634 .maxGeometryTotalOutputComponents = 1024,
635 .maxFragmentInputComponents = 128,
636 .maxFragmentOutputAttachments = 8,
637 .maxFragmentDualSrcAttachments = 1,
638 .maxFragmentCombinedOutputResources = 8,
639 .maxComputeSharedMemorySize = 32768,
640 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
641 .maxComputeWorkGroupInvocations = 2048,
642 .maxComputeWorkGroupSize = {
643 2048,
644 2048,
645 2048
646 },
647 .subPixelPrecisionBits = 4 /* FIXME */,
648 .subTexelPrecisionBits = 4 /* FIXME */,
649 .mipmapPrecisionBits = 4 /* FIXME */,
650 .maxDrawIndexedIndexValue = UINT32_MAX,
651 .maxDrawIndirectCount = UINT32_MAX,
652 .maxSamplerLodBias = 16,
653 .maxSamplerAnisotropy = 16,
654 .maxViewports = MAX_VIEWPORTS,
655 .maxViewportDimensions = { (1 << 14), (1 << 14) },
656 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
657 .viewportSubPixelBits = 13, /* We take a float? */
658 .minMemoryMapAlignment = 4096, /* A page */
659 .minTexelBufferOffsetAlignment = 1,
660 .minUniformBufferOffsetAlignment = 4,
661 .minStorageBufferOffsetAlignment = 4,
662 .minTexelOffset = -32,
663 .maxTexelOffset = 31,
664 .minTexelGatherOffset = -32,
665 .maxTexelGatherOffset = 31,
666 .minInterpolationOffset = -2,
667 .maxInterpolationOffset = 2,
668 .subPixelInterpolationOffsetBits = 8,
669 .maxFramebufferWidth = (1 << 14),
670 .maxFramebufferHeight = (1 << 14),
671 .maxFramebufferLayers = (1 << 10),
672 .framebufferColorSampleCounts = sample_counts,
673 .framebufferDepthSampleCounts = sample_counts,
674 .framebufferStencilSampleCounts = sample_counts,
675 .framebufferNoAttachmentsSampleCounts = sample_counts,
676 .maxColorAttachments = MAX_RTS,
677 .sampledImageColorSampleCounts = sample_counts,
678 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
679 .sampledImageDepthSampleCounts = sample_counts,
680 .sampledImageStencilSampleCounts = sample_counts,
681 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
682 .maxSampleMaskWords = 1,
683 .timestampComputeAndGraphics = true,
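/* clock_crystal_freq is reported in kHz, so this yields the timestamp
 * tick period in nanoseconds (e.g. a 27000 kHz crystal gives ~37 ns
 * per tick). */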
684 .timestampPeriod = 1000000.0 / pdevice->rad_info.clock_crystal_freq,
685 .maxClipDistances = 8,
686 .maxCullDistances = 8,
687 .maxCombinedClipAndCullDistances = 8,
688 .discreteQueuePriorities = 1,
689 .pointSizeRange = { 0.125, 255.875 },
690 .lineWidthRange = { 0.0, 7.9921875 },
691 .pointSizeGranularity = (1.0 / 8.0),
692 .lineWidthGranularity = (1.0 / 128.0),
693 .strictLines = false, /* FINISHME */
694 .standardSampleLocations = true,
695 .optimalBufferCopyOffsetAlignment = 128,
696 .optimalBufferCopyRowPitchAlignment = 128,
697 .nonCoherentAtomSize = 64,
698 };
699
700 *pProperties = (VkPhysicalDeviceProperties) {
701 .apiVersion = radv_physical_device_api_version(pdevice),
702 .driverVersion = vk_get_driver_version(),
703 .vendorID = ATI_VENDOR_ID,
704 .deviceID = pdevice->rad_info.pci_id,
705 .deviceType = pdevice->rad_info.has_dedicated_vram ? VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU : VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
706 .limits = limits,
707 .sparseProperties = {0},
708 };
709
710 strcpy(pProperties->deviceName, pdevice->name);
711 memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
712 }
713
714 void radv_GetPhysicalDeviceProperties2KHR(
715 VkPhysicalDevice physicalDevice,
716 VkPhysicalDeviceProperties2KHR *pProperties)
717 {
718 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
719 radv_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
720
721 vk_foreach_struct(ext, pProperties->pNext) {
722 switch (ext->sType) {
723 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
724 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
725 (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
726 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
727 break;
728 }
729 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
730 VkPhysicalDeviceIDPropertiesKHR *properties = (VkPhysicalDeviceIDPropertiesKHR*)ext;
731 memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
732 memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
733 properties->deviceLUIDValid = false;
734 break;
735 }
736 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHX: {
737 VkPhysicalDeviceMultiviewPropertiesKHX *properties = (VkPhysicalDeviceMultiviewPropertiesKHX*)ext;
738 properties->maxMultiviewViewCount = MAX_VIEWS;
739 properties->maxMultiviewInstanceIndex = INT_MAX;
740 break;
741 }
742 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
743 VkPhysicalDevicePointClippingPropertiesKHR *properties =
744 (VkPhysicalDevicePointClippingPropertiesKHR*)ext;
745 properties->pointClippingBehavior = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
746 break;
747 }
748 default:
749 break;
750 }
751 }
752 }
753
754 static void radv_get_physical_device_queue_family_properties(
755 struct radv_physical_device* pdevice,
756 uint32_t* pCount,
757 VkQueueFamilyProperties** pQueueFamilyProperties)
758 {
759 int num_queue_families = 1;
760 int idx;
761 if (pdevice->rad_info.num_compute_rings > 0 &&
762 pdevice->rad_info.chip_class >= CIK &&
763 !(pdevice->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE))
764 num_queue_families++;
765
766 if (pQueueFamilyProperties == NULL) {
767 *pCount = num_queue_families;
768 return;
769 }
770
771 if (!*pCount)
772 return;
773
774 idx = 0;
775 if (*pCount >= 1) {
776 *pQueueFamilyProperties[idx] = (VkQueueFamilyProperties) {
777 .queueFlags = VK_QUEUE_GRAPHICS_BIT |
778 VK_QUEUE_COMPUTE_BIT |
779 VK_QUEUE_TRANSFER_BIT |
780 VK_QUEUE_SPARSE_BINDING_BIT,
781 .queueCount = 1,
782 .timestampValidBits = 64,
783 .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
784 };
785 idx++;
786 }
787
788 if (pdevice->rad_info.num_compute_rings > 0 &&
789 pdevice->rad_info.chip_class >= CIK &&
790 !(pdevice->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE)) {
791 if (*pCount > idx) {
792 *pQueueFamilyProperties[idx] = (VkQueueFamilyProperties) {
793 .queueFlags = VK_QUEUE_COMPUTE_BIT |
794 VK_QUEUE_TRANSFER_BIT |
795 VK_QUEUE_SPARSE_BINDING_BIT,
796 .queueCount = pdevice->rad_info.num_compute_rings,
797 .timestampValidBits = 64,
798 .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
799 };
800 idx++;
801 }
802 }
803 *pCount = idx;
804 }
805
806 void radv_GetPhysicalDeviceQueueFamilyProperties(
807 VkPhysicalDevice physicalDevice,
808 uint32_t* pCount,
809 VkQueueFamilyProperties* pQueueFamilyProperties)
810 {
811 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
812 if (!pQueueFamilyProperties) {
813 radv_get_physical_device_queue_family_properties(pdevice, pCount, NULL);
814 return;
815 }
816 VkQueueFamilyProperties *properties[] = {
817 pQueueFamilyProperties + 0,
818 pQueueFamilyProperties + 1,
819 pQueueFamilyProperties + 2,
820 };
821 radv_get_physical_device_queue_family_properties(pdevice, pCount, properties);
822 assert(*pCount <= 3);
823 }
824
825 void radv_GetPhysicalDeviceQueueFamilyProperties2KHR(
826 VkPhysicalDevice physicalDevice,
827 uint32_t* pCount,
828 VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
829 {
830 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
831 if (!pQueueFamilyProperties) {
832 radv_get_physical_device_queue_family_properties(pdevice, pCount, NULL);
833 return;
834 }
835 VkQueueFamilyProperties *properties[] = {
836 &pQueueFamilyProperties[0].queueFamilyProperties,
837 &pQueueFamilyProperties[1].queueFamilyProperties,
838 &pQueueFamilyProperties[2].queueFamilyProperties,
839 };
840 radv_get_physical_device_queue_family_properties(pdevice, pCount, properties);
841 assert(*pCount <= 3);
842 }
843
844 void radv_GetPhysicalDeviceMemoryProperties(
845 VkPhysicalDevice physicalDevice,
846 VkPhysicalDeviceMemoryProperties *pMemoryProperties)
847 {
848 RADV_FROM_HANDLE(radv_physical_device, physical_device, physicalDevice);
849
850 *pMemoryProperties = physical_device->memory_properties;
851 }
852
853 void radv_GetPhysicalDeviceMemoryProperties2KHR(
854 VkPhysicalDevice physicalDevice,
855 VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
856 {
857 radv_GetPhysicalDeviceMemoryProperties(physicalDevice,
858 &pMemoryProperties->memoryProperties);
859 }
860
861 static enum radeon_ctx_priority
862 radv_get_queue_global_priority(const VkDeviceQueueGlobalPriorityCreateInfoEXT *pObj)
863 {
864 /* Default to MEDIUM when a specific global priority isn't requested */
865 if (!pObj)
866 return RADEON_CTX_PRIORITY_MEDIUM;
867
868 switch(pObj->globalPriority) {
869 case VK_QUEUE_GLOBAL_PRIORITY_REALTIME:
870 return RADEON_CTX_PRIORITY_REALTIME;
871 case VK_QUEUE_GLOBAL_PRIORITY_HIGH:
872 return RADEON_CTX_PRIORITY_HIGH;
873 case VK_QUEUE_GLOBAL_PRIORITY_MEDIUM:
874 return RADEON_CTX_PRIORITY_MEDIUM;
875 case VK_QUEUE_GLOBAL_PRIORITY_LOW:
876 return RADEON_CTX_PRIORITY_LOW;
877 default:
878 unreachable("Illegal global priority value");
879 return RADEON_CTX_PRIORITY_INVALID;
880 }
881 }
882
883 static int
884 radv_queue_init(struct radv_device *device, struct radv_queue *queue,
885 int queue_family_index, int idx,
886 const VkDeviceQueueGlobalPriorityCreateInfoEXT *global_priority)
887 {
888 queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
889 queue->device = device;
890 queue->queue_family_index = queue_family_index;
891 queue->queue_idx = idx;
892 queue->priority = radv_get_queue_global_priority(global_priority);
893
894 queue->hw_ctx = device->ws->ctx_create(device->ws, queue->priority);
895 if (!queue->hw_ctx)
896 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
897
898 return VK_SUCCESS;
899 }
900
901 static void
902 radv_queue_finish(struct radv_queue *queue)
903 {
904 if (queue->hw_ctx)
905 queue->device->ws->ctx_destroy(queue->hw_ctx);
906
907 if (queue->initial_full_flush_preamble_cs)
908 queue->device->ws->cs_destroy(queue->initial_full_flush_preamble_cs);
909 if (queue->initial_preamble_cs)
910 queue->device->ws->cs_destroy(queue->initial_preamble_cs);
911 if (queue->continue_preamble_cs)
912 queue->device->ws->cs_destroy(queue->continue_preamble_cs);
913 if (queue->descriptor_bo)
914 queue->device->ws->buffer_destroy(queue->descriptor_bo);
915 if (queue->scratch_bo)
916 queue->device->ws->buffer_destroy(queue->scratch_bo);
917 if (queue->esgs_ring_bo)
918 queue->device->ws->buffer_destroy(queue->esgs_ring_bo);
919 if (queue->gsvs_ring_bo)
920 queue->device->ws->buffer_destroy(queue->gsvs_ring_bo);
921 if (queue->tess_factor_ring_bo)
922 queue->device->ws->buffer_destroy(queue->tess_factor_ring_bo);
923 if (queue->tess_offchip_ring_bo)
924 queue->device->ws->buffer_destroy(queue->tess_offchip_ring_bo);
925 if (queue->compute_scratch_bo)
926 queue->device->ws->buffer_destroy(queue->compute_scratch_bo);
927 }
928
929 static void
930 radv_device_init_gs_info(struct radv_device *device)
931 {
932 switch (device->physical_device->rad_info.family) {
933 case CHIP_OLAND:
934 case CHIP_HAINAN:
935 case CHIP_KAVERI:
936 case CHIP_KABINI:
937 case CHIP_MULLINS:
938 case CHIP_ICELAND:
939 case CHIP_CARRIZO:
940 case CHIP_STONEY:
941 device->gs_table_depth = 16;
942 return;
943 case CHIP_TAHITI:
944 case CHIP_PITCAIRN:
945 case CHIP_VERDE:
946 case CHIP_BONAIRE:
947 case CHIP_HAWAII:
948 case CHIP_TONGA:
949 case CHIP_FIJI:
950 case CHIP_POLARIS10:
951 case CHIP_POLARIS11:
952 case CHIP_POLARIS12:
953 case CHIP_VEGA10:
954 case CHIP_RAVEN:
955 device->gs_table_depth = 32;
956 return;
957 default:
958 unreachable("unknown GPU");
959 }
960 }
961
962 VkResult radv_CreateDevice(
963 VkPhysicalDevice physicalDevice,
964 const VkDeviceCreateInfo* pCreateInfo,
965 const VkAllocationCallbacks* pAllocator,
966 VkDevice* pDevice)
967 {
968 RADV_FROM_HANDLE(radv_physical_device, physical_device, physicalDevice);
969 VkResult result;
970 struct radv_device *device;
971
972 bool keep_shader_info = false;
973
974 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
975 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
976 if (!radv_physical_device_extension_supported(physical_device, ext_name))
977 return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
978
979 if (strcmp(ext_name, VK_AMD_SHADER_INFO_EXTENSION_NAME) == 0)
980 keep_shader_info = true;
981 }
982
983 /* Check enabled features */
984 if (pCreateInfo->pEnabledFeatures) {
985 VkPhysicalDeviceFeatures supported_features;
986 radv_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
987 VkBool32 *supported_feature = (VkBool32 *)&supported_features;
988 VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
989 unsigned num_features = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
990 for (uint32_t i = 0; i < num_features; i++) {
991 if (enabled_feature[i] && !supported_feature[i])
992 return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
993 }
994 }
995
996 device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
997 sizeof(*device), 8,
998 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
999 if (!device)
1000 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1001
1002 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1003 device->instance = physical_device->instance;
1004 device->physical_device = physical_device;
1005
1006 device->ws = physical_device->ws;
1007 if (pAllocator)
1008 device->alloc = *pAllocator;
1009 else
1010 device->alloc = physical_device->instance->alloc;
1011
1012 mtx_init(&device->shader_slab_mutex, mtx_plain);
1013 list_inithead(&device->shader_slabs);
1014
1015 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
1016 const VkDeviceQueueCreateInfo *queue_create = &pCreateInfo->pQueueCreateInfos[i];
1017 uint32_t qfi = queue_create->queueFamilyIndex;
1018 const VkDeviceQueueGlobalPriorityCreateInfoEXT *global_priority =
1019 vk_find_struct_const(queue_create->pNext, DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT);
1020
1021 assert(!global_priority || device->physical_device->rad_info.has_ctx_priority);
1022
1023 device->queues[qfi] = vk_alloc(&device->alloc,
1024 queue_create->queueCount * sizeof(struct radv_queue), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1025 if (!device->queues[qfi]) {
1026 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1027 goto fail;
1028 }
1029
1030 memset(device->queues[qfi], 0, queue_create->queueCount * sizeof(struct radv_queue));
1031
1032 device->queue_count[qfi] = queue_create->queueCount;
1033
1034 for (unsigned q = 0; q < queue_create->queueCount; q++) {
1035 result = radv_queue_init(device, &device->queues[qfi][q], qfi, q, global_priority);
1036 if (result != VK_SUCCESS)
1037 goto fail;
1038 }
1039 }
1040
1041 #if HAVE_LLVM < 0x0400
1042 device->llvm_supports_spill = false;
1043 #else
1044 device->llvm_supports_spill = true;
1045 #endif
1046
1047 /* The maximum number of scratch waves. Scratch space isn't divided
1048 * evenly between CUs. The number is only a function of the number of CUs.
1049 * We can decrease the constant to decrease the scratch buffer size.
1050 *
1051 * scratch_waves must be >= the maximum possible size of
1052 * 1 threadgroup, so that the hw doesn't hang from being unable
1053 * to start any.
1054 *
1055 * The recommended value is 4 per CU at most. Higher numbers don't
1056 * bring much benefit, but they still occupy chip resources (think
1057 * async compute). I've seen ~2% performance difference between 4 and 32.
1058 */
1059 uint32_t max_threads_per_block = 2048;
1060 device->scratch_waves = MAX2(32 * physical_device->rad_info.num_good_compute_units,
1061 max_threads_per_block / 64);
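/* E.g. a 64-CU part gets 32 * 64 = 2048 scratch waves; the second term
 * (2048 threads / 64 lanes = 32 waves) only matters for parts with very
 * few CUs. */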
1062
1063 radv_device_init_gs_info(device);
1064
1065 device->tess_offchip_block_dw_size =
1066 device->physical_device->rad_info.family == CHIP_HAWAII ? 4096 : 8192;
1067 device->has_distributed_tess =
1068 device->physical_device->rad_info.chip_class >= VI &&
1069 device->physical_device->rad_info.max_se >= 2;
1070
1071 if (getenv("RADV_TRACE_FILE")) {
1072 keep_shader_info = true;
1073
1074 if (!radv_init_trace(device))
1075 goto fail;
1076 }
1077
1078 device->keep_shader_info = keep_shader_info;
1079
1080 result = radv_device_init_meta(device);
1081 if (result != VK_SUCCESS)
1082 goto fail;
1083
1084 radv_device_init_msaa(device);
1085
1086 for (int family = 0; family < RADV_MAX_QUEUE_FAMILIES; ++family) {
1087 device->empty_cs[family] = device->ws->cs_create(device->ws, family);
1088 switch (family) {
1089 case RADV_QUEUE_GENERAL:
1090 radeon_emit(device->empty_cs[family], PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
1091 radeon_emit(device->empty_cs[family], CONTEXT_CONTROL_LOAD_ENABLE(1));
1092 radeon_emit(device->empty_cs[family], CONTEXT_CONTROL_SHADOW_ENABLE(1));
1093 break;
1094 case RADV_QUEUE_COMPUTE:
1095 radeon_emit(device->empty_cs[family], PKT3(PKT3_NOP, 0, 0));
1096 radeon_emit(device->empty_cs[family], 0);
1097 break;
1098 }
1099 device->ws->cs_finalize(device->empty_cs[family]);
1100 }
1101
1102 if (device->physical_device->rad_info.chip_class >= CIK)
1103 cik_create_gfx_config(device);
1104
1105 VkPipelineCacheCreateInfo ci;
1106 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
1107 ci.pNext = NULL;
1108 ci.flags = 0;
1109 ci.pInitialData = NULL;
1110 ci.initialDataSize = 0;
1111 VkPipelineCache pc;
1112 result = radv_CreatePipelineCache(radv_device_to_handle(device),
1113 &ci, NULL, &pc);
1114 if (result != VK_SUCCESS)
1115 goto fail;
1116
1117 device->mem_cache = radv_pipeline_cache_from_handle(pc);
1118
1119 *pDevice = radv_device_to_handle(device);
1120 return VK_SUCCESS;
1121
1122 fail:
1123 if (device->trace_bo)
1124 device->ws->buffer_destroy(device->trace_bo);
1125
1126 if (device->gfx_init)
1127 device->ws->buffer_destroy(device->gfx_init);
1128
1129 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
1130 for (unsigned q = 0; q < device->queue_count[i]; q++)
1131 radv_queue_finish(&device->queues[i][q]);
1132 if (device->queue_count[i])
1133 vk_free(&device->alloc, device->queues[i]);
1134 }
1135
1136 vk_free(&device->alloc, device);
1137 return result;
1138 }
1139
1140 void radv_DestroyDevice(
1141 VkDevice _device,
1142 const VkAllocationCallbacks* pAllocator)
1143 {
1144 RADV_FROM_HANDLE(radv_device, device, _device);
1145
1146 if (!device)
1147 return;
1148
1149 if (device->trace_bo)
1150 device->ws->buffer_destroy(device->trace_bo);
1151
1152 if (device->gfx_init)
1153 device->ws->buffer_destroy(device->gfx_init);
1154
1155 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
1156 for (unsigned q = 0; q < device->queue_count[i]; q++)
1157 radv_queue_finish(&device->queues[i][q]);
1158 if (device->queue_count[i])
1159 vk_free(&device->alloc, device->queues[i]);
1160 if (device->empty_cs[i])
1161 device->ws->cs_destroy(device->empty_cs[i]);
1162 }
1163 radv_device_finish_meta(device);
1164
1165 VkPipelineCache pc = radv_pipeline_cache_to_handle(device->mem_cache);
1166 radv_DestroyPipelineCache(radv_device_to_handle(device), pc, NULL);
1167
1168 radv_destroy_shader_slabs(device);
1169
1170 vk_free(&device->alloc, device);
1171 }
1172
1173 VkResult radv_EnumerateInstanceLayerProperties(
1174 uint32_t* pPropertyCount,
1175 VkLayerProperties* pProperties)
1176 {
1177 if (pProperties == NULL) {
1178 *pPropertyCount = 0;
1179 return VK_SUCCESS;
1180 }
1181
1182 /* None supported at this time */
1183 return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
1184 }
1185
1186 VkResult radv_EnumerateDeviceLayerProperties(
1187 VkPhysicalDevice physicalDevice,
1188 uint32_t* pPropertyCount,
1189 VkLayerProperties* pProperties)
1190 {
1191 if (pProperties == NULL) {
1192 *pPropertyCount = 0;
1193 return VK_SUCCESS;
1194 }
1195
1196 /* None supported at this time */
1197 return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
1198 }
1199
1200 void radv_GetDeviceQueue(
1201 VkDevice _device,
1202 uint32_t queueFamilyIndex,
1203 uint32_t queueIndex,
1204 VkQueue* pQueue)
1205 {
1206 RADV_FROM_HANDLE(radv_device, device, _device);
1207
1208 *pQueue = radv_queue_to_handle(&device->queues[queueFamilyIndex][queueIndex]);
1209 }
1210
1211 static void
1212 fill_geom_tess_rings(struct radv_queue *queue,
1213 uint32_t *map,
1214 bool add_sample_positions,
1215 uint32_t esgs_ring_size,
1216 struct radeon_winsys_bo *esgs_ring_bo,
1217 uint32_t gsvs_ring_size,
1218 struct radeon_winsys_bo *gsvs_ring_bo,
1219 uint32_t tess_factor_ring_size,
1220 struct radeon_winsys_bo *tess_factor_ring_bo,
1221 uint32_t tess_offchip_ring_size,
1222 struct radeon_winsys_bo *tess_offchip_ring_bo)
1223 {
1224 uint64_t esgs_va = 0, gsvs_va = 0;
1225 uint64_t tess_factor_va = 0, tess_offchip_va = 0;
1226 uint32_t *desc = &map[4];
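/* map[0..1] hold the graphics scratch rsrc (written by the caller) and
 * map[2..3] are padding. Six 4-dword buffer descriptors follow: two
 * views of the ESGS ring, two of the GSVS ring, the tess factor ring
 * and the tess offchip ring, then the sample position data. */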
1227
1228 if (esgs_ring_bo)
1229 esgs_va = radv_buffer_get_va(esgs_ring_bo);
1230 if (gsvs_ring_bo)
1231 gsvs_va = radv_buffer_get_va(gsvs_ring_bo);
1232 if (tess_factor_ring_bo)
1233 tess_factor_va = radv_buffer_get_va(tess_factor_ring_bo);
1234 if (tess_offchip_ring_bo)
1235 tess_offchip_va = radv_buffer_get_va(tess_offchip_ring_bo);
1236
1237 /* stride 0, num records - size, add tid, swizzle, elsize4,
1238 index stride 64 */
1239 desc[0] = esgs_va;
1240 desc[1] = S_008F04_BASE_ADDRESS_HI(esgs_va >> 32) |
1241 S_008F04_STRIDE(0) |
1242 S_008F04_SWIZZLE_ENABLE(true);
1243 desc[2] = esgs_ring_size;
1244 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1245 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1246 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1247 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1248 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1249 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1250 S_008F0C_ELEMENT_SIZE(1) |
1251 S_008F0C_INDEX_STRIDE(3) |
1252 S_008F0C_ADD_TID_ENABLE(true);
1253
1254 desc += 4;
1255 /* GS entry for ES->GS ring */
1256 /* stride 0, num records - size, elsize0,
1257 index stride 0 */
1258 desc[0] = esgs_va;
1259 desc[1] = S_008F04_BASE_ADDRESS_HI(esgs_va >> 32)|
1260 S_008F04_STRIDE(0) |
1261 S_008F04_SWIZZLE_ENABLE(false);
1262 desc[2] = esgs_ring_size;
1263 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1264 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1265 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1266 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1267 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1268 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1269 S_008F0C_ELEMENT_SIZE(0) |
1270 S_008F0C_INDEX_STRIDE(0) |
1271 S_008F0C_ADD_TID_ENABLE(false);
1272
1273 desc += 4;
1274 /* VS entry for GS->VS ring */
1275 /* stride 0, num records - size, elsize0,
1276 index stride 0 */
1277 desc[0] = gsvs_va;
1278 desc[1] = S_008F04_BASE_ADDRESS_HI(gsvs_va >> 32)|
1279 S_008F04_STRIDE(0) |
1280 S_008F04_SWIZZLE_ENABLE(false);
1281 desc[2] = gsvs_ring_size;
1282 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1283 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1284 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1285 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1286 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1287 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1288 S_008F0C_ELEMENT_SIZE(0) |
1289 S_008F0C_INDEX_STRIDE(0) |
1290 S_008F0C_ADD_TID_ENABLE(false);
1291 desc += 4;
1292
1293 /* stride gsvs_itemsize, num records 64
1294 elsize 4, index stride 16 */
1295 /* shader will patch stride and desc[2] */
1296 desc[0] = gsvs_va;
1297 desc[1] = S_008F04_BASE_ADDRESS_HI(gsvs_va >> 32)|
1298 S_008F04_STRIDE(0) |
1299 S_008F04_SWIZZLE_ENABLE(true);
1300 desc[2] = 0;
1301 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1302 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1303 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1304 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1305 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1306 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1307 S_008F0C_ELEMENT_SIZE(1) |
1308 S_008F0C_INDEX_STRIDE(1) |
1309 S_008F0C_ADD_TID_ENABLE(true);
1310 desc += 4;
1311
1312 desc[0] = tess_factor_va;
1313 desc[1] = S_008F04_BASE_ADDRESS_HI(tess_factor_va >> 32) |
1314 S_008F04_STRIDE(0) |
1315 S_008F04_SWIZZLE_ENABLE(false);
1316 desc[2] = tess_factor_ring_size;
1317 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1318 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1319 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1320 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1321 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1322 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1323 S_008F0C_ELEMENT_SIZE(0) |
1324 S_008F0C_INDEX_STRIDE(0) |
1325 S_008F0C_ADD_TID_ENABLE(false);
1326 desc += 4;
1327
1328 desc[0] = tess_offchip_va;
1329 desc[1] = S_008F04_BASE_ADDRESS_HI(tess_offchip_va >> 32) |
1330 S_008F04_STRIDE(0) |
1331 S_008F04_SWIZZLE_ENABLE(false);
1332 desc[2] = tess_offchip_ring_size;
1333 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1334 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1335 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1336 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1337 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1338 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1339 S_008F0C_ELEMENT_SIZE(0) |
1340 S_008F0C_INDEX_STRIDE(0) |
1341 S_008F0C_ADD_TID_ENABLE(false);
1342 desc += 4;
1343
1344 /* add sample positions after all rings, but only when requested;
the descriptor BO is only sized for them when add_sample_positions is set */
if (add_sample_positions) {
1345 memcpy(desc, queue->device->sample_locations_1x, 8);
1346 desc += 2;
1347 memcpy(desc, queue->device->sample_locations_2x, 16);
1348 desc += 4;
1349 memcpy(desc, queue->device->sample_locations_4x, 32);
1350 desc += 8;
1351 memcpy(desc, queue->device->sample_locations_8x, 64);
1352 desc += 16;
1353 memcpy(desc, queue->device->sample_locations_16x, 128);
}
1354 }
1355
1356 static unsigned
1357 radv_get_hs_offchip_param(struct radv_device *device, uint32_t *max_offchip_buffers_p)
1358 {
1359 bool double_offchip_buffers = device->physical_device->rad_info.chip_class >= CIK &&
1360 device->physical_device->rad_info.family != CHIP_CARRIZO &&
1361 device->physical_device->rad_info.family != CHIP_STONEY;
1362 unsigned max_offchip_buffers_per_se = double_offchip_buffers ? 128 : 64;
1363 unsigned max_offchip_buffers = max_offchip_buffers_per_se *
1364 device->physical_device->rad_info.max_se;
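/* E.g. a GFX9 part with 4 SEs: double buffering gives 128 * 4 = 512,
 * clamped to 508 below. */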
1365 unsigned offchip_granularity;
1366 unsigned hs_offchip_param;
1367 switch (device->tess_offchip_block_dw_size) {
1368 default:
1369 assert(0);
1370 /* fall through */
1371 case 8192:
1372 offchip_granularity = V_03093C_X_8K_DWORDS;
1373 break;
1374 case 4096:
1375 offchip_granularity = V_03093C_X_4K_DWORDS;
1376 break;
1377 }
1378
1379 switch (device->physical_device->rad_info.chip_class) {
1380 case SI:
1381 max_offchip_buffers = MIN2(max_offchip_buffers, 126);
1382 break;
1383 case CIK:
1384 case VI:
1385 case GFX9:
1386 default:
1387 max_offchip_buffers = MIN2(max_offchip_buffers, 508);
1388 break;
1389 }
1390
1391 *max_offchip_buffers_p = max_offchip_buffers;
1392 if (device->physical_device->rad_info.chip_class >= CIK) {
1393 if (device->physical_device->rad_info.chip_class >= VI)
1394 --max_offchip_buffers;
1395 hs_offchip_param =
1396 S_03093C_OFFCHIP_BUFFERING(max_offchip_buffers) |
1397 S_03093C_OFFCHIP_GRANULARITY(offchip_granularity);
1398 } else {
1399 hs_offchip_param =
1400 S_0089B0_OFFCHIP_BUFFERING(max_offchip_buffers);
1401 }
1402 return hs_offchip_param;
1403 }
1404
1405 static VkResult
1406 radv_get_preamble_cs(struct radv_queue *queue,
1407 uint32_t scratch_size,
1408 uint32_t compute_scratch_size,
1409 uint32_t esgs_ring_size,
1410 uint32_t gsvs_ring_size,
1411 bool needs_tess_rings,
1412 bool needs_sample_positions,
1413 struct radeon_winsys_cs **initial_full_flush_preamble_cs,
1414 struct radeon_winsys_cs **initial_preamble_cs,
1415 struct radeon_winsys_cs **continue_preamble_cs)
1416 {
1417 struct radeon_winsys_bo *scratch_bo = NULL;
1418 struct radeon_winsys_bo *descriptor_bo = NULL;
1419 struct radeon_winsys_bo *compute_scratch_bo = NULL;
1420 struct radeon_winsys_bo *esgs_ring_bo = NULL;
1421 struct radeon_winsys_bo *gsvs_ring_bo = NULL;
1422 struct radeon_winsys_bo *tess_factor_ring_bo = NULL;
1423 struct radeon_winsys_bo *tess_offchip_ring_bo = NULL;
1424 struct radeon_winsys_cs *dest_cs[3] = {0};
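/* dest_cs[0] becomes the initial preamble with a full cache flush,
 * dest_cs[1] the initial preamble with a lighter flush and dest_cs[2]
 * the continue preamble; see the i == 0 / i == 1 cases below and the
 * assignments after the loop. */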
1425 bool add_tess_rings = false, add_sample_positions = false;
1426 unsigned tess_factor_ring_size = 0, tess_offchip_ring_size = 0;
1427 unsigned max_offchip_buffers;
1428 unsigned hs_offchip_param = 0;
1429 uint32_t ring_bo_flags = RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING;
1430 if (!queue->has_tess_rings) {
1431 if (needs_tess_rings)
1432 add_tess_rings = true;
1433 }
1434 if (!queue->has_sample_positions) {
1435 if (needs_sample_positions)
1436 add_sample_positions = true;
1437 }
1438 tess_factor_ring_size = 32768 * queue->device->physical_device->rad_info.max_se;
1439 hs_offchip_param = radv_get_hs_offchip_param(queue->device,
1440 &max_offchip_buffers);
1441 tess_offchip_ring_size = max_offchip_buffers *
1442 queue->device->tess_offchip_block_dw_size * 4;
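/* E.g. with 4 SEs and 8K-dword offchip blocks: a 128 KiB tess factor
 * ring and 508 * 8192 * 4 bytes (~15.9 MiB) of offchip ring. */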
1443
1444 if (scratch_size <= queue->scratch_size &&
1445 compute_scratch_size <= queue->compute_scratch_size &&
1446 esgs_ring_size <= queue->esgs_ring_size &&
1447 gsvs_ring_size <= queue->gsvs_ring_size &&
1448 !add_tess_rings && !add_sample_positions &&
1449 queue->initial_preamble_cs) {
1450 *initial_full_flush_preamble_cs = queue->initial_full_flush_preamble_cs;
1451 *initial_preamble_cs = queue->initial_preamble_cs;
1452 *continue_preamble_cs = queue->continue_preamble_cs;
1453 if (!scratch_size && !compute_scratch_size && !esgs_ring_size && !gsvs_ring_size)
1454 *continue_preamble_cs = NULL;
1455 return VK_SUCCESS;
1456 }
1457
1458 if (scratch_size > queue->scratch_size) {
1459 scratch_bo = queue->device->ws->buffer_create(queue->device->ws,
1460 scratch_size,
1461 4096,
1462 RADEON_DOMAIN_VRAM,
1463 ring_bo_flags);
1464 if (!scratch_bo)
1465 goto fail;
1466 } else
1467 scratch_bo = queue->scratch_bo;
1468
1469 if (compute_scratch_size > queue->compute_scratch_size) {
1470 compute_scratch_bo = queue->device->ws->buffer_create(queue->device->ws,
1471 compute_scratch_size,
1472 4096,
1473 RADEON_DOMAIN_VRAM,
1474 ring_bo_flags);
1475 if (!compute_scratch_bo)
1476 goto fail;
1477
1478 } else
1479 compute_scratch_bo = queue->compute_scratch_bo;
1480
1481 if (esgs_ring_size > queue->esgs_ring_size) {
1482 esgs_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
1483 esgs_ring_size,
1484 4096,
1485 RADEON_DOMAIN_VRAM,
1486 ring_bo_flags);
1487 if (!esgs_ring_bo)
1488 goto fail;
1489 } else {
1490 esgs_ring_bo = queue->esgs_ring_bo;
1491 esgs_ring_size = queue->esgs_ring_size;
1492 }
1493
1494 if (gsvs_ring_size > queue->gsvs_ring_size) {
1495 gsvs_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
1496 gsvs_ring_size,
1497 4096,
1498 RADEON_DOMAIN_VRAM,
1499 ring_bo_flags);
1500 if (!gsvs_ring_bo)
1501 goto fail;
1502 } else {
1503 gsvs_ring_bo = queue->gsvs_ring_bo;
1504 gsvs_ring_size = queue->gsvs_ring_size;
1505 }
1506
1507 if (add_tess_rings) {
1508 tess_factor_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
1509 tess_factor_ring_size,
1510 256,
1511 RADEON_DOMAIN_VRAM,
1512 ring_bo_flags);
1513 if (!tess_factor_ring_bo)
1514 goto fail;
1515 tess_offchip_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
1516 tess_offchip_ring_size,
1517 256,
1518 RADEON_DOMAIN_VRAM,
1519 ring_bo_flags);
1520 if (!tess_offchip_ring_bo)
1521 goto fail;
1522 } else {
1523 tess_factor_ring_bo = queue->tess_factor_ring_bo;
1524 tess_offchip_ring_bo = queue->tess_offchip_ring_bo;
1525 }
1526
1527 if (scratch_bo != queue->scratch_bo ||
1528 esgs_ring_bo != queue->esgs_ring_bo ||
1529 gsvs_ring_bo != queue->gsvs_ring_bo ||
1530 tess_factor_ring_bo != queue->tess_factor_ring_bo ||
1531 tess_offchip_ring_bo != queue->tess_offchip_ring_bo || add_sample_positions) {
1532 uint32_t size = 0;
1533 if (gsvs_ring_bo || esgs_ring_bo ||
1534 tess_factor_ring_bo || tess_offchip_ring_bo || add_sample_positions) {
1535 size = 112; /* scratch rsrc: 2 dwords + 2 padding, ring rsrcs: 6 * 4 dwords; 28 dwords = 112 bytes */
1536 if (add_sample_positions)
1537 size += 256; /* (1+2+4+8+16) samples * 8 bytes = 248 bytes, padded to 256. */
1538 }
1539 else if (scratch_bo)
1540 size = 8; /* 2 dword */
1541
1542 descriptor_bo = queue->device->ws->buffer_create(queue->device->ws,
1543 size,
1544 4096,
1545 RADEON_DOMAIN_VRAM,
1546 RADEON_FLAG_CPU_ACCESS|RADEON_FLAG_NO_INTERPROCESS_SHARING);
1547 if (!descriptor_bo)
1548 goto fail;
1549 } else
1550 descriptor_bo = queue->descriptor_bo;
1551
1552 for (int i = 0; i < 3; ++i) {
1553 struct radeon_winsys_cs *cs = NULL;
1554 cs = queue->device->ws->cs_create(queue->device->ws,
1555 queue->queue_family_index ? RING_COMPUTE : RING_GFX);
1556 if (!cs)
1557 goto fail;
1558
1559 dest_cs[i] = cs;
1560
1561 if (scratch_bo)
1562 radv_cs_add_buffer(queue->device->ws, cs, scratch_bo, 8);
1563
1564 if (esgs_ring_bo)
1565 radv_cs_add_buffer(queue->device->ws, cs, esgs_ring_bo, 8);
1566
1567 if (gsvs_ring_bo)
1568 radv_cs_add_buffer(queue->device->ws, cs, gsvs_ring_bo, 8);
1569
1570 if (tess_factor_ring_bo)
1571 radv_cs_add_buffer(queue->device->ws, cs, tess_factor_ring_bo, 8);
1572
1573 if (tess_offchip_ring_bo)
1574 radv_cs_add_buffer(queue->device->ws, cs, tess_offchip_ring_bo, 8);
1575
1576 if (descriptor_bo)
1577 radv_cs_add_buffer(queue->device->ws, cs, descriptor_bo, 8);
1578
1579 if (descriptor_bo != queue->descriptor_bo) {
1580 uint32_t *map = (uint32_t*)queue->device->ws->buffer_map(descriptor_bo);
1581
1582 if (scratch_bo) {
1583 uint64_t scratch_va = radv_buffer_get_va(scratch_bo);
1584 uint32_t rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
1585 S_008F04_SWIZZLE_ENABLE(1);
1586 map[0] = scratch_va;
1587 map[1] = rsrc1;
1588 }
1589
1590 if (esgs_ring_bo || gsvs_ring_bo || tess_factor_ring_bo || tess_offchip_ring_bo ||
1591 add_sample_positions)
1592 fill_geom_tess_rings(queue, map, add_sample_positions,
1593 esgs_ring_size, esgs_ring_bo,
1594 gsvs_ring_size, gsvs_ring_bo,
1595 tess_factor_ring_size, tess_factor_ring_bo,
1596 tess_offchip_ring_size, tess_offchip_ring_bo);
1597
1598 queue->device->ws->buffer_unmap(descriptor_bo);
1599 }
1600
1601 if (esgs_ring_bo || gsvs_ring_bo || tess_factor_ring_bo || tess_offchip_ring_bo) {
1602 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1603 radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1604 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1605 radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
1606 }
1607
1608 if (esgs_ring_bo || gsvs_ring_bo) {
1609 if (queue->device->physical_device->rad_info.chip_class >= CIK) {
1610 radeon_set_uconfig_reg_seq(cs, R_030900_VGT_ESGS_RING_SIZE, 2);
1611 radeon_emit(cs, esgs_ring_size >> 8);
1612 radeon_emit(cs, gsvs_ring_size >> 8);
1613 } else {
1614 radeon_set_config_reg_seq(cs, R_0088C8_VGT_ESGS_RING_SIZE, 2);
1615 radeon_emit(cs, esgs_ring_size >> 8);
1616 radeon_emit(cs, gsvs_ring_size >> 8);
1617 }
1618 }
1619
1620 if (tess_factor_ring_bo) {
1621 uint64_t tf_va = radv_buffer_get_va(tess_factor_ring_bo);
1622 if (queue->device->physical_device->rad_info.chip_class >= CIK) {
1623 radeon_set_uconfig_reg(cs, R_030938_VGT_TF_RING_SIZE,
1624 S_030938_SIZE(tess_factor_ring_size / 4));
1625 radeon_set_uconfig_reg(cs, R_030940_VGT_TF_MEMORY_BASE,
1626 tf_va >> 8);
1627 if (queue->device->physical_device->rad_info.chip_class >= GFX9) {
1628 radeon_set_uconfig_reg(cs, R_030944_VGT_TF_MEMORY_BASE_HI,
1629 tf_va >> 40);
1630 }
1631 radeon_set_uconfig_reg(cs, R_03093C_VGT_HS_OFFCHIP_PARAM, hs_offchip_param);
1632 } else {
1633 radeon_set_config_reg(cs, R_008988_VGT_TF_RING_SIZE,
1634 S_008988_SIZE(tess_factor_ring_size / 4));
1635 radeon_set_config_reg(cs, R_0089B8_VGT_TF_MEMORY_BASE,
1636 tf_va >> 8);
1637 radeon_set_config_reg(cs, R_0089B0_VGT_HS_OFFCHIP_PARAM,
1638 hs_offchip_param);
1639 }
1640 }
1641
1642 if (descriptor_bo) {
1643 uint64_t va = radv_buffer_get_va(descriptor_bo);
1644 if (queue->device->physical_device->rad_info.chip_class >= GFX9) {
1645 uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0,
1646 R_00B130_SPI_SHADER_USER_DATA_VS_0,
1647 R_00B208_SPI_SHADER_USER_DATA_ADDR_LO_GS,
1648 R_00B408_SPI_SHADER_USER_DATA_ADDR_LO_HS};
1649
1650 for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
1651 radeon_set_sh_reg_seq(cs, regs[i], 2);
1652 radeon_emit(cs, va);
1653 radeon_emit(cs, va >> 32);
1654 }
1655 } else {
1656 uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0,
1657 R_00B130_SPI_SHADER_USER_DATA_VS_0,
1658 R_00B230_SPI_SHADER_USER_DATA_GS_0,
1659 R_00B330_SPI_SHADER_USER_DATA_ES_0,
1660 R_00B430_SPI_SHADER_USER_DATA_HS_0,
1661 R_00B530_SPI_SHADER_USER_DATA_LS_0};
1662
1663 for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
1664 radeon_set_sh_reg_seq(cs, regs[i], 2);
1665 radeon_emit(cs, va);
1666 radeon_emit(cs, va >> 32);
1667 }
1668 }
1669 }
1670
1671 if (compute_scratch_bo) {
1672 uint64_t scratch_va = radv_buffer_get_va(compute_scratch_bo);
1673 uint32_t rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
1674 S_008F04_SWIZZLE_ENABLE(1);
1675
1676 radv_cs_add_buffer(queue->device->ws, cs, compute_scratch_bo, 8);
1677
1678 radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
1679 radeon_emit(cs, scratch_va);
1680 radeon_emit(cs, rsrc1);
1681 }
1682
1683 if (i == 0) {
1684 si_cs_emit_cache_flush(cs,
1685 false,
1686 queue->device->physical_device->rad_info.chip_class,
1687 NULL, 0,
1688 					       queue->queue_family_index == RADV_QUEUE_COMPUTE &&
1689 queue->device->physical_device->rad_info.chip_class >= CIK,
1690 (queue->queue_family_index == RADV_QUEUE_COMPUTE ? RADV_CMD_FLAG_CS_PARTIAL_FLUSH : (RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH)) |
1691 RADV_CMD_FLAG_INV_ICACHE |
1692 RADV_CMD_FLAG_INV_SMEM_L1 |
1693 RADV_CMD_FLAG_INV_VMEM_L1 |
1694 RADV_CMD_FLAG_INV_GLOBAL_L2);
1695 } else if (i == 1) {
1696 si_cs_emit_cache_flush(cs,
1697 false,
1698 queue->device->physical_device->rad_info.chip_class,
1699 NULL, 0,
1700 					       queue->queue_family_index == RADV_QUEUE_COMPUTE &&
1701 queue->device->physical_device->rad_info.chip_class >= CIK,
1702 RADV_CMD_FLAG_INV_ICACHE |
1703 RADV_CMD_FLAG_INV_SMEM_L1 |
1704 RADV_CMD_FLAG_INV_VMEM_L1 |
1705 RADV_CMD_FLAG_INV_GLOBAL_L2);
1706 }
1707
1708 if (!queue->device->ws->cs_finalize(cs))
1709 goto fail;
1710 }
1711
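		/* The three streams built above differ only in their cache
		 * flush: dest_cs[0] (the "full flush" preamble) waits for
		 * in-flight shader work with CS/PS partial flushes and
		 * invalidates every cache, dest_cs[1] only invalidates the
		 * caches, and dest_cs[2] carries no flush at all and is used
		 * to continue chunked submissions. */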
1712 if (queue->initial_full_flush_preamble_cs)
1713 queue->device->ws->cs_destroy(queue->initial_full_flush_preamble_cs);
1714
1715 if (queue->initial_preamble_cs)
1716 queue->device->ws->cs_destroy(queue->initial_preamble_cs);
1717
1718 if (queue->continue_preamble_cs)
1719 queue->device->ws->cs_destroy(queue->continue_preamble_cs);
1720
1721 queue->initial_full_flush_preamble_cs = dest_cs[0];
1722 queue->initial_preamble_cs = dest_cs[1];
1723 queue->continue_preamble_cs = dest_cs[2];
1724
1725 if (scratch_bo != queue->scratch_bo) {
1726 if (queue->scratch_bo)
1727 queue->device->ws->buffer_destroy(queue->scratch_bo);
1728 queue->scratch_bo = scratch_bo;
1729 queue->scratch_size = scratch_size;
1730 }
1731
1732 if (compute_scratch_bo != queue->compute_scratch_bo) {
1733 if (queue->compute_scratch_bo)
1734 queue->device->ws->buffer_destroy(queue->compute_scratch_bo);
1735 queue->compute_scratch_bo = compute_scratch_bo;
1736 queue->compute_scratch_size = compute_scratch_size;
1737 }
1738
1739 if (esgs_ring_bo != queue->esgs_ring_bo) {
1740 if (queue->esgs_ring_bo)
1741 queue->device->ws->buffer_destroy(queue->esgs_ring_bo);
1742 queue->esgs_ring_bo = esgs_ring_bo;
1743 queue->esgs_ring_size = esgs_ring_size;
1744 }
1745
1746 if (gsvs_ring_bo != queue->gsvs_ring_bo) {
1747 if (queue->gsvs_ring_bo)
1748 queue->device->ws->buffer_destroy(queue->gsvs_ring_bo);
1749 queue->gsvs_ring_bo = gsvs_ring_bo;
1750 queue->gsvs_ring_size = gsvs_ring_size;
1751 }
1752
1753 if (tess_factor_ring_bo != queue->tess_factor_ring_bo) {
1754 queue->tess_factor_ring_bo = tess_factor_ring_bo;
1755 }
1756
1757 if (tess_offchip_ring_bo != queue->tess_offchip_ring_bo) {
1758 queue->tess_offchip_ring_bo = tess_offchip_ring_bo;
1759 queue->has_tess_rings = true;
1760 }
1761
1762 if (descriptor_bo != queue->descriptor_bo) {
1763 if (queue->descriptor_bo)
1764 queue->device->ws->buffer_destroy(queue->descriptor_bo);
1765
1766 queue->descriptor_bo = descriptor_bo;
1767 }
1768
1769 if (add_sample_positions)
1770 queue->has_sample_positions = true;
1771
1772 *initial_full_flush_preamble_cs = queue->initial_full_flush_preamble_cs;
1773 *initial_preamble_cs = queue->initial_preamble_cs;
1774 *continue_preamble_cs = queue->continue_preamble_cs;
1775 if (!scratch_size && !compute_scratch_size && !esgs_ring_size && !gsvs_ring_size)
1776 *continue_preamble_cs = NULL;
1777 return VK_SUCCESS;
1778 fail:
1779 for (int i = 0; i < ARRAY_SIZE(dest_cs); ++i)
1780 if (dest_cs[i])
1781 queue->device->ws->cs_destroy(dest_cs[i]);
1782 if (descriptor_bo && descriptor_bo != queue->descriptor_bo)
1783 queue->device->ws->buffer_destroy(descriptor_bo);
1784 if (scratch_bo && scratch_bo != queue->scratch_bo)
1785 queue->device->ws->buffer_destroy(scratch_bo);
1786 if (compute_scratch_bo && compute_scratch_bo != queue->compute_scratch_bo)
1787 queue->device->ws->buffer_destroy(compute_scratch_bo);
1788 if (esgs_ring_bo && esgs_ring_bo != queue->esgs_ring_bo)
1789 queue->device->ws->buffer_destroy(esgs_ring_bo);
1790 if (gsvs_ring_bo && gsvs_ring_bo != queue->gsvs_ring_bo)
1791 queue->device->ws->buffer_destroy(gsvs_ring_bo);
1792 if (tess_factor_ring_bo && tess_factor_ring_bo != queue->tess_factor_ring_bo)
1793 queue->device->ws->buffer_destroy(tess_factor_ring_bo);
1794 if (tess_offchip_ring_bo && tess_offchip_ring_bo != queue->tess_offchip_ring_bo)
1795 queue->device->ws->buffer_destroy(tess_offchip_ring_bo);
1796 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
1797 }
1798
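/* Semaphores attached to a submission are backed either by a kernel DRM
 * syncobj (external/shareable semaphores, including temporary imports) or
 * by a winsys semaphore object. radv_alloc_sem_counts() makes two passes
 * over the handles: the first sizes the per-type arrays, the second fills
 * them, with a temporary syncobj taking precedence over a permanent one.
 */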
1799 static VkResult radv_alloc_sem_counts(struct radv_winsys_sem_counts *counts,
1800 int num_sems,
1801 const VkSemaphore *sems,
1802 bool reset_temp)
1803 {
1804 int syncobj_idx = 0, sem_idx = 0;
1805
1806 if (num_sems == 0)
1807 return VK_SUCCESS;
1808 for (uint32_t i = 0; i < num_sems; i++) {
1809 RADV_FROM_HANDLE(radv_semaphore, sem, sems[i]);
1810
1811 if (sem->temp_syncobj || sem->syncobj)
1812 counts->syncobj_count++;
1813 else
1814 counts->sem_count++;
1815 }
1816
1817 if (counts->syncobj_count) {
1818 counts->syncobj = (uint32_t *)malloc(sizeof(uint32_t) * counts->syncobj_count);
1819 if (!counts->syncobj)
1820 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1821 }
1822
1823 if (counts->sem_count) {
1824 counts->sem = (struct radeon_winsys_sem **)malloc(sizeof(struct radeon_winsys_sem *) * counts->sem_count);
1825 if (!counts->sem) {
1826 free(counts->syncobj);
1827 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1828 }
1829 }
1830
1831 for (uint32_t i = 0; i < num_sems; i++) {
1832 RADV_FROM_HANDLE(radv_semaphore, sem, sems[i]);
1833
1834 if (sem->temp_syncobj) {
1835 counts->syncobj[syncobj_idx++] = sem->temp_syncobj;
1836 }
1837 else if (sem->syncobj)
1838 counts->syncobj[syncobj_idx++] = sem->syncobj;
1839 else {
1840 assert(sem->sem);
1841 counts->sem[sem_idx++] = sem->sem;
1842 }
1843 }
1844
1845 return VK_SUCCESS;
1846 }
1847
1848 void radv_free_sem_info(struct radv_winsys_sem_info *sem_info)
1849 {
1850 free(sem_info->wait.syncobj);
1851 free(sem_info->wait.sem);
1852 free(sem_info->signal.syncobj);
1853 free(sem_info->signal.sem);
1854 }
1855
1856
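/* A temporary syncobj is the transient payload installed by a semaphore
 * import with VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR. The spec makes such a
 * payload cover only the next wait, so once a submission has waited on it
 * the syncobj must be destroyed and the handle cleared so the semaphore
 * falls back to its permanent payload. Application-side pattern this
 * serves (illustrative sketch only; 'device', 'sem' and 'fd' are the
 * application's, error handling omitted):
 */
#if 0
	VkImportSemaphoreFdInfoKHR import = {
		.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR,
		.semaphore = sem,
		.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR,
		.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,
		.fd = fd,
	};
	vkImportSemaphoreFdKHR(device, &import);
	/* The first vkQueueSubmit() that waits on 'sem' consumes the
	 * imported payload; radv frees the temporary syncobj right after
	 * that submission (see radv_QueueSubmit below). */
#endif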
1857 static void radv_free_temp_syncobjs(struct radv_device *device,
1858 int num_sems,
1859 const VkSemaphore *sems)
1860 {
1861 for (uint32_t i = 0; i < num_sems; i++) {
1862 RADV_FROM_HANDLE(radv_semaphore, sem, sems[i]);
1863
1864 if (sem->temp_syncobj) {
1865 device->ws->destroy_syncobj(device->ws, sem->temp_syncobj);
1866 sem->temp_syncobj = 0;
1867 }
1868 }
1869 }
1870
1871 VkResult radv_alloc_sem_info(struct radv_winsys_sem_info *sem_info,
1872 int num_wait_sems,
1873 const VkSemaphore *wait_sems,
1874 int num_signal_sems,
1875 const VkSemaphore *signal_sems)
1876 {
1877 VkResult ret;
1878 memset(sem_info, 0, sizeof(*sem_info));
1879
1880 ret = radv_alloc_sem_counts(&sem_info->wait, num_wait_sems, wait_sems, true);
1881 if (ret)
1882 return ret;
1883 ret = radv_alloc_sem_counts(&sem_info->signal, num_signal_sems, signal_sems, false);
1884 if (ret)
1885 radv_free_sem_info(sem_info);
1886
1887 /* caller can override these */
1888 sem_info->cs_emit_wait = true;
1889 sem_info->cs_emit_signal = true;
1890 return ret;
1891 }
1892
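/* vkQueueSubmit runs in two phases: first the maximum scratch and ring
 * sizes over all command buffers are gathered and the preamble command
 * streams are (re)built, so an allocation failure cannot leave a partially
 * executed submission behind; only then are the command buffers submitted,
 * in chunks of max_cs_submission (1 when a trace buffer is active, so a
 * hang can be pinned to a single chunk).
 */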
1893 VkResult radv_QueueSubmit(
1894 VkQueue _queue,
1895 uint32_t submitCount,
1896 const VkSubmitInfo* pSubmits,
1897 VkFence _fence)
1898 {
1899 RADV_FROM_HANDLE(radv_queue, queue, _queue);
1900 RADV_FROM_HANDLE(radv_fence, fence, _fence);
1901 struct radeon_winsys_fence *base_fence = fence ? fence->fence : NULL;
1902 struct radeon_winsys_ctx *ctx = queue->hw_ctx;
1903 int ret;
1904 uint32_t max_cs_submission = queue->device->trace_bo ? 1 : UINT32_MAX;
1905 uint32_t scratch_size = 0;
1906 uint32_t compute_scratch_size = 0;
1907 uint32_t esgs_ring_size = 0, gsvs_ring_size = 0;
1908 struct radeon_winsys_cs *initial_preamble_cs = NULL, *initial_flush_preamble_cs = NULL, *continue_preamble_cs = NULL;
1909 VkResult result;
1910 bool fence_emitted = false;
1911 bool tess_rings_needed = false;
1912 bool sample_positions_needed = false;
1913
1914 /* Do this first so failing to allocate scratch buffers can't result in
1915 * partially executed submissions. */
1916 for (uint32_t i = 0; i < submitCount; i++) {
1917 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
1918 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer,
1919 pSubmits[i].pCommandBuffers[j]);
1920
1921 scratch_size = MAX2(scratch_size, cmd_buffer->scratch_size_needed);
1922 compute_scratch_size = MAX2(compute_scratch_size,
1923 cmd_buffer->compute_scratch_size_needed);
1924 esgs_ring_size = MAX2(esgs_ring_size, cmd_buffer->esgs_ring_size_needed);
1925 gsvs_ring_size = MAX2(gsvs_ring_size, cmd_buffer->gsvs_ring_size_needed);
1926 tess_rings_needed |= cmd_buffer->tess_rings_needed;
1927 sample_positions_needed |= cmd_buffer->sample_positions_needed;
1928 }
1929 }
1930
1931 result = radv_get_preamble_cs(queue, scratch_size, compute_scratch_size,
1932 esgs_ring_size, gsvs_ring_size, tess_rings_needed,
1933 sample_positions_needed, &initial_flush_preamble_cs,
1934 &initial_preamble_cs, &continue_preamble_cs);
1935 if (result != VK_SUCCESS)
1936 return result;
1937
1938 for (uint32_t i = 0; i < submitCount; i++) {
1939 struct radeon_winsys_cs **cs_array;
1940 bool do_flush = !i || pSubmits[i].pWaitDstStageMask;
1941 bool can_patch = true;
1942 uint32_t advance;
1943 struct radv_winsys_sem_info sem_info;
1944
1945 result = radv_alloc_sem_info(&sem_info,
1946 pSubmits[i].waitSemaphoreCount,
1947 pSubmits[i].pWaitSemaphores,
1948 pSubmits[i].signalSemaphoreCount,
1949 pSubmits[i].pSignalSemaphores);
1950 if (result != VK_SUCCESS)
1951 return result;
1952
1953 if (!pSubmits[i].commandBufferCount) {
1954 if (pSubmits[i].waitSemaphoreCount || pSubmits[i].signalSemaphoreCount) {
1955 ret = queue->device->ws->cs_submit(ctx, queue->queue_idx,
1956 &queue->device->empty_cs[queue->queue_family_index],
1957 1, NULL, NULL,
1958 &sem_info,
1959 false, base_fence);
1960 if (ret) {
1961 radv_loge("failed to submit CS %d\n", i);
1962 abort();
1963 }
1964 fence_emitted = true;
1965 }
1966 radv_free_sem_info(&sem_info);
1967 continue;
1968 }
1969
1970 		cs_array = malloc(sizeof(struct radeon_winsys_cs *) *
1971 					(pSubmits[i].commandBufferCount));
		if (!cs_array) {
			/* Don't leak the semaphore arrays on allocation failure. */
			radv_free_sem_info(&sem_info);
			return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
		}
1972
1973 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
1974 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer,
1975 pSubmits[i].pCommandBuffers[j]);
1976 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1977
1978 cs_array[j] = cmd_buffer->cs;
1979 if ((cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT))
1980 can_patch = false;
1981 }
1982
1983 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j += advance) {
1984 struct radeon_winsys_cs *initial_preamble = (do_flush && !j) ? initial_flush_preamble_cs : initial_preamble_cs;
1985 advance = MIN2(max_cs_submission,
1986 pSubmits[i].commandBufferCount - j);
1987
1988 if (queue->device->trace_bo)
1989 *queue->device->trace_id_ptr = 0;
1990
1991 sem_info.cs_emit_wait = j == 0;
1992 sem_info.cs_emit_signal = j + advance == pSubmits[i].commandBufferCount;
1993
1994 ret = queue->device->ws->cs_submit(ctx, queue->queue_idx, cs_array + j,
1995 advance, initial_preamble, continue_preamble_cs,
1996 &sem_info,
1997 can_patch, base_fence);
1998
1999 if (ret) {
2000 radv_loge("failed to submit CS %d\n", i);
2001 abort();
2002 }
2003 fence_emitted = true;
2004 if (queue->device->trace_bo) {
2005 radv_check_gpu_hangs(queue, cs_array[j]);
2006 }
2007 }
2008
2009 radv_free_temp_syncobjs(queue->device,
2010 pSubmits[i].waitSemaphoreCount,
2011 pSubmits[i].pWaitSemaphores);
2012 radv_free_sem_info(&sem_info);
2013 free(cs_array);
2014 }
2015
2016 if (fence) {
2017 if (!fence_emitted) {
2018 struct radv_winsys_sem_info sem_info = {0};
2019 ret = queue->device->ws->cs_submit(ctx, queue->queue_idx,
2020 &queue->device->empty_cs[queue->queue_family_index],
2021 1, NULL, NULL, &sem_info,
2022 false, base_fence);
2023 }
2024 fence->submitted = true;
2025 }
2026
2027 return VK_SUCCESS;
2028 }
2029
2030 VkResult radv_QueueWaitIdle(
2031 VkQueue _queue)
2032 {
2033 RADV_FROM_HANDLE(radv_queue, queue, _queue);
2034
2035 queue->device->ws->ctx_wait_idle(queue->hw_ctx,
2036 radv_queue_family_to_ring(queue->queue_family_index),
2037 queue->queue_idx);
2038 return VK_SUCCESS;
2039 }
2040
2041 VkResult radv_DeviceWaitIdle(
2042 VkDevice _device)
2043 {
2044 RADV_FROM_HANDLE(radv_device, device, _device);
2045
2046 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
2047 for (unsigned q = 0; q < device->queue_count[i]; q++) {
2048 radv_QueueWaitIdle(radv_queue_to_handle(&device->queues[i][q]));
2049 }
2050 }
2051 return VK_SUCCESS;
2052 }
2053
2054 PFN_vkVoidFunction radv_GetInstanceProcAddr(
2055 VkInstance instance,
2056 const char* pName)
2057 {
2058 return radv_lookup_entrypoint(pName);
2059 }
2060
2061 /* The loader wants us to expose a second GetInstanceProcAddr function
2062 * to work around certain LD_PRELOAD issues seen in apps.
2063 */
2064 PUBLIC
2065 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
2066 VkInstance instance,
2067 const char* pName);
2068
2069 PUBLIC
2070 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
2071 VkInstance instance,
2072 const char* pName)
2073 {
2074 return radv_GetInstanceProcAddr(instance, pName);
2075 }
2076
2077 PFN_vkVoidFunction radv_GetDeviceProcAddr(
2078 VkDevice device,
2079 const char* pName)
2080 {
2081 return radv_lookup_entrypoint(pName);
2082 }
2083
2084 bool radv_get_memory_fd(struct radv_device *device,
2085 struct radv_device_memory *memory,
2086 int *pFD)
2087 {
2088 struct radeon_bo_metadata metadata;
2089
2090 if (memory->image) {
2091 radv_init_metadata(device, memory->image, &metadata);
2092 device->ws->buffer_set_metadata(memory->bo, &metadata);
2093 }
2094
2095 return device->ws->buffer_get_fd(device->ws, memory->bo,
2096 pFD);
2097 }
2098
2099 VkResult radv_alloc_memory(VkDevice _device,
2100 const VkMemoryAllocateInfo* pAllocateInfo,
2101 const VkAllocationCallbacks* pAllocator,
2102 enum radv_mem_flags_bits mem_flags,
2103 VkDeviceMemory* pMem)
2104 {
2105 RADV_FROM_HANDLE(radv_device, device, _device);
2106 struct radv_device_memory *mem;
2107 VkResult result;
2108 enum radeon_bo_domain domain;
2109 uint32_t flags = 0;
2110 enum radv_mem_type mem_type_index = device->physical_device->mem_type_indices[pAllocateInfo->memoryTypeIndex];
2111
2112 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
2113
2114 if (pAllocateInfo->allocationSize == 0) {
2115 /* Apparently, this is allowed */
2116 *pMem = VK_NULL_HANDLE;
2117 return VK_SUCCESS;
2118 }
2119
2120 const VkImportMemoryFdInfoKHR *import_info =
2121 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
2122 const VkMemoryDedicatedAllocateInfoKHR *dedicate_info =
2123 vk_find_struct_const(pAllocateInfo->pNext, MEMORY_DEDICATED_ALLOCATE_INFO_KHR);
2124
2125 mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
2126 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2127 if (mem == NULL)
2128 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2129
2130 if (dedicate_info) {
2131 mem->image = radv_image_from_handle(dedicate_info->image);
2132 mem->buffer = radv_buffer_from_handle(dedicate_info->buffer);
2133 } else {
2134 mem->image = NULL;
2135 mem->buffer = NULL;
2136 }
2137
2138 if (import_info) {
2139 assert(import_info->handleType ==
2140 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
2141 mem->bo = device->ws->buffer_from_fd(device->ws, import_info->fd,
2142 NULL, NULL);
2143 if (!mem->bo) {
2144 result = VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR;
2145 goto fail;
2146 } else {
2147 close(import_info->fd);
2148 goto out_success;
2149 }
2150 }
2151
2152 uint64_t alloc_size = align_u64(pAllocateInfo->allocationSize, 4096);
2153 if (mem_type_index == RADV_MEM_TYPE_GTT_WRITE_COMBINE ||
2154 mem_type_index == RADV_MEM_TYPE_GTT_CACHED)
2155 domain = RADEON_DOMAIN_GTT;
2156 else
2157 domain = RADEON_DOMAIN_VRAM;
2158
2159 if (mem_type_index == RADV_MEM_TYPE_VRAM)
2160 flags |= RADEON_FLAG_NO_CPU_ACCESS;
2161 else
2162 flags |= RADEON_FLAG_CPU_ACCESS;
2163
2164 if (mem_type_index == RADV_MEM_TYPE_GTT_WRITE_COMBINE)
2165 flags |= RADEON_FLAG_GTT_WC;
2166
2167 if (mem_flags & RADV_MEM_IMPLICIT_SYNC)
2168 flags |= RADEON_FLAG_IMPLICIT_SYNC;
2169
2170 if (!dedicate_info && !import_info)
2171 flags |= RADEON_FLAG_NO_INTERPROCESS_SHARING;
2172
2173 mem->bo = device->ws->buffer_create(device->ws, alloc_size, device->physical_device->rad_info.max_alignment,
2174 domain, flags);
2175
2176 if (!mem->bo) {
2177 result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
2178 goto fail;
2179 }
2180 mem->type_index = mem_type_index;
2181 out_success:
2182 *pMem = radv_device_memory_to_handle(mem);
2183
2184 return VK_SUCCESS;
2185
2186 fail:
2187 vk_free2(&device->alloc, pAllocator, mem);
2188
2189 return result;
2190 }
2191
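/* How the pNext chain steers radv_alloc_memory (illustrative sketch only;
 * 'device', 'image', 'reqs', 'index' and 'mem' are the application's): a
 * dedicated image allocation both records the image so metadata can be
 * attached on export and keeps the BO shareable across processes.
 */
#if 0
	VkMemoryDedicatedAllocateInfoKHR dedicated = {
		.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,
		.image = image,
	};
	VkMemoryAllocateInfo alloc = {
		.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
		.pNext = &dedicated,
		.allocationSize = reqs.size,
		.memoryTypeIndex = index,
	};
	vkAllocateMemory(device, &alloc, NULL, &mem);
#endif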
2192 VkResult radv_AllocateMemory(
2193 VkDevice _device,
2194 const VkMemoryAllocateInfo* pAllocateInfo,
2195 const VkAllocationCallbacks* pAllocator,
2196 VkDeviceMemory* pMem)
2197 {
2198 return radv_alloc_memory(_device, pAllocateInfo, pAllocator, 0, pMem);
2199 }
2200
2201 void radv_FreeMemory(
2202 VkDevice _device,
2203 VkDeviceMemory _mem,
2204 const VkAllocationCallbacks* pAllocator)
2205 {
2206 RADV_FROM_HANDLE(radv_device, device, _device);
2207 RADV_FROM_HANDLE(radv_device_memory, mem, _mem);
2208
2209 if (mem == NULL)
2210 return;
2211
2212 device->ws->buffer_destroy(mem->bo);
2213 mem->bo = NULL;
2214
2215 vk_free2(&device->alloc, pAllocator, mem);
2216 }
2217
2218 VkResult radv_MapMemory(
2219 VkDevice _device,
2220 VkDeviceMemory _memory,
2221 VkDeviceSize offset,
2222 VkDeviceSize size,
2223 VkMemoryMapFlags flags,
2224 void** ppData)
2225 {
2226 RADV_FROM_HANDLE(radv_device, device, _device);
2227 RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
2228
2229 if (mem == NULL) {
2230 *ppData = NULL;
2231 return VK_SUCCESS;
2232 }
2233
2234 *ppData = device->ws->buffer_map(mem->bo);
2235 if (*ppData) {
2236 		*ppData = (uint8_t *)*ppData + offset;
2237 return VK_SUCCESS;
2238 }
2239
2240 return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
2241 }
2242
2243 void radv_UnmapMemory(
2244 VkDevice _device,
2245 VkDeviceMemory _memory)
2246 {
2247 RADV_FROM_HANDLE(radv_device, device, _device);
2248 RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
2249
2250 if (mem == NULL)
2251 return;
2252
2253 device->ws->buffer_unmap(mem->bo);
2254 }
2255
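/* All host-visible memory types radv exposes are also HOST_COHERENT, so
 * flushing and invalidating mapped ranges are no-ops. */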
2256 VkResult radv_FlushMappedMemoryRanges(
2257 VkDevice _device,
2258 uint32_t memoryRangeCount,
2259 const VkMappedMemoryRange* pMemoryRanges)
2260 {
2261 return VK_SUCCESS;
2262 }
2263
2264 VkResult radv_InvalidateMappedMemoryRanges(
2265 VkDevice _device,
2266 uint32_t memoryRangeCount,
2267 const VkMappedMemoryRange* pMemoryRanges)
2268 {
2269 return VK_SUCCESS;
2270 }
2271
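/* Buffers can live in any exposed memory type, so memoryTypeBits below is
 * the all-ones mask (1u << memoryTypeCount) - 1; only the alignment
 * differs, 4096 bytes for sparse buffers versus 16 otherwise. */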
2272 void radv_GetBufferMemoryRequirements(
2273 VkDevice _device,
2274 VkBuffer _buffer,
2275 VkMemoryRequirements* pMemoryRequirements)
2276 {
2277 RADV_FROM_HANDLE(radv_device, device, _device);
2278 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
2279
2280 pMemoryRequirements->memoryTypeBits = (1u << device->physical_device->memory_properties.memoryTypeCount) - 1;
2281
2282 if (buffer->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)
2283 pMemoryRequirements->alignment = 4096;
2284 else
2285 pMemoryRequirements->alignment = 16;
2286
2287 pMemoryRequirements->size = align64(buffer->size, pMemoryRequirements->alignment);
2288 }
2289
2290 void radv_GetBufferMemoryRequirements2KHR(
2291 VkDevice device,
2292 const VkBufferMemoryRequirementsInfo2KHR* pInfo,
2293 VkMemoryRequirements2KHR* pMemoryRequirements)
2294 {
2295 radv_GetBufferMemoryRequirements(device, pInfo->buffer,
2296 &pMemoryRequirements->memoryRequirements);
2297 RADV_FROM_HANDLE(radv_buffer, buffer, pInfo->buffer);
2298 vk_foreach_struct(ext, pMemoryRequirements->pNext) {
2299 switch (ext->sType) {
2300 case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR: {
2301 VkMemoryDedicatedRequirementsKHR *req =
2302 (VkMemoryDedicatedRequirementsKHR *) ext;
2303 req->requiresDedicatedAllocation = buffer->shareable;
2304 req->prefersDedicatedAllocation = req->requiresDedicatedAllocation;
2305 break;
2306 }
2307 default:
2308 break;
2309 }
2310 }
2311 }
2312
2313 void radv_GetImageMemoryRequirements(
2314 VkDevice _device,
2315 VkImage _image,
2316 VkMemoryRequirements* pMemoryRequirements)
2317 {
2318 RADV_FROM_HANDLE(radv_device, device, _device);
2319 RADV_FROM_HANDLE(radv_image, image, _image);
2320
2321 pMemoryRequirements->memoryTypeBits = (1u << device->physical_device->memory_properties.memoryTypeCount) - 1;
2322
2323 pMemoryRequirements->size = image->size;
2324 pMemoryRequirements->alignment = image->alignment;
2325 }
2326
2327 void radv_GetImageMemoryRequirements2KHR(
2328 VkDevice device,
2329 const VkImageMemoryRequirementsInfo2KHR* pInfo,
2330 VkMemoryRequirements2KHR* pMemoryRequirements)
2331 {
2332 radv_GetImageMemoryRequirements(device, pInfo->image,
2333 &pMemoryRequirements->memoryRequirements);
2334
2335 RADV_FROM_HANDLE(radv_image, image, pInfo->image);
2336
2337 vk_foreach_struct(ext, pMemoryRequirements->pNext) {
2338 switch (ext->sType) {
2339 case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR: {
2340 VkMemoryDedicatedRequirementsKHR *req =
2341 (VkMemoryDedicatedRequirementsKHR *) ext;
2342 req->requiresDedicatedAllocation = image->shareable;
2343 req->prefersDedicatedAllocation = req->requiresDedicatedAllocation;
2344 break;
2345 }
2346 default:
2347 break;
2348 }
2349 }
2350 }
2351
2352 void radv_GetImageSparseMemoryRequirements(
2353 VkDevice device,
2354 VkImage image,
2355 uint32_t* pSparseMemoryRequirementCount,
2356 VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
2357 {
2358 stub();
2359 }
2360
2361 void radv_GetImageSparseMemoryRequirements2KHR(
2362 VkDevice device,
2363 const VkImageSparseMemoryRequirementsInfo2KHR* pInfo,
2364 uint32_t* pSparseMemoryRequirementCount,
2365 VkSparseImageMemoryRequirements2KHR* pSparseMemoryRequirements)
2366 {
2367 stub();
2368 }
2369
2370 void radv_GetDeviceMemoryCommitment(
2371 VkDevice device,
2372 VkDeviceMemory memory,
2373 VkDeviceSize* pCommittedMemoryInBytes)
2374 {
2375 *pCommittedMemoryInBytes = 0;
2376 }
2377
2378 VkResult radv_BindBufferMemory2KHR(VkDevice device,
2379 uint32_t bindInfoCount,
2380 const VkBindBufferMemoryInfoKHR *pBindInfos)
2381 {
2382 for (uint32_t i = 0; i < bindInfoCount; ++i) {
2383 RADV_FROM_HANDLE(radv_device_memory, mem, pBindInfos[i].memory);
2384 RADV_FROM_HANDLE(radv_buffer, buffer, pBindInfos[i].buffer);
2385
2386 if (mem) {
2387 buffer->bo = mem->bo;
2388 buffer->offset = pBindInfos[i].memoryOffset;
2389 } else {
2390 buffer->bo = NULL;
2391 }
2392 }
2393 return VK_SUCCESS;
2394 }
2395
2396 VkResult radv_BindBufferMemory(
2397 VkDevice device,
2398 VkBuffer buffer,
2399 VkDeviceMemory memory,
2400 VkDeviceSize memoryOffset)
2401 {
2402 const VkBindBufferMemoryInfoKHR info = {
2403 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
2404 .buffer = buffer,
2405 .memory = memory,
2406 .memoryOffset = memoryOffset
2407 };
2408
2409 return radv_BindBufferMemory2KHR(device, 1, &info);
2410 }
2411
2412 VkResult radv_BindImageMemory2KHR(VkDevice device,
2413 uint32_t bindInfoCount,
2414 const VkBindImageMemoryInfoKHR *pBindInfos)
2415 {
2416 for (uint32_t i = 0; i < bindInfoCount; ++i) {
2417 RADV_FROM_HANDLE(radv_device_memory, mem, pBindInfos[i].memory);
2418 RADV_FROM_HANDLE(radv_image, image, pBindInfos[i].image);
2419
2420 if (mem) {
2421 image->bo = mem->bo;
2422 image->offset = pBindInfos[i].memoryOffset;
2423 } else {
2424 image->bo = NULL;
2425 image->offset = 0;
2426 }
2427 }
2428 return VK_SUCCESS;
2429 }
2430
2431
2432 VkResult radv_BindImageMemory(
2433 VkDevice device,
2434 VkImage image,
2435 VkDeviceMemory memory,
2436 VkDeviceSize memoryOffset)
2437 {
2438 const VkBindImageMemoryInfoKHR info = {
2439 		.sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR,
2440 .image = image,
2441 .memory = memory,
2442 .memoryOffset = memoryOffset
2443 };
2444
2445 return radv_BindImageMemory2KHR(device, 1, &info);
2446 }
2447
2448
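/* Sparse binding: a buffer created with SPARSE_BINDING gets a purely
 * virtual BO up front (see radv_CreateBuffer below), and QueueBindSparse
 * then maps ranges of real memory into that virtual range through
 * buffer_virtual_bind(); a NULL backing BO unbinds the range. */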
2449 static void
2450 radv_sparse_buffer_bind_memory(struct radv_device *device,
2451 const VkSparseBufferMemoryBindInfo *bind)
2452 {
2453 RADV_FROM_HANDLE(radv_buffer, buffer, bind->buffer);
2454
2455 for (uint32_t i = 0; i < bind->bindCount; ++i) {
2456 struct radv_device_memory *mem = NULL;
2457
2458 if (bind->pBinds[i].memory != VK_NULL_HANDLE)
2459 mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
2460
2461 device->ws->buffer_virtual_bind(buffer->bo,
2462 bind->pBinds[i].resourceOffset,
2463 bind->pBinds[i].size,
2464 mem ? mem->bo : NULL,
2465 bind->pBinds[i].memoryOffset);
2466 }
2467 }
2468
2469 static void
2470 radv_sparse_image_opaque_bind_memory(struct radv_device *device,
2471 const VkSparseImageOpaqueMemoryBindInfo *bind)
2472 {
2473 RADV_FROM_HANDLE(radv_image, image, bind->image);
2474
2475 for (uint32_t i = 0; i < bind->bindCount; ++i) {
2476 struct radv_device_memory *mem = NULL;
2477
2478 if (bind->pBinds[i].memory != VK_NULL_HANDLE)
2479 mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
2480
2481 device->ws->buffer_virtual_bind(image->bo,
2482 bind->pBinds[i].resourceOffset,
2483 bind->pBinds[i].size,
2484 mem ? mem->bo : NULL,
2485 bind->pBinds[i].memoryOffset);
2486 }
2487 }
2488
2489 VkResult radv_QueueBindSparse(
2490 VkQueue _queue,
2491 uint32_t bindInfoCount,
2492 const VkBindSparseInfo* pBindInfo,
2493 VkFence _fence)
2494 {
2495 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2496 RADV_FROM_HANDLE(radv_queue, queue, _queue);
2497 struct radeon_winsys_fence *base_fence = fence ? fence->fence : NULL;
2498 bool fence_emitted = false;
2499
2500 for (uint32_t i = 0; i < bindInfoCount; ++i) {
2501 struct radv_winsys_sem_info sem_info;
2502 for (uint32_t j = 0; j < pBindInfo[i].bufferBindCount; ++j) {
2503 radv_sparse_buffer_bind_memory(queue->device,
2504 pBindInfo[i].pBufferBinds + j);
2505 }
2506
2507 for (uint32_t j = 0; j < pBindInfo[i].imageOpaqueBindCount; ++j) {
2508 radv_sparse_image_opaque_bind_memory(queue->device,
2509 pBindInfo[i].pImageOpaqueBinds + j);
2510 }
2511
2512 VkResult result;
2513 result = radv_alloc_sem_info(&sem_info,
2514 pBindInfo[i].waitSemaphoreCount,
2515 pBindInfo[i].pWaitSemaphores,
2516 pBindInfo[i].signalSemaphoreCount,
2517 pBindInfo[i].pSignalSemaphores);
2518 if (result != VK_SUCCESS)
2519 return result;
2520
2521 if (pBindInfo[i].waitSemaphoreCount || pBindInfo[i].signalSemaphoreCount) {
2522 queue->device->ws->cs_submit(queue->hw_ctx, queue->queue_idx,
2523 &queue->device->empty_cs[queue->queue_family_index],
2524 1, NULL, NULL,
2525 &sem_info,
2526 false, base_fence);
2527 fence_emitted = true;
2528 if (fence)
2529 fence->submitted = true;
2530 }
2531
2532 radv_free_sem_info(&sem_info);
2533
2534 }
2535
2536 if (fence && !fence_emitted) {
2537 fence->signalled = true;
2538 }
2539
2540 return VK_SUCCESS;
2541 }
2542
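/* Fences pair a winsys fence with two host-side flags: 'submitted' is set
 * once work referencing the fence reaches the kernel, and 'signalled'
 * caches a completed (or create-time signaled) state, letting
 * GetFenceStatus and WaitForFences answer without calling into the winsys
 * when the result is already known. */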
2543 VkResult radv_CreateFence(
2544 VkDevice _device,
2545 const VkFenceCreateInfo* pCreateInfo,
2546 const VkAllocationCallbacks* pAllocator,
2547 VkFence* pFence)
2548 {
2549 RADV_FROM_HANDLE(radv_device, device, _device);
2550 struct radv_fence *fence = vk_alloc2(&device->alloc, pAllocator,
2551 sizeof(*fence), 8,
2552 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2553
2554 if (!fence)
2555 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2556
2557 fence->submitted = false;
2558 fence->signalled = !!(pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT);
2559 fence->fence = device->ws->create_fence();
2560 if (!fence->fence) {
2561 vk_free2(&device->alloc, pAllocator, fence);
2562 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2563 }
2564
2565 *pFence = radv_fence_to_handle(fence);
2566
2567 return VK_SUCCESS;
2568 }
2569
2570 void radv_DestroyFence(
2571 VkDevice _device,
2572 VkFence _fence,
2573 const VkAllocationCallbacks* pAllocator)
2574 {
2575 RADV_FROM_HANDLE(radv_device, device, _device);
2576 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2577
2578 if (!fence)
2579 return;
2580 device->ws->destroy_fence(fence->fence);
2581 vk_free2(&device->alloc, pAllocator, fence);
2582 }
2583
2584 static uint64_t radv_get_absolute_timeout(uint64_t timeout)
2585 {
2586 uint64_t current_time;
2587 struct timespec tv;
2588
2589 clock_gettime(CLOCK_MONOTONIC, &tv);
2590 current_time = tv.tv_nsec + tv.tv_sec*1000000000ull;
2591
2592 timeout = MIN2(UINT64_MAX - current_time, timeout);
2593
2594 return current_time + timeout;
2595 }
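/* Worked example: with current_time = 100s and timeout = 3s the returned
 * deadline is 103s on CLOCK_MONOTONIC; a caller passing UINT64_MAX gets
 * UINT64_MAX back, since the MIN2() clamp keeps the addition from
 * wrapping. */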
2596
2597 VkResult radv_WaitForFences(
2598 VkDevice _device,
2599 uint32_t fenceCount,
2600 const VkFence* pFences,
2601 VkBool32 waitAll,
2602 uint64_t timeout)
2603 {
2604 RADV_FROM_HANDLE(radv_device, device, _device);
2605 timeout = radv_get_absolute_timeout(timeout);
2606
2607 if (!waitAll && fenceCount > 1) {
2608 fprintf(stderr, "radv: WaitForFences without waitAll not implemented yet\n");
2609 }
2610
2611 for (uint32_t i = 0; i < fenceCount; ++i) {
2612 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
2613 bool expired = false;
2614
2615 if (fence->signalled)
2616 continue;
2617
2618 if (!fence->submitted)
2619 return VK_TIMEOUT;
2620
2621 expired = device->ws->fence_wait(device->ws, fence->fence, true, timeout);
2622 if (!expired)
2623 return VK_TIMEOUT;
2624
2625 fence->signalled = true;
2626 }
2627
2628 return VK_SUCCESS;
2629 }
2630
2631 VkResult radv_ResetFences(VkDevice device,
2632 uint32_t fenceCount,
2633 const VkFence *pFences)
2634 {
2635 for (unsigned i = 0; i < fenceCount; ++i) {
2636 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
2637 fence->submitted = fence->signalled = false;
2638 }
2639
2640 return VK_SUCCESS;
2641 }
2642
2643 VkResult radv_GetFenceStatus(VkDevice _device, VkFence _fence)
2644 {
2645 RADV_FROM_HANDLE(radv_device, device, _device);
2646 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2647
2648 if (fence->signalled)
2649 return VK_SUCCESS;
2650 if (!fence->submitted)
2651 return VK_NOT_READY;
2652
2653 if (!device->ws->fence_wait(device->ws, fence->fence, false, 0))
2654 return VK_NOT_READY;
2655
2656 return VK_SUCCESS;
2657 }
2658
2659
2660 // Queue semaphore functions
2661
2662 VkResult radv_CreateSemaphore(
2663 VkDevice _device,
2664 const VkSemaphoreCreateInfo* pCreateInfo,
2665 const VkAllocationCallbacks* pAllocator,
2666 VkSemaphore* pSemaphore)
2667 {
2668 RADV_FROM_HANDLE(radv_device, device, _device);
2669 const VkExportSemaphoreCreateInfoKHR *export =
2670 vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO_KHR);
2671 VkExternalSemaphoreHandleTypeFlagsKHR handleTypes =
2672 export ? export->handleTypes : 0;
2673
2674 struct radv_semaphore *sem = vk_alloc2(&device->alloc, pAllocator,
2675 sizeof(*sem), 8,
2676 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2677 if (!sem)
2678 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2679
2680 sem->temp_syncobj = 0;
2681 /* create a syncobject if we are going to export this semaphore */
2682 if (handleTypes) {
2683 assert (device->physical_device->rad_info.has_syncobj);
2684 		assert (handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
2685 int ret = device->ws->create_syncobj(device->ws, &sem->syncobj);
2686 if (ret) {
2687 vk_free2(&device->alloc, pAllocator, sem);
2688 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2689 }
2690 sem->sem = NULL;
2691 } else {
2692 sem->sem = device->ws->create_sem(device->ws);
2693 if (!sem->sem) {
2694 vk_free2(&device->alloc, pAllocator, sem);
2695 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2696 }
2697 sem->syncobj = 0;
2698 }
2699
2700 *pSemaphore = radv_semaphore_to_handle(sem);
2701 return VK_SUCCESS;
2702 }
2703
2704 void radv_DestroySemaphore(
2705 VkDevice _device,
2706 VkSemaphore _semaphore,
2707 const VkAllocationCallbacks* pAllocator)
2708 {
2709 RADV_FROM_HANDLE(radv_device, device, _device);
2710 RADV_FROM_HANDLE(radv_semaphore, sem, _semaphore);
2711 if (!_semaphore)
2712 return;
2713
2714 if (sem->syncobj)
2715 device->ws->destroy_syncobj(device->ws, sem->syncobj);
2716 else
2717 device->ws->destroy_sem(sem->sem);
2718 vk_free2(&device->alloc, pAllocator, sem);
2719 }
2720
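/* Events are backed by a single uncached 8-byte GTT buffer that stays
 * mapped for the event's lifetime: GPU and host both read/write that word
 * directly (1 = set, 0 = reset), so Get/Set/ResetEvent reduce to plain
 * memory accesses. */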
2721 VkResult radv_CreateEvent(
2722 VkDevice _device,
2723 const VkEventCreateInfo* pCreateInfo,
2724 const VkAllocationCallbacks* pAllocator,
2725 VkEvent* pEvent)
2726 {
2727 RADV_FROM_HANDLE(radv_device, device, _device);
2728 struct radv_event *event = vk_alloc2(&device->alloc, pAllocator,
2729 sizeof(*event), 8,
2730 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2731
2732 if (!event)
2733 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2734
2735 event->bo = device->ws->buffer_create(device->ws, 8, 8,
2736 RADEON_DOMAIN_GTT,
2737 RADEON_FLAG_VA_UNCACHED | RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING);
2738 if (!event->bo) {
2739 vk_free2(&device->alloc, pAllocator, event);
2740 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
2741 }
2742
2743 event->map = (uint64_t*)device->ws->buffer_map(event->bo);
2744
2745 *pEvent = radv_event_to_handle(event);
2746
2747 return VK_SUCCESS;
2748 }
2749
2750 void radv_DestroyEvent(
2751 VkDevice _device,
2752 VkEvent _event,
2753 const VkAllocationCallbacks* pAllocator)
2754 {
2755 RADV_FROM_HANDLE(radv_device, device, _device);
2756 RADV_FROM_HANDLE(radv_event, event, _event);
2757
2758 if (!event)
2759 return;
2760 device->ws->buffer_destroy(event->bo);
2761 vk_free2(&device->alloc, pAllocator, event);
2762 }
2763
2764 VkResult radv_GetEventStatus(
2765 VkDevice _device,
2766 VkEvent _event)
2767 {
2768 RADV_FROM_HANDLE(radv_event, event, _event);
2769
2770 if (*event->map == 1)
2771 return VK_EVENT_SET;
2772 return VK_EVENT_RESET;
2773 }
2774
2775 VkResult radv_SetEvent(
2776 VkDevice _device,
2777 VkEvent _event)
2778 {
2779 RADV_FROM_HANDLE(radv_event, event, _event);
2780 *event->map = 1;
2781
2782 return VK_SUCCESS;
2783 }
2784
2785 VkResult radv_ResetEvent(
2786 VkDevice _device,
2787 VkEvent _event)
2788 {
2789 RADV_FROM_HANDLE(radv_event, event, _event);
2790 *event->map = 0;
2791
2792 return VK_SUCCESS;
2793 }
2794
2795 VkResult radv_CreateBuffer(
2796 VkDevice _device,
2797 const VkBufferCreateInfo* pCreateInfo,
2798 const VkAllocationCallbacks* pAllocator,
2799 VkBuffer* pBuffer)
2800 {
2801 RADV_FROM_HANDLE(radv_device, device, _device);
2802 struct radv_buffer *buffer;
2803
2804 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
2805
2806 buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
2807 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2808 if (buffer == NULL)
2809 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2810
2811 buffer->size = pCreateInfo->size;
2812 buffer->usage = pCreateInfo->usage;
2813 buffer->bo = NULL;
2814 buffer->offset = 0;
2815 buffer->flags = pCreateInfo->flags;
2816
2817 buffer->shareable = vk_find_struct_const(pCreateInfo->pNext,
2818 EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR) != NULL;
2819
2820 if (pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) {
2821 buffer->bo = device->ws->buffer_create(device->ws,
2822 align64(buffer->size, 4096),
2823 4096, 0, RADEON_FLAG_VIRTUAL);
2824 if (!buffer->bo) {
2825 vk_free2(&device->alloc, pAllocator, buffer);
2826 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
2827 }
2828 }
2829
2830 *pBuffer = radv_buffer_to_handle(buffer);
2831
2832 return VK_SUCCESS;
2833 }
2834
2835 void radv_DestroyBuffer(
2836 VkDevice _device,
2837 VkBuffer _buffer,
2838 const VkAllocationCallbacks* pAllocator)
2839 {
2840 RADV_FROM_HANDLE(radv_device, device, _device);
2841 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
2842
2843 if (!buffer)
2844 return;
2845
2846 if (buffer->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)
2847 device->ws->buffer_destroy(buffer->bo);
2848
2849 vk_free2(&device->alloc, pAllocator, buffer);
2850 }
2851
2852 static inline unsigned
2853 si_tile_mode_index(const struct radv_image *image, unsigned level, bool stencil)
2854 {
2855 if (stencil)
2856 return image->surface.u.legacy.stencil_tiling_index[level];
2857 else
2858 return image->surface.u.legacy.tiling_index[level];
2859 }
2860
2861 static uint32_t radv_surface_layer_count(struct radv_image_view *iview)
2862 {
2863 return iview->type == VK_IMAGE_VIEW_TYPE_3D ? iview->extent.depth : iview->layer_count;
2864 }
2865
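/* For 3D image views the "layer count" is the view's depth extent; the
 * value feeds the SLICE_START/SLICE_MAX view fields below. */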
2866 static void
2867 radv_initialise_color_surface(struct radv_device *device,
2868 struct radv_color_buffer_info *cb,
2869 struct radv_image_view *iview)
2870 {
2871 const struct vk_format_description *desc;
2872 unsigned ntype, format, swap, endian;
2873 unsigned blend_clamp = 0, blend_bypass = 0;
2874 uint64_t va;
2875 const struct radeon_surf *surf = &iview->image->surface;
2876
2877 desc = vk_format_description(iview->vk_format);
2878
2879 memset(cb, 0, sizeof(*cb));
2880
2881 /* Intensity is implemented as Red, so treat it that way. */
2882 cb->cb_color_attrib = S_028C74_FORCE_DST_ALPHA_1(desc->swizzle[3] == VK_SWIZZLE_1);
2883
2884 va = radv_buffer_get_va(iview->bo) + iview->image->offset;
2885
2886 cb->cb_color_base = va >> 8;
2887
2888 if (device->physical_device->rad_info.chip_class >= GFX9) {
2889 struct gfx9_surf_meta_flags meta;
2890 if (iview->image->dcc_offset)
2891 meta = iview->image->surface.u.gfx9.dcc;
2892 else
2893 meta = iview->image->surface.u.gfx9.cmask;
2894
2895 cb->cb_color_attrib |= S_028C74_COLOR_SW_MODE(iview->image->surface.u.gfx9.surf.swizzle_mode) |
2896 S_028C74_FMASK_SW_MODE(iview->image->surface.u.gfx9.fmask.swizzle_mode) |
2897 S_028C74_RB_ALIGNED(meta.rb_aligned) |
2898 S_028C74_PIPE_ALIGNED(meta.pipe_aligned);
2899
2900 cb->cb_color_base += iview->image->surface.u.gfx9.surf_offset >> 8;
2901 cb->cb_color_base |= iview->image->surface.tile_swizzle;
2902 } else {
2903 const struct legacy_surf_level *level_info = &surf->u.legacy.level[iview->base_mip];
2904 unsigned pitch_tile_max, slice_tile_max, tile_mode_index;
2905
2906 cb->cb_color_base += level_info->offset >> 8;
2907 if (level_info->mode == RADEON_SURF_MODE_2D)
2908 cb->cb_color_base |= iview->image->surface.tile_swizzle;
2909
2910 pitch_tile_max = level_info->nblk_x / 8 - 1;
2911 slice_tile_max = (level_info->nblk_x * level_info->nblk_y) / 64 - 1;
2912 tile_mode_index = si_tile_mode_index(iview->image, iview->base_mip, false);
2913
2914 cb->cb_color_pitch = S_028C64_TILE_MAX(pitch_tile_max);
2915 cb->cb_color_slice = S_028C68_TILE_MAX(slice_tile_max);
2916 cb->cb_color_cmask_slice = iview->image->cmask.slice_tile_max;
2917
2918 cb->cb_color_attrib |= S_028C74_TILE_MODE_INDEX(tile_mode_index);
2919 cb->micro_tile_mode = iview->image->surface.micro_tile_mode;
2920
2921 if (iview->image->fmask.size) {
2922 if (device->physical_device->rad_info.chip_class >= CIK)
2923 cb->cb_color_pitch |= S_028C64_FMASK_TILE_MAX(iview->image->fmask.pitch_in_pixels / 8 - 1);
2924 cb->cb_color_attrib |= S_028C74_FMASK_TILE_MODE_INDEX(iview->image->fmask.tile_mode_index);
2925 cb->cb_color_fmask_slice = S_028C88_TILE_MAX(iview->image->fmask.slice_tile_max);
2926 } else {
2927 /* This must be set for fast clear to work without FMASK. */
2928 if (device->physical_device->rad_info.chip_class >= CIK)
2929 cb->cb_color_pitch |= S_028C64_FMASK_TILE_MAX(pitch_tile_max);
2930 cb->cb_color_attrib |= S_028C74_FMASK_TILE_MODE_INDEX(tile_mode_index);
2931 cb->cb_color_fmask_slice = S_028C88_TILE_MAX(slice_tile_max);
2932 }
2933 }
2934
2935 /* CMASK variables */
2936 va = radv_buffer_get_va(iview->bo) + iview->image->offset;
2937 va += iview->image->cmask.offset;
2938 cb->cb_color_cmask = va >> 8;
2939
2940 va = radv_buffer_get_va(iview->bo) + iview->image->offset;
2941 va += iview->image->dcc_offset;
2942 cb->cb_dcc_base = va >> 8;
2943 cb->cb_dcc_base |= iview->image->surface.tile_swizzle;
2944
2945 uint32_t max_slice = radv_surface_layer_count(iview);
2946 cb->cb_color_view = S_028C6C_SLICE_START(iview->base_layer) |
2947 S_028C6C_SLICE_MAX(iview->base_layer + max_slice - 1);
2948
2949 if (iview->image->info.samples > 1) {
2950 unsigned log_samples = util_logbase2(iview->image->info.samples);
2951
2952 cb->cb_color_attrib |= S_028C74_NUM_SAMPLES(log_samples) |
2953 S_028C74_NUM_FRAGMENTS(log_samples);
2954 }
2955
2956 if (iview->image->fmask.size) {
2957 va = radv_buffer_get_va(iview->bo) + iview->image->offset + iview->image->fmask.offset;
2958 cb->cb_color_fmask = va >> 8;
2959 cb->cb_color_fmask |= iview->image->fmask.tile_swizzle;
2960 } else {
2961 cb->cb_color_fmask = cb->cb_color_base;
2962 }
2963
2964 ntype = radv_translate_color_numformat(iview->vk_format,
2965 desc,
2966 vk_format_get_first_non_void_channel(iview->vk_format));
2967 format = radv_translate_colorformat(iview->vk_format);
2968 if (format == V_028C70_COLOR_INVALID || ntype == ~0u)
2969 radv_finishme("Illegal color\n");
2970 swap = radv_translate_colorswap(iview->vk_format, FALSE);
2971 endian = radv_colorformat_endian_swap(format);
2972
2973 /* blend clamp should be set for all NORM/SRGB types */
2974 if (ntype == V_028C70_NUMBER_UNORM ||
2975 ntype == V_028C70_NUMBER_SNORM ||
2976 ntype == V_028C70_NUMBER_SRGB)
2977 blend_clamp = 1;
2978
2979 /* set blend bypass according to docs if SINT/UINT or
2980 8/24 COLOR variants */
2981 if (ntype == V_028C70_NUMBER_UINT || ntype == V_028C70_NUMBER_SINT ||
2982 format == V_028C70_COLOR_8_24 || format == V_028C70_COLOR_24_8 ||
2983 format == V_028C70_COLOR_X24_8_32_FLOAT) {
2984 blend_clamp = 0;
2985 blend_bypass = 1;
2986 }
2987 #if 0
2988 if ((ntype == V_028C70_NUMBER_UINT || ntype == V_028C70_NUMBER_SINT) &&
2989 (format == V_028C70_COLOR_8 ||
2990 format == V_028C70_COLOR_8_8 ||
2991 format == V_028C70_COLOR_8_8_8_8))
2992 ->color_is_int8 = true;
2993 #endif
2994 cb->cb_color_info = S_028C70_FORMAT(format) |
2995 S_028C70_COMP_SWAP(swap) |
2996 S_028C70_BLEND_CLAMP(blend_clamp) |
2997 S_028C70_BLEND_BYPASS(blend_bypass) |
2998 S_028C70_SIMPLE_FLOAT(1) |
2999 S_028C70_ROUND_MODE(ntype != V_028C70_NUMBER_UNORM &&
3000 ntype != V_028C70_NUMBER_SNORM &&
3001 ntype != V_028C70_NUMBER_SRGB &&
3002 format != V_028C70_COLOR_8_24 &&
3003 format != V_028C70_COLOR_24_8) |
3004 S_028C70_NUMBER_TYPE(ntype) |
3005 S_028C70_ENDIAN(endian);
3006 if ((iview->image->info.samples > 1) && iview->image->fmask.size) {
3007 cb->cb_color_info |= S_028C70_COMPRESSION(1);
3008 if (device->physical_device->rad_info.chip_class == SI) {
3009 unsigned fmask_bankh = util_logbase2(iview->image->fmask.bank_height);
3010 cb->cb_color_attrib |= S_028C74_FMASK_BANK_HEIGHT(fmask_bankh);
3011 }
3012 }
3013
3014 if (iview->image->cmask.size &&
3015 !(device->instance->debug_flags & RADV_DEBUG_NO_FAST_CLEARS))
3016 cb->cb_color_info |= S_028C70_FAST_CLEAR(1);
3017
3018 if (radv_vi_dcc_enabled(iview->image, iview->base_mip))
3019 cb->cb_color_info |= S_028C70_DCC_ENABLE(1);
3020
3021 if (device->physical_device->rad_info.chip_class >= VI) {
3022 unsigned max_uncompressed_block_size = 2;
3023 if (iview->image->info.samples > 1) {
3024 if (iview->image->surface.bpe == 1)
3025 max_uncompressed_block_size = 0;
3026 else if (iview->image->surface.bpe == 2)
3027 max_uncompressed_block_size = 1;
3028 }
3029
3030 cb->cb_dcc_control = S_028C78_MAX_UNCOMPRESSED_BLOCK_SIZE(max_uncompressed_block_size) |
3031 S_028C78_INDEPENDENT_64B_BLOCKS(1);
3032 }
3033
3034 /* This must be set for fast clear to work without FMASK. */
3035 if (!iview->image->fmask.size &&
3036 device->physical_device->rad_info.chip_class == SI) {
3037 unsigned bankh = util_logbase2(iview->image->surface.u.legacy.bankh);
3038 cb->cb_color_attrib |= S_028C74_FMASK_BANK_HEIGHT(bankh);
3039 }
3040
3041 if (device->physical_device->rad_info.chip_class >= GFX9) {
3042 unsigned mip0_depth = iview->image->type == VK_IMAGE_TYPE_3D ?
3043 (iview->extent.depth - 1) : (iview->image->info.array_size - 1);
3044
3045 cb->cb_color_view |= S_028C6C_MIP_LEVEL(iview->base_mip);
3046 cb->cb_color_attrib |= S_028C74_MIP0_DEPTH(mip0_depth) |
3047 S_028C74_RESOURCE_TYPE(iview->image->surface.u.gfx9.resource_type);
3048 cb->cb_color_attrib2 = S_028C68_MIP0_WIDTH(iview->extent.width - 1) |
3049 S_028C68_MIP0_HEIGHT(iview->extent.height - 1) |
3050 S_028C68_MAX_MIP(iview->image->info.levels - 1);
3051
3052 cb->gfx9_epitch = S_0287A0_EPITCH(iview->image->surface.u.gfx9.surf.epitch);
3053
3054 }
3055 }
3056
3057 static void
3058 radv_initialise_ds_surface(struct radv_device *device,
3059 struct radv_ds_buffer_info *ds,
3060 struct radv_image_view *iview)
3061 {
3062 unsigned level = iview->base_mip;
3063 unsigned format, stencil_format;
3064 uint64_t va, s_offs, z_offs;
3065 bool stencil_only = false;
3066 memset(ds, 0, sizeof(*ds));
3067 switch (iview->image->vk_format) {
3068 case VK_FORMAT_D24_UNORM_S8_UINT:
3069 case VK_FORMAT_X8_D24_UNORM_PACK32:
3070 ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-24);
3071 ds->offset_scale = 2.0f;
3072 break;
3073 case VK_FORMAT_D16_UNORM:
3074 case VK_FORMAT_D16_UNORM_S8_UINT:
3075 ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-16);
3076 ds->offset_scale = 4.0f;
3077 break;
3078 case VK_FORMAT_D32_SFLOAT:
3079 case VK_FORMAT_D32_SFLOAT_S8_UINT:
3080 ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-23) |
3081 S_028B78_POLY_OFFSET_DB_IS_FLOAT_FMT(1);
3082 ds->offset_scale = 1.0f;
3083 break;
3084 case VK_FORMAT_S8_UINT:
3085 stencil_only = true;
3086 break;
3087 default:
3088 break;
3089 }
3090
3091 format = radv_translate_dbformat(iview->image->vk_format);
3092 stencil_format = iview->image->surface.has_stencil ?
3093 V_028044_STENCIL_8 : V_028044_STENCIL_INVALID;
3094
3095 uint32_t max_slice = radv_surface_layer_count(iview);
3096 ds->db_depth_view = S_028008_SLICE_START(iview->base_layer) |
3097 S_028008_SLICE_MAX(iview->base_layer + max_slice - 1);
3098
3099 ds->db_htile_data_base = 0;
3100 ds->db_htile_surface = 0;
3101
3102 va = radv_buffer_get_va(iview->bo) + iview->image->offset;
3103 s_offs = z_offs = va;
3104
3105 if (device->physical_device->rad_info.chip_class >= GFX9) {
3106 assert(iview->image->surface.u.gfx9.surf_offset == 0);
3107 s_offs += iview->image->surface.u.gfx9.stencil_offset;
3108
3109 ds->db_z_info = S_028038_FORMAT(format) |
3110 S_028038_NUM_SAMPLES(util_logbase2(iview->image->info.samples)) |
3111 S_028038_SW_MODE(iview->image->surface.u.gfx9.surf.swizzle_mode) |
3112 S_028038_MAXMIP(iview->image->info.levels - 1);
3113 ds->db_stencil_info = S_02803C_FORMAT(stencil_format) |
3114 S_02803C_SW_MODE(iview->image->surface.u.gfx9.stencil.swizzle_mode);
3115
3116 ds->db_z_info2 = S_028068_EPITCH(iview->image->surface.u.gfx9.surf.epitch);
3117 ds->db_stencil_info2 = S_02806C_EPITCH(iview->image->surface.u.gfx9.stencil.epitch);
3118 ds->db_depth_view |= S_028008_MIPID(level);
3119
3120 ds->db_depth_size = S_02801C_X_MAX(iview->image->info.width - 1) |
3121 S_02801C_Y_MAX(iview->image->info.height - 1);
3122
3123 if (radv_htile_enabled(iview->image, level)) {
3124 ds->db_z_info |= S_028038_TILE_SURFACE_ENABLE(1);
3125
3126 if (iview->image->tc_compatible_htile) {
3127 unsigned max_zplanes = 4;
3128
3129 if (iview->vk_format == VK_FORMAT_D16_UNORM &&
3130 iview->image->info.samples > 1)
3131 max_zplanes = 2;
3132
3133 ds->db_z_info |= S_028038_DECOMPRESS_ON_N_ZPLANES(max_zplanes + 1) |
3134 S_028038_ITERATE_FLUSH(1);
3135 ds->db_stencil_info |= S_02803C_ITERATE_FLUSH(1);
3136 }
3137
3138 if (!iview->image->surface.has_stencil)
3139 /* Use all of the htile_buffer for depth if there's no stencil. */
3140 ds->db_stencil_info |= S_02803C_TILE_STENCIL_DISABLE(1);
3141 va = radv_buffer_get_va(iview->bo) + iview->image->offset +
3142 iview->image->htile_offset;
3143 ds->db_htile_data_base = va >> 8;
3144 ds->db_htile_surface = S_028ABC_FULL_CACHE(1) |
3145 S_028ABC_PIPE_ALIGNED(iview->image->surface.u.gfx9.htile.pipe_aligned) |
3146 S_028ABC_RB_ALIGNED(iview->image->surface.u.gfx9.htile.rb_aligned);
3147 }
3148 } else {
3149 const struct legacy_surf_level *level_info = &iview->image->surface.u.legacy.level[level];
3150
3151 if (stencil_only)
3152 level_info = &iview->image->surface.u.legacy.stencil_level[level];
3153
3154 z_offs += iview->image->surface.u.legacy.level[level].offset;
3155 s_offs += iview->image->surface.u.legacy.stencil_level[level].offset;
3156
3157 ds->db_depth_info = S_02803C_ADDR5_SWIZZLE_MASK(!iview->image->tc_compatible_htile);
3158 ds->db_z_info = S_028040_FORMAT(format) | S_028040_ZRANGE_PRECISION(1);
3159 ds->db_stencil_info = S_028044_FORMAT(stencil_format);
3160
3161 if (iview->image->info.samples > 1)
3162 ds->db_z_info |= S_028040_NUM_SAMPLES(util_logbase2(iview->image->info.samples));
3163
3164 if (device->physical_device->rad_info.chip_class >= CIK) {
3165 struct radeon_info *info = &device->physical_device->rad_info;
3166 unsigned tiling_index = iview->image->surface.u.legacy.tiling_index[level];
3167 unsigned stencil_index = iview->image->surface.u.legacy.stencil_tiling_index[level];
3168 unsigned macro_index = iview->image->surface.u.legacy.macro_tile_index;
3169 unsigned tile_mode = info->si_tile_mode_array[tiling_index];
3170 unsigned stencil_tile_mode = info->si_tile_mode_array[stencil_index];
3171 unsigned macro_mode = info->cik_macrotile_mode_array[macro_index];
3172
3173 if (stencil_only)
3174 tile_mode = stencil_tile_mode;
3175
3176 ds->db_depth_info |=
3177 S_02803C_ARRAY_MODE(G_009910_ARRAY_MODE(tile_mode)) |
3178 S_02803C_PIPE_CONFIG(G_009910_PIPE_CONFIG(tile_mode)) |
3179 S_02803C_BANK_WIDTH(G_009990_BANK_WIDTH(macro_mode)) |
3180 S_02803C_BANK_HEIGHT(G_009990_BANK_HEIGHT(macro_mode)) |
3181 S_02803C_MACRO_TILE_ASPECT(G_009990_MACRO_TILE_ASPECT(macro_mode)) |
3182 S_02803C_NUM_BANKS(G_009990_NUM_BANKS(macro_mode));
3183 ds->db_z_info |= S_028040_TILE_SPLIT(G_009910_TILE_SPLIT(tile_mode));
3184 ds->db_stencil_info |= S_028044_TILE_SPLIT(G_009910_TILE_SPLIT(stencil_tile_mode));
3185 } else {
3186 unsigned tile_mode_index = si_tile_mode_index(iview->image, level, false);
3187 ds->db_z_info |= S_028040_TILE_MODE_INDEX(tile_mode_index);
3188 tile_mode_index = si_tile_mode_index(iview->image, level, true);
3189 ds->db_stencil_info |= S_028044_TILE_MODE_INDEX(tile_mode_index);
3190 if (stencil_only)
3191 ds->db_z_info |= S_028040_TILE_MODE_INDEX(tile_mode_index);
3192 }
3193
3194 ds->db_depth_size = S_028058_PITCH_TILE_MAX((level_info->nblk_x / 8) - 1) |
3195 S_028058_HEIGHT_TILE_MAX((level_info->nblk_y / 8) - 1);
3196 ds->db_depth_slice = S_02805C_SLICE_TILE_MAX((level_info->nblk_x * level_info->nblk_y) / 64 - 1);
3197
3198 if (radv_htile_enabled(iview->image, level)) {
3199 ds->db_z_info |= S_028040_TILE_SURFACE_ENABLE(1);
3200
3201 if (!iview->image->surface.has_stencil &&
3202 !iview->image->tc_compatible_htile)
3203 /* Use all of the htile_buffer for depth if there's no stencil. */
3204 ds->db_stencil_info |= S_028044_TILE_STENCIL_DISABLE(1);
3205
3206 va = radv_buffer_get_va(iview->bo) + iview->image->offset +
3207 iview->image->htile_offset;
3208 ds->db_htile_data_base = va >> 8;
3209 ds->db_htile_surface = S_028ABC_FULL_CACHE(1);
3210
3211 if (iview->image->tc_compatible_htile) {
3212 ds->db_htile_surface |= S_028ABC_TC_COMPATIBLE(1);
3213
3214 if (iview->image->info.samples <= 1)
3215 ds->db_z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(5);
3216 else if (iview->image->info.samples <= 4)
3217 ds->db_z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(3);
3218 else
3219 				ds->db_z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(2);
3220 }
3221 }
3222 }
3223
3224 ds->db_z_read_base = ds->db_z_write_base = z_offs >> 8;
3225 ds->db_stencil_read_base = ds->db_stencil_write_base = s_offs >> 8;
3226 }
3227
3228 VkResult radv_CreateFramebuffer(
3229 VkDevice _device,
3230 const VkFramebufferCreateInfo* pCreateInfo,
3231 const VkAllocationCallbacks* pAllocator,
3232 VkFramebuffer* pFramebuffer)
3233 {
3234 RADV_FROM_HANDLE(radv_device, device, _device);
3235 struct radv_framebuffer *framebuffer;
3236
3237 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
3238
3239 size_t size = sizeof(*framebuffer) +
3240 sizeof(struct radv_attachment_info) * pCreateInfo->attachmentCount;
3241 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
3242 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3243 if (framebuffer == NULL)
3244 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3245
3246 framebuffer->attachment_count = pCreateInfo->attachmentCount;
3247 framebuffer->width = pCreateInfo->width;
3248 framebuffer->height = pCreateInfo->height;
3249 framebuffer->layers = pCreateInfo->layers;
3250 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
3251 VkImageView _iview = pCreateInfo->pAttachments[i];
3252 struct radv_image_view *iview = radv_image_view_from_handle(_iview);
3253 framebuffer->attachments[i].attachment = iview;
3254 if (iview->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) {
3255 radv_initialise_color_surface(device, &framebuffer->attachments[i].cb, iview);
3256 } else if (iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
3257 radv_initialise_ds_surface(device, &framebuffer->attachments[i].ds, iview);
3258 }
3259 framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
3260 framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
3261 framebuffer->layers = MIN2(framebuffer->layers, radv_surface_layer_count(iview));
3262 }
3263
3264 *pFramebuffer = radv_framebuffer_to_handle(framebuffer);
3265 return VK_SUCCESS;
3266 }
3267
3268 void radv_DestroyFramebuffer(
3269 VkDevice _device,
3270 VkFramebuffer _fb,
3271 const VkAllocationCallbacks* pAllocator)
3272 {
3273 RADV_FROM_HANDLE(radv_device, device, _device);
3274 RADV_FROM_HANDLE(radv_framebuffer, fb, _fb);
3275
3276 if (!fb)
3277 return;
3278 vk_free2(&device->alloc, pAllocator, fb);
3279 }
3280
3281 static unsigned radv_tex_wrap(VkSamplerAddressMode address_mode)
3282 {
3283 switch (address_mode) {
3284 case VK_SAMPLER_ADDRESS_MODE_REPEAT:
3285 return V_008F30_SQ_TEX_WRAP;
3286 case VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT:
3287 return V_008F30_SQ_TEX_MIRROR;
3288 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE:
3289 return V_008F30_SQ_TEX_CLAMP_LAST_TEXEL;
3290 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER:
3291 return V_008F30_SQ_TEX_CLAMP_BORDER;
3292 case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE:
3293 return V_008F30_SQ_TEX_MIRROR_ONCE_LAST_TEXEL;
3294 default:
3295 unreachable("illegal tex wrap mode");
3296 break;
3297 }
3298 }

static unsigned
radv_tex_compare(VkCompareOp op)
{
	switch (op) {
	case VK_COMPARE_OP_NEVER:
		return V_008F30_SQ_TEX_DEPTH_COMPARE_NEVER;
	case VK_COMPARE_OP_LESS:
		return V_008F30_SQ_TEX_DEPTH_COMPARE_LESS;
	case VK_COMPARE_OP_EQUAL:
		return V_008F30_SQ_TEX_DEPTH_COMPARE_EQUAL;
	case VK_COMPARE_OP_LESS_OR_EQUAL:
		return V_008F30_SQ_TEX_DEPTH_COMPARE_LESSEQUAL;
	case VK_COMPARE_OP_GREATER:
		return V_008F30_SQ_TEX_DEPTH_COMPARE_GREATER;
	case VK_COMPARE_OP_NOT_EQUAL:
		return V_008F30_SQ_TEX_DEPTH_COMPARE_NOTEQUAL;
	case VK_COMPARE_OP_GREATER_OR_EQUAL:
		return V_008F30_SQ_TEX_DEPTH_COMPARE_GREATEREQUAL;
	case VK_COMPARE_OP_ALWAYS:
		return V_008F30_SQ_TEX_DEPTH_COMPARE_ALWAYS;
	default:
		unreachable("illegal compare mode");
	}
}

static unsigned
radv_tex_filter(VkFilter filter, unsigned max_aniso)
{
	switch (filter) {
	case VK_FILTER_NEAREST:
		return (max_aniso > 1 ? V_008F38_SQ_TEX_XY_FILTER_ANISO_POINT :
			V_008F38_SQ_TEX_XY_FILTER_POINT);
	case VK_FILTER_LINEAR:
		return (max_aniso > 1 ? V_008F38_SQ_TEX_XY_FILTER_ANISO_BILINEAR :
			V_008F38_SQ_TEX_XY_FILTER_BILINEAR);
	case VK_FILTER_CUBIC_IMG:
	default:
		fprintf(stderr, "illegal texture filter\n");
		return 0;
	}
}

static unsigned
radv_tex_mipfilter(VkSamplerMipmapMode mode)
{
	switch (mode) {
	case VK_SAMPLER_MIPMAP_MODE_NEAREST:
		return V_008F38_SQ_TEX_Z_FILTER_POINT;
	case VK_SAMPLER_MIPMAP_MODE_LINEAR:
		return V_008F38_SQ_TEX_Z_FILTER_LINEAR;
	default:
		return V_008F38_SQ_TEX_Z_FILTER_NONE;
	}
}

static unsigned
radv_tex_bordercolor(VkBorderColor bcolor)
{
	switch (bcolor) {
	case VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK:
	case VK_BORDER_COLOR_INT_TRANSPARENT_BLACK:
		return V_008F3C_SQ_TEX_BORDER_COLOR_TRANS_BLACK;
	case VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK:
	case VK_BORDER_COLOR_INT_OPAQUE_BLACK:
		return V_008F3C_SQ_TEX_BORDER_COLOR_OPAQUE_BLACK;
	case VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE:
	case VK_BORDER_COLOR_INT_OPAQUE_WHITE:
		return V_008F3C_SQ_TEX_BORDER_COLOR_OPAQUE_WHITE;
	default:
		break;
	}
	return 0;
}

static unsigned
radv_tex_aniso_filter(unsigned filter)
{
	if (filter < 2)
		return 0;
	if (filter < 4)
		return 1;
	if (filter < 8)
		return 2;
	if (filter < 16)
		return 3;
	return 4;
}
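
/* The ladder above computes floor(log2(max anisotropy)), clamped to 4:
 * 1x -> 0, 2x-3x -> 1, 4x-7x -> 2, 8x-15x -> 3, 16x -> 4. The hardware
 * takes the maximum anisotropy ratio encoded as this power-of-two exponent
 * in MAX_ANISO_RATIO.
 */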

static void
radv_init_sampler(struct radv_device *device,
		  struct radv_sampler *sampler,
		  const VkSamplerCreateInfo *pCreateInfo)
{
	uint32_t max_aniso = pCreateInfo->anisotropyEnable && pCreateInfo->maxAnisotropy > 1.0 ?
			     (uint32_t) pCreateInfo->maxAnisotropy : 0;
	uint32_t max_aniso_ratio = radv_tex_aniso_filter(max_aniso);
	bool is_vi = (device->physical_device->rad_info.chip_class >= VI);

	sampler->state[0] = (S_008F30_CLAMP_X(radv_tex_wrap(pCreateInfo->addressModeU)) |
			     S_008F30_CLAMP_Y(radv_tex_wrap(pCreateInfo->addressModeV)) |
			     S_008F30_CLAMP_Z(radv_tex_wrap(pCreateInfo->addressModeW)) |
			     S_008F30_MAX_ANISO_RATIO(max_aniso_ratio) |
			     S_008F30_DEPTH_COMPARE_FUNC(radv_tex_compare(pCreateInfo->compareOp)) |
			     S_008F30_FORCE_UNNORMALIZED(pCreateInfo->unnormalizedCoordinates ? 1 : 0) |
			     S_008F30_ANISO_THRESHOLD(max_aniso_ratio >> 1) |
			     S_008F30_ANISO_BIAS(max_aniso_ratio) |
			     S_008F30_DISABLE_CUBE_WRAP(0) |
			     S_008F30_COMPAT_MODE(is_vi));
	sampler->state[1] = (S_008F34_MIN_LOD(S_FIXED(CLAMP(pCreateInfo->minLod, 0, 15), 8)) |
			     S_008F34_MAX_LOD(S_FIXED(CLAMP(pCreateInfo->maxLod, 0, 15), 8)) |
			     S_008F34_PERF_MIP(max_aniso_ratio ? max_aniso_ratio + 6 : 0));
	sampler->state[2] = (S_008F38_LOD_BIAS(S_FIXED(CLAMP(pCreateInfo->mipLodBias, -16, 16), 8)) |
			     S_008F38_XY_MAG_FILTER(radv_tex_filter(pCreateInfo->magFilter, max_aniso)) |
			     S_008F38_XY_MIN_FILTER(radv_tex_filter(pCreateInfo->minFilter, max_aniso)) |
			     S_008F38_MIP_FILTER(radv_tex_mipfilter(pCreateInfo->mipmapMode)) |
			     S_008F38_MIP_POINT_PRECLAMP(0) |
			     S_008F38_DISABLE_LSB_CEIL(1) |
			     S_008F38_FILTER_PREC_FIX(1) |
			     S_008F38_ANISO_OVERRIDE(is_vi));
	sampler->state[3] = (S_008F3C_BORDER_COLOR_PTR(0) |
			     S_008F3C_BORDER_COLOR_TYPE(radv_tex_bordercolor(pCreateInfo->borderColor)));
}
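
/* The LOD fields above use a fixed-point encoding with 8 fractional bits,
 * i.e. S_FIXED(x, 8) is roughly x * 256: minLod = 2.5 becomes 640 (0x280).
 * The CLAMP to [0, 15] together with the 8 fractional bits corresponds to a
 * 12-bit hardware field.
 */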

VkResult radv_CreateSampler(
	VkDevice                                    _device,
	const VkSamplerCreateInfo*                  pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkSampler*                                  pSampler)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_sampler *sampler;

	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

	sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
			    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (!sampler)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	radv_init_sampler(device, sampler, pCreateInfo);
	*pSampler = radv_sampler_to_handle(sampler);

	return VK_SUCCESS;
}

void radv_DestroySampler(
	VkDevice                                    _device,
	VkSampler                                   _sampler,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_sampler, sampler, _sampler);

	if (!sampler)
		return;
	vk_free2(&device->alloc, pAllocator, sampler);
}

/* vk_icd.h does not declare this function, so we declare it here to
 * suppress -Wmissing-prototypes.
 */
PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);

PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
{
	/* For the full details on loader interface versioning, see
	 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
	 * What follows is a condensed summary, to help you navigate the large and
	 * confusing official doc.
	 *
	 * - Loader interface v0 is incompatible with later versions. We don't
	 *   support it.
	 *
	 * - In loader interface v1:
	 *    - The first ICD entrypoint called by the loader is
	 *      vk_icdGetInstanceProcAddr(). The ICD must statically expose this
	 *      entrypoint.
	 *    - The ICD must statically expose no other Vulkan symbol unless it is
	 *      linked with -Bsymbolic.
	 *    - Each dispatchable Vulkan handle created by the ICD must be
	 *      a pointer to a struct whose first member is VK_LOADER_DATA. The
	 *      ICD must initialize VK_LOADER_DATA.loadMagic to ICD_LOADER_MAGIC.
	 *    - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
	 *      vkDestroySurfaceKHR(). The ICD must be capable of working with
	 *      such loader-managed surfaces.
	 *
	 * - Loader interface v2 differs from v1 in:
	 *    - The first ICD entrypoint called by the loader is
	 *      vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
	 *      statically expose this entrypoint.
	 *
	 * - Loader interface v3 differs from v2 in:
	 *    - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
	 *      vkDestroySurfaceKHR(), and all other APIs that use VkSurfaceKHR,
	 *      because the loader no longer does so.
	 */
	*pSupportedVersion = MIN2(*pSupportedVersion, 3u);
	return VK_SUCCESS;
}
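
/* Negotiation example: a loader that supports interface version 4 calls this
 * with *pSupportedVersion == 4 and gets it clamped to 3; a loader that only
 * supports version 1 or 2 keeps its value, and both sides then follow the
 * contract of the agreed version described above.
 */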

VkResult radv_GetMemoryFdKHR(VkDevice _device,
			     const VkMemoryGetFdInfoKHR *pGetFdInfo,
			     int *pFD)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_device_memory, memory, pGetFdInfo->memory);

	assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);

	/* We support only one handle type. */
	assert(pGetFdInfo->handleType ==
	       VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);

	if (!radv_get_memory_fd(device, memory, pFD))
		return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
	return VK_SUCCESS;
}
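
/* For reference, the application-side export this entrypoint serves looks
 * roughly like the following sketch (handle names illustrative, error
 * handling omitted):
 *
 *    VkMemoryGetFdInfoKHR get_fd_info = {
 *            .sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR,
 *            .memory = mem,
 *            .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,
 *    };
 *    int fd = -1;
 *    vkGetMemoryFdKHR(device, &get_fd_info, &fd);
 *
 * Ownership of the returned fd passes to the caller, who must close() it or
 * import it elsewhere.
 */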

VkResult radv_GetMemoryFdPropertiesKHR(VkDevice _device,
				       VkExternalMemoryHandleTypeFlagBitsKHR handleType,
				       int fd,
				       VkMemoryFdPropertiesKHR *pMemoryFdProperties)
{
	/* The valid usage section for this function says:
	 *
	 *    "handleType must not be one of the handle types defined as opaque."
	 *
	 * Since we only handle opaque handles for now, there are no FD properties.
	 */
	return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
}

VkResult radv_ImportSemaphoreFdKHR(VkDevice _device,
				   const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_semaphore, sem, pImportSemaphoreFdInfo->semaphore);
	uint32_t syncobj_handle = 0;
	uint32_t *syncobj_dst = NULL;
	assert(pImportSemaphoreFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);

	int ret = device->ws->import_syncobj(device->ws, pImportSemaphoreFdInfo->fd, &syncobj_handle);
	if (ret != 0)
		return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);

	if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR) {
		syncobj_dst = &sem->temp_syncobj;
	} else {
		syncobj_dst = &sem->syncobj;
	}

	if (*syncobj_dst)
		device->ws->destroy_syncobj(device->ws, *syncobj_dst);

	*syncobj_dst = syncobj_handle;
	close(pImportSemaphoreFdInfo->fd);
	return VK_SUCCESS;
}
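
/* On success the import takes ownership of pImportSemaphoreFdInfo->fd (it is
 * closed above). A temporary import only replaces temp_syncobj, which the
 * driver prefers over the permanent payload (see radv_GetSemaphoreFdKHR
 * below); a permanent import replaces syncobj itself. A caller-side sketch
 * (handle names illustrative):
 *
 *    VkImportSemaphoreFdInfoKHR import_info = {
 *            .sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR,
 *            .semaphore = sem,
 *            .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,
 *            .flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR,
 *            .fd = fd,
 *    };
 *    vkImportSemaphoreFdKHR(device, &import_info);
 */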

VkResult radv_GetSemaphoreFdKHR(VkDevice _device,
				const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
				int *pFd)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_semaphore, sem, pGetFdInfo->semaphore);
	int ret;
	uint32_t syncobj_handle;

	assert(pGetFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
	if (sem->temp_syncobj)
		syncobj_handle = sem->temp_syncobj;
	else
		syncobj_handle = sem->syncobj;
	ret = device->ws->export_syncobj(device->ws, syncobj_handle, pFd);
	if (ret)
		return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
	return VK_SUCCESS;
}
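
/* Export uses the currently active payload: the temporary syncobj when one
 * has been imported with VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR, and the
 * permanent one otherwise.
 */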

void radv_GetPhysicalDeviceExternalSemaphorePropertiesKHR(
	VkPhysicalDevice                            physicalDevice,
	const VkPhysicalDeviceExternalSemaphoreInfoKHR* pExternalSemaphoreInfo,
	VkExternalSemaphorePropertiesKHR*           pExternalSemaphoreProperties)
{
	if (pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR) {
		pExternalSemaphoreProperties->exportFromImportedHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
		pExternalSemaphoreProperties->compatibleHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
		pExternalSemaphoreProperties->externalSemaphoreFeatures = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
			VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
	} else {
		pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
		pExternalSemaphoreProperties->compatibleHandleTypes = 0;
		pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
	}
}