radv: add tessellation ring allocation support. (v2)
[mesa.git] / src/amd/vulkan/radv_device.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 */
27
28 #include <stdbool.h>
29 #include <string.h>
30 #include <unistd.h>
31 #include <fcntl.h>
32 #include "radv_private.h"
33 #include "radv_cs.h"
34 #include "util/disk_cache.h"
35 #include "util/strtod.h"
36 #include "util/vk_util.h"
37 #include <xf86drm.h>
38 #include <amdgpu.h>
39 #include <amdgpu_drm.h>
40 #include "amdgpu_id.h"
41 #include "winsys/amdgpu/radv_amdgpu_winsys_public.h"
42 #include "ac_llvm_util.h"
43 #include "vk_format.h"
44 #include "sid.h"
45 #include "util/debug.h"
46
47 static int
48 radv_device_get_cache_uuid(enum radeon_family family, void *uuid)
49 {
50 uint32_t mesa_timestamp, llvm_timestamp;
51 uint16_t f = family;
52 memset(uuid, 0, VK_UUID_SIZE);
53 if (!disk_cache_get_function_timestamp(radv_device_get_cache_uuid, &mesa_timestamp) ||
54 !disk_cache_get_function_timestamp(LLVMInitializeAMDGPUTargetInfo, &llvm_timestamp))
55 return -1;
56
57 memcpy(uuid, &mesa_timestamp, 4);
58 memcpy((char*)uuid + 4, &llvm_timestamp, 4);
59 memcpy((char*)uuid + 8, &f, 2);
60 snprintf((char*)uuid + 10, VK_UUID_SIZE - 10, "radv");
61 return 0;
62 }
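/* A sketch of the resulting 16-byte cache UUID, following the memcpy
 * offsets above (multi-byte fields are in host byte order):
 *
 *   bytes  0- 3  mesa_timestamp   (uint32_t)
 *   bytes  4- 7  llvm_timestamp   (uint32_t)
 *   bytes  8- 9  family           (uint16_t)
 *   bytes 10-15  "radv" plus NUL, zero-padded by the initial memset
 */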
63
64 static const VkExtensionProperties instance_extensions[] = {
65 {
66 .extensionName = VK_KHR_SURFACE_EXTENSION_NAME,
67 .specVersion = 25,
68 },
69 #ifdef VK_USE_PLATFORM_XCB_KHR
70 {
71 .extensionName = VK_KHR_XCB_SURFACE_EXTENSION_NAME,
72 .specVersion = 6,
73 },
74 #endif
75 #ifdef VK_USE_PLATFORM_XLIB_KHR
76 {
77 .extensionName = VK_KHR_XLIB_SURFACE_EXTENSION_NAME,
78 .specVersion = 6,
79 },
80 #endif
81 #ifdef VK_USE_PLATFORM_WAYLAND_KHR
82 {
83 .extensionName = VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME,
84 .specVersion = 5,
85 },
86 #endif
87 {
88 .extensionName = VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
89 .specVersion = 1,
90 },
91 };
92
93 static const VkExtensionProperties common_device_extensions[] = {
94 {
95 .extensionName = VK_KHR_MAINTENANCE1_EXTENSION_NAME,
96 .specVersion = 1,
97 },
98 {
99 .extensionName = VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME,
100 .specVersion = 1,
101 },
102 {
103 .extensionName = VK_KHR_SWAPCHAIN_EXTENSION_NAME,
104 .specVersion = 68,
105 },
106 {
107 .extensionName = VK_AMD_DRAW_INDIRECT_COUNT_EXTENSION_NAME,
108 .specVersion = 1,
109 },
110 {
111 .extensionName = VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME,
112 .specVersion = 1,
113 },
114 {
115 .extensionName = VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME,
116 .specVersion = 1,
117 },
118 };
119
120 static VkResult
121 radv_extensions_register(struct radv_instance *instance,
122 struct radv_extensions *extensions,
123 const VkExtensionProperties *new_ext,
124 uint32_t num_ext)
125 {
126 size_t new_size;
127 VkExtensionProperties *new_ptr;
128
129 assert(new_ext && num_ext > 0);
130
131 if (!new_ext)
132 return VK_ERROR_INITIALIZATION_FAILED;
133
134 new_size = (extensions->num_ext + num_ext) * sizeof(VkExtensionProperties);
135 new_ptr = vk_realloc(&instance->alloc, extensions->ext_array,
136 new_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
137
138 /* Old array continues to be valid, update nothing */
139 if (!new_ptr)
140 return VK_ERROR_OUT_OF_HOST_MEMORY;
141
142 memcpy(&new_ptr[extensions->num_ext], new_ext,
143 num_ext * sizeof(VkExtensionProperties));
144 extensions->ext_array = new_ptr;
145 extensions->num_ext += num_ext;
146
147 return VK_SUCCESS;
148 }
149
150 static void
151 radv_extensions_finish(struct radv_instance *instance,
152 struct radv_extensions *extensions)
153 {
154 assert(extensions);
155
156 if (!extensions)
157 radv_loge("Attemted to free invalid extension struct\n");
158
159 if (extensions->ext_array)
160 vk_free(&instance->alloc, extensions->ext_array);
161 }
162
163 static bool
164 is_extension_enabled(const VkExtensionProperties *extensions,
165 size_t num_ext,
166 const char *name)
167 {
168 assert(extensions && name);
169
170 for (uint32_t i = 0; i < num_ext; i++) {
171 if (strcmp(name, extensions[i].extensionName) == 0)
172 return true;
173 }
174
175 return false;
176 }
177
178 static VkResult
179 radv_physical_device_init(struct radv_physical_device *device,
180 struct radv_instance *instance,
181 const char *path)
182 {
183 VkResult result;
184 drmVersionPtr version;
185 int fd;
186
187 fd = open(path, O_RDWR | O_CLOEXEC);
188 if (fd < 0)
189 return VK_ERROR_INCOMPATIBLE_DRIVER;
190
191 version = drmGetVersion(fd);
192 if (!version) {
193 close(fd);
194 return vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
195 "failed to get version %s: %m", path);
196 }
197
198 if (strcmp(version->name, "amdgpu")) {
199 drmFreeVersion(version);
200 close(fd);
201 return VK_ERROR_INCOMPATIBLE_DRIVER;
202 }
203 drmFreeVersion(version);
204
205 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
206 device->instance = instance;
207 assert(strlen(path) < ARRAY_SIZE(device->path));
208 strncpy(device->path, path, ARRAY_SIZE(device->path));
209
210 device->ws = radv_amdgpu_winsys_create(fd, instance->debug_flags);
211 if (!device->ws) {
212 result = VK_ERROR_INCOMPATIBLE_DRIVER;
213 goto fail;
214 }
215
216 device->local_fd = fd;
217 device->ws->query_info(device->ws, &device->rad_info);
218 result = radv_init_wsi(device);
219 if (result != VK_SUCCESS) {
220 device->ws->destroy(device->ws);
221 goto fail;
222 }
223
224 if (radv_device_get_cache_uuid(device->rad_info.family, device->uuid)) {
225 radv_finish_wsi(device);
226 device->ws->destroy(device->ws);
227 result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
228 "cannot generate UUID");
229 goto fail;
230 }
231
232 result = radv_extensions_register(instance,
233 &device->extensions,
234 common_device_extensions,
235 ARRAY_SIZE(common_device_extensions));
236 if (result != VK_SUCCESS)
237 goto fail;
238
239 fprintf(stderr, "WARNING: radv is not a conformant vulkan implementation, testing use only.\n");
240 device->name = device->rad_info.name;
241
242 return VK_SUCCESS;
243
244 fail:
245 close(fd);
246 return result;
247 }
248
249 static void
250 radv_physical_device_finish(struct radv_physical_device *device)
251 {
252 radv_extensions_finish(device->instance, &device->extensions);
253 radv_finish_wsi(device);
254 device->ws->destroy(device->ws);
255 close(device->local_fd);
256 }
257
258
259 static void *
260 default_alloc_func(void *pUserData, size_t size, size_t align,
261 VkSystemAllocationScope allocationScope)
262 {
263 return malloc(size);
264 }
265
266 static void *
267 default_realloc_func(void *pUserData, void *pOriginal, size_t size,
268 size_t align, VkSystemAllocationScope allocationScope)
269 {
270 return realloc(pOriginal, size);
271 }
272
273 static void
274 default_free_func(void *pUserData, void *pMemory)
275 {
276 free(pMemory);
277 }
278
279 static const VkAllocationCallbacks default_alloc = {
280 .pUserData = NULL,
281 .pfnAllocation = default_alloc_func,
282 .pfnReallocation = default_realloc_func,
283 .pfnFree = default_free_func,
284 };
285
286 static const struct debug_control radv_debug_options[] = {
287 {"nofastclears", RADV_DEBUG_NO_FAST_CLEARS},
288 {"nodcc", RADV_DEBUG_NO_DCC},
289 {"shaders", RADV_DEBUG_DUMP_SHADERS},
290 {"nocache", RADV_DEBUG_NO_CACHE},
291 {"shaderstats", RADV_DEBUG_DUMP_SHADER_STATS},
292 {"nohiz", RADV_DEBUG_NO_HIZ},
293 {"nocompute", RADV_DEBUG_NO_COMPUTE_QUEUE},
294 {"unsafemath", RADV_DEBUG_UNSAFE_MATH},
295 {"allbos", RADV_DEBUG_ALL_BOS},
296 {"noibs", RADV_DEBUG_NO_IBS},
297 {NULL, 0}
298 };
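/* These options are consumed from the comma-separated RADV_DEBUG
 * environment variable (see the parse_debug_string() call in
 * radv_CreateInstance below), e.g.:
 *
 *   RADV_DEBUG=nocache,shaders ./my_vulkan_app
 *
 * ("my_vulkan_app" is just a placeholder here.) */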
299
300 VkResult radv_CreateInstance(
301 const VkInstanceCreateInfo* pCreateInfo,
302 const VkAllocationCallbacks* pAllocator,
303 VkInstance* pInstance)
304 {
305 struct radv_instance *instance;
306
307 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
308
309 uint32_t client_version;
310 if (pCreateInfo->pApplicationInfo &&
311 pCreateInfo->pApplicationInfo->apiVersion != 0) {
312 client_version = pCreateInfo->pApplicationInfo->apiVersion;
313 } else {
314 client_version = VK_MAKE_VERSION(1, 0, 0);
315 }
316
317 if (VK_MAKE_VERSION(1, 0, 0) > client_version ||
318 client_version > VK_MAKE_VERSION(1, 0, 0xfff)) {
319 return vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
320 "Client requested version %d.%d.%d",
321 VK_VERSION_MAJOR(client_version),
322 VK_VERSION_MINOR(client_version),
323 VK_VERSION_PATCH(client_version));
324 }
325
326 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
327 if (!is_extension_enabled(instance_extensions,
328 ARRAY_SIZE(instance_extensions),
329 pCreateInfo->ppEnabledExtensionNames[i]))
330 return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
331 }
332
333 instance = vk_alloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
334 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
335 if (!instance)
336 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
337
338 memset(instance, 0, sizeof(*instance));
339
340 instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
341
342 if (pAllocator)
343 instance->alloc = *pAllocator;
344 else
345 instance->alloc = default_alloc;
346
347 instance->apiVersion = client_version;
348 instance->physicalDeviceCount = -1;
349
350 _mesa_locale_init();
351
352 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
353
354 instance->debug_flags = parse_debug_string(getenv("RADV_DEBUG"),
355 radv_debug_options);
356
357 *pInstance = radv_instance_to_handle(instance);
358
359 return VK_SUCCESS;
360 }
361
362 void radv_DestroyInstance(
363 VkInstance _instance,
364 const VkAllocationCallbacks* pAllocator)
365 {
366 RADV_FROM_HANDLE(radv_instance, instance, _instance);
367
368 if (!instance)
369 return;
370
371 for (int i = 0; i < instance->physicalDeviceCount; ++i) {
372 radv_physical_device_finish(instance->physicalDevices + i);
373 }
374
375 VG(VALGRIND_DESTROY_MEMPOOL(instance));
376
377 _mesa_locale_fini();
378
379 vk_free(&instance->alloc, instance);
380 }
381
382 static VkResult
383 radv_enumerate_devices(struct radv_instance *instance)
384 {
385 /* TODO: Check for more devices ? */
386 drmDevicePtr devices[8];
387 VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
388 int max_devices;
389
390 instance->physicalDeviceCount = 0;
391
392 max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
393 if (max_devices < 1)
394 return VK_ERROR_INCOMPATIBLE_DRIVER;
395
396 for (unsigned i = 0; i < (unsigned)max_devices; i++) {
397 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
398 devices[i]->bustype == DRM_BUS_PCI &&
399 devices[i]->deviceinfo.pci->vendor_id == 0x1002) {
400
401 result = radv_physical_device_init(instance->physicalDevices +
402 instance->physicalDeviceCount,
403 instance,
404 devices[i]->nodes[DRM_NODE_RENDER]);
405 if (result == VK_SUCCESS)
406 ++instance->physicalDeviceCount;
407 else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
408 return result;
409 }
410 }
411 return result;
412 }
413
414 VkResult radv_EnumeratePhysicalDevices(
415 VkInstance _instance,
416 uint32_t* pPhysicalDeviceCount,
417 VkPhysicalDevice* pPhysicalDevices)
418 {
419 RADV_FROM_HANDLE(radv_instance, instance, _instance);
420 VkResult result;
421
422 if (instance->physicalDeviceCount < 0) {
423 result = radv_enumerate_devices(instance);
424 if (result != VK_SUCCESS &&
425 result != VK_ERROR_INCOMPATIBLE_DRIVER)
426 return result;
427 }
428
429 if (!pPhysicalDevices) {
430 *pPhysicalDeviceCount = instance->physicalDeviceCount;
431 } else {
432 *pPhysicalDeviceCount = MIN2(*pPhysicalDeviceCount, instance->physicalDeviceCount);
433 for (unsigned i = 0; i < *pPhysicalDeviceCount; ++i)
434 pPhysicalDevices[i] = radv_physical_device_to_handle(instance->physicalDevices + i);
435 }
436
437 return *pPhysicalDeviceCount < instance->physicalDeviceCount ? VK_INCOMPLETE
438 : VK_SUCCESS;
439 }
440
441 void radv_GetPhysicalDeviceFeatures(
442 VkPhysicalDevice physicalDevice,
443 VkPhysicalDeviceFeatures* pFeatures)
444 {
445 // RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
446
447 memset(pFeatures, 0, sizeof(*pFeatures));
448
449 *pFeatures = (VkPhysicalDeviceFeatures) {
450 .robustBufferAccess = true,
451 .fullDrawIndexUint32 = true,
452 .imageCubeArray = true,
453 .independentBlend = true,
454 .geometryShader = true,
455 .tessellationShader = false,
456 .sampleRateShading = false,
457 .dualSrcBlend = true,
458 .logicOp = true,
459 .multiDrawIndirect = true,
460 .drawIndirectFirstInstance = true,
461 .depthClamp = true,
462 .depthBiasClamp = true,
463 .fillModeNonSolid = true,
464 .depthBounds = true,
465 .wideLines = true,
466 .largePoints = true,
467 .alphaToOne = true,
468 .multiViewport = true,
469 .samplerAnisotropy = true,
470 .textureCompressionETC2 = false,
471 .textureCompressionASTC_LDR = false,
472 .textureCompressionBC = true,
473 .occlusionQueryPrecise = true,
474 .pipelineStatisticsQuery = false,
475 .vertexPipelineStoresAndAtomics = true,
476 .fragmentStoresAndAtomics = true,
477 .shaderTessellationAndGeometryPointSize = true,
478 .shaderImageGatherExtended = true,
479 .shaderStorageImageExtendedFormats = true,
480 .shaderStorageImageMultisample = false,
481 .shaderUniformBufferArrayDynamicIndexing = true,
482 .shaderSampledImageArrayDynamicIndexing = true,
483 .shaderStorageBufferArrayDynamicIndexing = true,
484 .shaderStorageImageArrayDynamicIndexing = true,
485 .shaderStorageImageReadWithoutFormat = true,
486 .shaderStorageImageWriteWithoutFormat = true,
487 .shaderClipDistance = true,
488 .shaderCullDistance = true,
489 .shaderFloat64 = true,
490 .shaderInt64 = false,
491 .shaderInt16 = false,
492 .sparseBinding = true,
493 .variableMultisampleRate = false,
494 .inheritedQueries = false,
495 };
496 }
497
498 void radv_GetPhysicalDeviceFeatures2KHR(
499 VkPhysicalDevice physicalDevice,
500 VkPhysicalDeviceFeatures2KHR *pFeatures)
501 {
502 radv_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
503 }
504
505 static uint32_t radv_get_driver_version(void)
506 {
507 const char *minor_string = strchr(VERSION, '.');
508 const char *patch_string = minor_string ? strchr(minor_string + 1, '.') : NULL;
509 int major = atoi(VERSION);
510 int minor = minor_string ? atoi(minor_string + 1) : 0;
511 int patch = patch_string ? atoi(patch_string + 1) : 0;
512 if (strstr(VERSION, "devel")) {
513 if (patch == 0) {
514 patch = 99;
515 if (minor == 0) {
516 minor = 99;
517 --major;
518 } else
519 --minor;
520 } else
521 --patch;
522 }
523 uint32_t version = VK_MAKE_VERSION(major, minor, patch);
524 return version;
525 }
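/* Worked example, assuming VERSION expands to a string like
 * "17.1.0-devel": major = 17, minor = 1, patch = 0; the "devel" rule
 * then borrows from the minor number, yielding VK_MAKE_VERSION(17, 0, 99).
 * A release string such as "17.0.3" maps directly to
 * VK_MAKE_VERSION(17, 0, 3). */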
526
527 void radv_GetPhysicalDeviceProperties(
528 VkPhysicalDevice physicalDevice,
529 VkPhysicalDeviceProperties* pProperties)
530 {
531 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
532 VkSampleCountFlags sample_counts = 0xf;
533 VkPhysicalDeviceLimits limits = {
534 .maxImageDimension1D = (1 << 14),
535 .maxImageDimension2D = (1 << 14),
536 .maxImageDimension3D = (1 << 11),
537 .maxImageDimensionCube = (1 << 14),
538 .maxImageArrayLayers = (1 << 11),
539 .maxTexelBufferElements = 128 * 1024 * 1024,
540 .maxUniformBufferRange = UINT32_MAX,
541 .maxStorageBufferRange = UINT32_MAX,
542 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
543 .maxMemoryAllocationCount = UINT32_MAX,
544 .maxSamplerAllocationCount = 64 * 1024,
545 .bufferImageGranularity = 64, /* A cache line */
546 .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
547 .maxBoundDescriptorSets = MAX_SETS,
548 .maxPerStageDescriptorSamplers = 64,
549 .maxPerStageDescriptorUniformBuffers = 64,
550 .maxPerStageDescriptorStorageBuffers = 64,
551 .maxPerStageDescriptorSampledImages = 64,
552 .maxPerStageDescriptorStorageImages = 64,
553 .maxPerStageDescriptorInputAttachments = 64,
554 .maxPerStageResources = 128,
555 .maxDescriptorSetSamplers = 256,
556 .maxDescriptorSetUniformBuffers = 256,
557 .maxDescriptorSetUniformBuffersDynamic = 256,
558 .maxDescriptorSetStorageBuffers = 256,
559 .maxDescriptorSetStorageBuffersDynamic = 256,
560 .maxDescriptorSetSampledImages = 256,
561 .maxDescriptorSetStorageImages = 256,
562 .maxDescriptorSetInputAttachments = 256,
563 .maxVertexInputAttributes = 32,
564 .maxVertexInputBindings = 32,
565 .maxVertexInputAttributeOffset = 2047,
566 .maxVertexInputBindingStride = 2048,
567 .maxVertexOutputComponents = 128,
568 .maxTessellationGenerationLevel = 0,
569 .maxTessellationPatchSize = 0,
570 .maxTessellationControlPerVertexInputComponents = 0,
571 .maxTessellationControlPerVertexOutputComponents = 0,
572 .maxTessellationControlPerPatchOutputComponents = 0,
573 .maxTessellationControlTotalOutputComponents = 0,
574 .maxTessellationEvaluationInputComponents = 0,
575 .maxTessellationEvaluationOutputComponents = 0,
576 .maxGeometryShaderInvocations = 32,
577 .maxGeometryInputComponents = 64,
578 .maxGeometryOutputComponents = 128,
579 .maxGeometryOutputVertices = 256,
580 .maxGeometryTotalOutputComponents = 1024,
581 .maxFragmentInputComponents = 128,
582 .maxFragmentOutputAttachments = 8,
583 .maxFragmentDualSrcAttachments = 1,
584 .maxFragmentCombinedOutputResources = 8,
585 .maxComputeSharedMemorySize = 32768,
586 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
587 .maxComputeWorkGroupInvocations = 2048,
588 .maxComputeWorkGroupSize = {
589 2048,
590 2048,
591 2048
592 },
593 .subPixelPrecisionBits = 4 /* FIXME */,
594 .subTexelPrecisionBits = 4 /* FIXME */,
595 .mipmapPrecisionBits = 4 /* FIXME */,
596 .maxDrawIndexedIndexValue = UINT32_MAX,
597 .maxDrawIndirectCount = UINT32_MAX,
598 .maxSamplerLodBias = 16,
599 .maxSamplerAnisotropy = 16,
600 .maxViewports = MAX_VIEWPORTS,
601 .maxViewportDimensions = { (1 << 14), (1 << 14) },
602 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
603 .viewportSubPixelBits = 13, /* We take a float? */
604 .minMemoryMapAlignment = 4096, /* A page */
605 .minTexelBufferOffsetAlignment = 1,
606 .minUniformBufferOffsetAlignment = 4,
607 .minStorageBufferOffsetAlignment = 4,
608 .minTexelOffset = -32,
609 .maxTexelOffset = 31,
610 .minTexelGatherOffset = -32,
611 .maxTexelGatherOffset = 31,
612 .minInterpolationOffset = -2,
613 .maxInterpolationOffset = 2,
614 .subPixelInterpolationOffsetBits = 8,
615 .maxFramebufferWidth = (1 << 14),
616 .maxFramebufferHeight = (1 << 14),
617 .maxFramebufferLayers = (1 << 10),
618 .framebufferColorSampleCounts = sample_counts,
619 .framebufferDepthSampleCounts = sample_counts,
620 .framebufferStencilSampleCounts = sample_counts,
621 .framebufferNoAttachmentsSampleCounts = sample_counts,
622 .maxColorAttachments = MAX_RTS,
623 .sampledImageColorSampleCounts = sample_counts,
624 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
625 .sampledImageDepthSampleCounts = sample_counts,
626 .sampledImageStencilSampleCounts = sample_counts,
627 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
628 .maxSampleMaskWords = 1,
629 .timestampComputeAndGraphics = false,
630 .timestampPeriod = 100000.0 / pdevice->rad_info.clock_crystal_freq,
631 .maxClipDistances = 8,
632 .maxCullDistances = 8,
633 .maxCombinedClipAndCullDistances = 8,
634 .discreteQueuePriorities = 1,
635 .pointSizeRange = { 0.125, 255.875 },
636 .lineWidthRange = { 0.0, 7.9921875 },
637 .pointSizeGranularity = (1.0 / 8.0),
638 .lineWidthGranularity = (1.0 / 128.0),
639 .strictLines = false, /* FINISHME */
640 .standardSampleLocations = true,
641 .optimalBufferCopyOffsetAlignment = 128,
642 .optimalBufferCopyRowPitchAlignment = 128,
643 .nonCoherentAtomSize = 64,
644 };
645
646 *pProperties = (VkPhysicalDeviceProperties) {
647 .apiVersion = VK_MAKE_VERSION(1, 0, 42),
648 .driverVersion = radv_get_driver_version(),
649 .vendorID = 0x1002,
650 .deviceID = pdevice->rad_info.pci_id,
651 .deviceType = VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU,
652 .limits = limits,
653 .sparseProperties = {0}, /* Sparse residency properties are not exposed yet. */
654 };
655
656 strcpy(pProperties->deviceName, pdevice->name);
657 memcpy(pProperties->pipelineCacheUUID, pdevice->uuid, VK_UUID_SIZE);
658 }
659
660 void radv_GetPhysicalDeviceProperties2KHR(
661 VkPhysicalDevice physicalDevice,
662 VkPhysicalDeviceProperties2KHR *pProperties)
663 {
664 radv_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
665 }
666
667 static void radv_get_physical_device_queue_family_properties(
668 struct radv_physical_device* pdevice,
669 uint32_t* pCount,
670 VkQueueFamilyProperties** pQueueFamilyProperties)
671 {
672 int num_queue_families = 1;
673 int idx;
674 if (pdevice->rad_info.compute_rings > 0 &&
675 pdevice->rad_info.chip_class >= CIK &&
676 !(pdevice->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE))
677 num_queue_families++;
678
679 if (pQueueFamilyProperties == NULL) {
680 *pCount = num_queue_families;
681 return;
682 }
683
684 if (!*pCount)
685 return;
686
687 idx = 0;
688 if (*pCount >= 1) {
689 *pQueueFamilyProperties[idx] = (VkQueueFamilyProperties) {
690 .queueFlags = VK_QUEUE_GRAPHICS_BIT |
691 VK_QUEUE_COMPUTE_BIT |
692 VK_QUEUE_TRANSFER_BIT |
693 VK_QUEUE_SPARSE_BINDING_BIT,
694 .queueCount = 1,
695 .timestampValidBits = 64,
696 .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
697 };
698 idx++;
699 }
700
701 if (pdevice->rad_info.compute_rings > 0 &&
702 pdevice->rad_info.chip_class >= CIK &&
703 !(pdevice->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE)) {
704 if (*pCount > idx) {
705 *pQueueFamilyProperties[idx] = (VkQueueFamilyProperties) {
706 .queueFlags = VK_QUEUE_COMPUTE_BIT |
707 VK_QUEUE_TRANSFER_BIT |
708 VK_QUEUE_SPARSE_BINDING_BIT,
709 .queueCount = pdevice->rad_info.compute_rings,
710 .timestampValidBits = 64,
711 .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
712 };
713 idx++;
714 }
715 }
716 *pCount = idx;
717 }
718
719 void radv_GetPhysicalDeviceQueueFamilyProperties(
720 VkPhysicalDevice physicalDevice,
721 uint32_t* pCount,
722 VkQueueFamilyProperties* pQueueFamilyProperties)
723 {
724 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
725 if (!pQueueFamilyProperties) {
726 radv_get_physical_device_queue_family_properties(pdevice, pCount, NULL);
727 return;
728 }
729 VkQueueFamilyProperties *properties[] = {
730 pQueueFamilyProperties + 0,
731 pQueueFamilyProperties + 1,
732 pQueueFamilyProperties + 2,
733 };
734 radv_get_physical_device_queue_family_properties(pdevice, pCount, properties);
735 assert(*pCount <= 3);
736 }
737
738 void radv_GetPhysicalDeviceQueueFamilyProperties2KHR(
739 VkPhysicalDevice physicalDevice,
740 uint32_t* pCount,
741 VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
742 {
743 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
744 if (!pQueueFamilyProperties) {
745 radv_get_physical_device_queue_family_properties(pdevice, pCount, NULL);
746 return;
747 }
748 VkQueueFamilyProperties *properties[] = {
749 &pQueueFamilyProperties[0].queueFamilyProperties,
750 &pQueueFamilyProperties[1].queueFamilyProperties,
751 &pQueueFamilyProperties[2].queueFamilyProperties,
752 };
753 radv_get_physical_device_queue_family_properties(pdevice, pCount, properties);
754 assert(*pCount <= 3);
755 }
756
757 void radv_GetPhysicalDeviceMemoryProperties(
758 VkPhysicalDevice physicalDevice,
759 VkPhysicalDeviceMemoryProperties *pMemoryProperties)
760 {
761 RADV_FROM_HANDLE(radv_physical_device, physical_device, physicalDevice);
762
763 STATIC_ASSERT(RADV_MEM_TYPE_COUNT <= VK_MAX_MEMORY_TYPES);
764
765 pMemoryProperties->memoryTypeCount = RADV_MEM_TYPE_COUNT;
766 pMemoryProperties->memoryTypes[RADV_MEM_TYPE_VRAM] = (VkMemoryType) {
767 .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
768 .heapIndex = RADV_MEM_HEAP_VRAM,
769 };
770 pMemoryProperties->memoryTypes[RADV_MEM_TYPE_GTT_WRITE_COMBINE] = (VkMemoryType) {
771 .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
772 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
773 .heapIndex = RADV_MEM_HEAP_GTT,
774 };
775 pMemoryProperties->memoryTypes[RADV_MEM_TYPE_VRAM_CPU_ACCESS] = (VkMemoryType) {
776 .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
777 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
778 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
779 .heapIndex = RADV_MEM_HEAP_VRAM_CPU_ACCESS,
780 };
781 pMemoryProperties->memoryTypes[RADV_MEM_TYPE_GTT_CACHED] = (VkMemoryType) {
782 .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
783 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
784 VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
785 .heapIndex = RADV_MEM_HEAP_GTT,
786 };
787
788 STATIC_ASSERT(RADV_MEM_HEAP_COUNT <= VK_MAX_MEMORY_HEAPS);
789
790 pMemoryProperties->memoryHeapCount = RADV_MEM_HEAP_COUNT;
791 pMemoryProperties->memoryHeaps[RADV_MEM_HEAP_VRAM] = (VkMemoryHeap) {
792 .size = physical_device->rad_info.vram_size -
793 physical_device->rad_info.visible_vram_size,
794 .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
795 };
796 pMemoryProperties->memoryHeaps[RADV_MEM_HEAP_VRAM_CPU_ACCESS] = (VkMemoryHeap) {
797 .size = physical_device->rad_info.visible_vram_size,
798 .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
799 };
800 pMemoryProperties->memoryHeaps[RADV_MEM_HEAP_GTT] = (VkMemoryHeap) {
801 .size = physical_device->rad_info.gart_size,
802 .flags = 0,
803 };
804 }
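/* Summary of the mapping established above (a sketch):
 *
 *   type VRAM              -> DEVICE_LOCAL                          heap VRAM
 *   type GTT_WRITE_COMBINE -> HOST_VISIBLE|HOST_COHERENT            heap GTT
 *   type VRAM_CPU_ACCESS   -> DEVICE_LOCAL|HOST_VISIBLE|
 *                             HOST_COHERENT                         heap VRAM_CPU_ACCESS
 *   type GTT_CACHED        -> HOST_VISIBLE|HOST_COHERENT|
 *                             HOST_CACHED                           heap GTT
 *
 * Note the VRAM heap size deliberately excludes the CPU-visible slice,
 * which is reported as its own heap. */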
805
806 void radv_GetPhysicalDeviceMemoryProperties2KHR(
807 VkPhysicalDevice physicalDevice,
808 VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
809 {
810 radv_GetPhysicalDeviceMemoryProperties(physicalDevice,
811 &pMemoryProperties->memoryProperties);
812 }
813
814 static int
815 radv_queue_init(struct radv_device *device, struct radv_queue *queue,
816 int queue_family_index, int idx)
817 {
818 queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
819 queue->device = device;
820 queue->queue_family_index = queue_family_index;
821 queue->queue_idx = idx;
822
823 queue->hw_ctx = device->ws->ctx_create(device->ws);
824 if (!queue->hw_ctx)
825 return VK_ERROR_OUT_OF_HOST_MEMORY;
826
827 return VK_SUCCESS;
828 }
829
830 static void
831 radv_queue_finish(struct radv_queue *queue)
832 {
833 if (queue->hw_ctx)
834 queue->device->ws->ctx_destroy(queue->hw_ctx);
835
836 if (queue->initial_preamble_cs)
837 queue->device->ws->cs_destroy(queue->initial_preamble_cs);
838 if (queue->continue_preamble_cs)
839 queue->device->ws->cs_destroy(queue->continue_preamble_cs);
840 if (queue->descriptor_bo)
841 queue->device->ws->buffer_destroy(queue->descriptor_bo);
842 if (queue->scratch_bo)
843 queue->device->ws->buffer_destroy(queue->scratch_bo);
844 if (queue->esgs_ring_bo)
845 queue->device->ws->buffer_destroy(queue->esgs_ring_bo);
846 if (queue->gsvs_ring_bo)
847 queue->device->ws->buffer_destroy(queue->gsvs_ring_bo);
848 if (queue->tess_factor_ring_bo)
849 queue->device->ws->buffer_destroy(queue->tess_factor_ring_bo);
850 if (queue->tess_offchip_ring_bo)
851 queue->device->ws->buffer_destroy(queue->tess_offchip_ring_bo);
852 if (queue->compute_scratch_bo)
853 queue->device->ws->buffer_destroy(queue->compute_scratch_bo);
854 }
855
856 static void
857 radv_device_init_gs_info(struct radv_device *device)
858 {
859 switch (device->physical_device->rad_info.family) {
860 case CHIP_OLAND:
861 case CHIP_HAINAN:
862 case CHIP_KAVERI:
863 case CHIP_KABINI:
864 case CHIP_MULLINS:
865 case CHIP_ICELAND:
866 case CHIP_CARRIZO:
867 case CHIP_STONEY:
868 device->gs_table_depth = 16;
869 return;
870 case CHIP_TAHITI:
871 case CHIP_PITCAIRN:
872 case CHIP_VERDE:
873 case CHIP_BONAIRE:
874 case CHIP_HAWAII:
875 case CHIP_TONGA:
876 case CHIP_FIJI:
877 case CHIP_POLARIS10:
878 case CHIP_POLARIS11:
879 device->gs_table_depth = 32;
880 return;
881 default:
882 unreachable("unknown GPU");
883 }
884 }
885
886 VkResult radv_CreateDevice(
887 VkPhysicalDevice physicalDevice,
888 const VkDeviceCreateInfo* pCreateInfo,
889 const VkAllocationCallbacks* pAllocator,
890 VkDevice* pDevice)
891 {
892 RADV_FROM_HANDLE(radv_physical_device, physical_device, physicalDevice);
893 VkResult result;
894 struct radv_device *device;
895
896 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
897 if (!is_extension_enabled(physical_device->extensions.ext_array,
898 physical_device->extensions.num_ext,
899 pCreateInfo->ppEnabledExtensionNames[i]))
900 return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
901 }
902
903 device = vk_alloc2(&physical_device->instance->alloc, pAllocator,
904 sizeof(*device), 8,
905 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
906 if (!device)
907 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
908
909 memset(device, 0, sizeof(*device));
910
911 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
912 device->instance = physical_device->instance;
913 device->physical_device = physical_device;
914
915 device->debug_flags = device->instance->debug_flags;
916
917 device->ws = physical_device->ws;
918 if (pAllocator)
919 device->alloc = *pAllocator;
920 else
921 device->alloc = physical_device->instance->alloc;
922
923 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
924 const VkDeviceQueueCreateInfo *queue_create = &pCreateInfo->pQueueCreateInfos[i];
925 uint32_t qfi = queue_create->queueFamilyIndex;
926
927 device->queues[qfi] = vk_alloc(&device->alloc,
928 queue_create->queueCount * sizeof(struct radv_queue), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
929 if (!device->queues[qfi]) {
930 result = VK_ERROR_OUT_OF_HOST_MEMORY;
931 goto fail;
932 }
933
934 memset(device->queues[qfi], 0, queue_create->queueCount * sizeof(struct radv_queue));
935
936 device->queue_count[qfi] = queue_create->queueCount;
937
938 for (unsigned q = 0; q < queue_create->queueCount; q++) {
939 result = radv_queue_init(device, &device->queues[qfi][q], qfi, q);
940 if (result != VK_SUCCESS)
941 goto fail;
942 }
943 }
944
945 #if HAVE_LLVM < 0x0400
946 device->llvm_supports_spill = false;
947 #else
948 device->llvm_supports_spill = true;
949 #endif
950
951 /* The maximum number of scratch waves. Scratch space isn't divided
952 * evenly between CUs. The number is only a function of the number of CUs.
953 * We can decrease the constant to decrease the scratch buffer size.
954 *
955 * sctx->scratch_waves must be >= the maximum posible size of
956 * 1 threadgroup, so that the hw doesn't hang from being unable
957 * to start any.
958 *
959 * The recommended value is 4 per CU at most. Higher numbers don't
960 * bring much benefit, but they still occupy chip resources (think
961 * async compute). I've seen ~2% performance difference between 4 and 32.
962 */
963 uint32_t max_threads_per_block = 2048;
964 device->scratch_waves = MAX2(32 * physical_device->rad_info.num_good_compute_units,
965 max_threads_per_block / 64);
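/* Worked example (hypothetical 36-CU GPU): scratch_waves =
 * MAX2(32 * 36, 2048 / 64) = MAX2(1152, 32) = 1152 waves, so the
 * per-CU term dominates; the max_threads_per_block / 64 floor only
 * matters for unrealistically small CU counts. */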
966
967 radv_device_init_gs_info(device);
968
969 device->tess_offchip_block_dw_size =
970 device->physical_device->rad_info.family == CHIP_HAWAII ? 4096 : 8192;
971 device->has_distributed_tess =
972 device->physical_device->rad_info.chip_class >= VI &&
973 device->physical_device->rad_info.max_se >= 2;
974
975 result = radv_device_init_meta(device);
976 if (result != VK_SUCCESS)
977 goto fail;
978
979 radv_device_init_msaa(device);
980
981 for (int family = 0; family < RADV_MAX_QUEUE_FAMILIES; ++family) {
982 device->empty_cs[family] = device->ws->cs_create(device->ws, family);
983 switch (family) {
984 case RADV_QUEUE_GENERAL:
985 radeon_emit(device->empty_cs[family], PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
986 radeon_emit(device->empty_cs[family], CONTEXT_CONTROL_LOAD_ENABLE(1));
987 radeon_emit(device->empty_cs[family], CONTEXT_CONTROL_SHADOW_ENABLE(1));
988 break;
989 case RADV_QUEUE_COMPUTE:
990 radeon_emit(device->empty_cs[family], PKT3(PKT3_NOP, 0, 0));
991 radeon_emit(device->empty_cs[family], 0);
992 break;
993 }
994 device->ws->cs_finalize(device->empty_cs[family]);
995
996 device->flush_cs[family] = device->ws->cs_create(device->ws, family);
997 switch (family) {
998 case RADV_QUEUE_GENERAL:
999 case RADV_QUEUE_COMPUTE:
1000 si_cs_emit_cache_flush(device->flush_cs[family],
1001 device->physical_device->rad_info.chip_class,
1002 family == RADV_QUEUE_COMPUTE && device->physical_device->rad_info.chip_class >= CIK,
1003 RADV_CMD_FLAG_INV_ICACHE |
1004 RADV_CMD_FLAG_INV_SMEM_L1 |
1005 RADV_CMD_FLAG_INV_VMEM_L1 |
1006 RADV_CMD_FLAG_INV_GLOBAL_L2);
1007 break;
1008 }
1009 device->ws->cs_finalize(device->flush_cs[family]);
1010 }
1011
1012 if (getenv("RADV_TRACE_FILE")) {
1013 device->trace_bo = device->ws->buffer_create(device->ws, 4096, 8,
1014 RADEON_DOMAIN_VRAM, RADEON_FLAG_CPU_ACCESS);
1015 if (!device->trace_bo)
1016 goto fail;
1017
1018 device->trace_id_ptr = device->ws->buffer_map(device->trace_bo);
1019 if (!device->trace_id_ptr)
1020 goto fail;
1021 }
1022
1023 if (device->physical_device->rad_info.chip_class >= CIK)
1024 cik_create_gfx_config(device);
1025
1026 VkPipelineCacheCreateInfo ci;
1027 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
1028 ci.pNext = NULL;
1029 ci.flags = 0;
1030 ci.pInitialData = NULL;
1031 ci.initialDataSize = 0;
1032 VkPipelineCache pc;
1033 result = radv_CreatePipelineCache(radv_device_to_handle(device),
1034 &ci, NULL, &pc);
1035 if (result != VK_SUCCESS)
1036 goto fail;
1037
1038 device->mem_cache = radv_pipeline_cache_from_handle(pc);
1039
1040 *pDevice = radv_device_to_handle(device);
1041 return VK_SUCCESS;
1042
1043 fail:
1044 if (device->trace_bo)
1045 device->ws->buffer_destroy(device->trace_bo);
1046
1047 if (device->gfx_init)
1048 device->ws->buffer_destroy(device->gfx_init);
1049
1050 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
1051 for (unsigned q = 0; q < device->queue_count[i]; q++)
1052 radv_queue_finish(&device->queues[i][q]);
1053 if (device->queue_count[i])
1054 vk_free(&device->alloc, device->queues[i]);
1055 }
1056
1057 vk_free(&device->alloc, device);
1058 return result;
1059 }
1060
1061 void radv_DestroyDevice(
1062 VkDevice _device,
1063 const VkAllocationCallbacks* pAllocator)
1064 {
1065 RADV_FROM_HANDLE(radv_device, device, _device);
1066
1067 if (!device)
1068 return;
1069
1070 if (device->trace_bo)
1071 device->ws->buffer_destroy(device->trace_bo);
1072
1073 if (device->gfx_init)
1074 device->ws->buffer_destroy(device->gfx_init);
1075
1076 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
1077 for (unsigned q = 0; q < device->queue_count[i]; q++)
1078 radv_queue_finish(&device->queues[i][q]);
1079 if (device->queue_count[i])
1080 vk_free(&device->alloc, device->queues[i]);
1081 if (device->empty_cs[i])
1082 device->ws->cs_destroy(device->empty_cs[i]);
1083 if (device->flush_cs[i])
1084 device->ws->cs_destroy(device->flush_cs[i]);
1085 }
1086 radv_device_finish_meta(device);
1087
1088 VkPipelineCache pc = radv_pipeline_cache_to_handle(device->mem_cache);
1089 radv_DestroyPipelineCache(radv_device_to_handle(device), pc, NULL);
1090
1091 vk_free(&device->alloc, device);
1092 }
1093
1094 VkResult radv_EnumerateInstanceExtensionProperties(
1095 const char* pLayerName,
1096 uint32_t* pPropertyCount,
1097 VkExtensionProperties* pProperties)
1098 {
1099 if (pProperties == NULL) {
1100 *pPropertyCount = ARRAY_SIZE(instance_extensions);
1101 return VK_SUCCESS;
1102 }
1103
1104 *pPropertyCount = MIN2(*pPropertyCount, ARRAY_SIZE(instance_extensions));
1105 typed_memcpy(pProperties, instance_extensions, *pPropertyCount);
1106
1107 if (*pPropertyCount < ARRAY_SIZE(instance_extensions))
1108 return VK_INCOMPLETE;
1109
1110 return VK_SUCCESS;
1111 }
1112
1113 VkResult radv_EnumerateDeviceExtensionProperties(
1114 VkPhysicalDevice physicalDevice,
1115 const char* pLayerName,
1116 uint32_t* pPropertyCount,
1117 VkExtensionProperties* pProperties)
1118 {
1119 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
1120
1121 if (pProperties == NULL) {
1122 *pPropertyCount = pdevice->extensions.num_ext;
1123 return VK_SUCCESS;
1124 }
1125
1126 *pPropertyCount = MIN2(*pPropertyCount, pdevice->extensions.num_ext);
1127 typed_memcpy(pProperties, pdevice->extensions.ext_array, *pPropertyCount);
1128
1129 if (*pPropertyCount < pdevice->extensions.num_ext)
1130 return VK_INCOMPLETE;
1131
1132 return VK_SUCCESS;
1133 }
1134
1135 VkResult radv_EnumerateInstanceLayerProperties(
1136 uint32_t* pPropertyCount,
1137 VkLayerProperties* pProperties)
1138 {
1139 if (pProperties == NULL) {
1140 *pPropertyCount = 0;
1141 return VK_SUCCESS;
1142 }
1143
1144 /* None supported at this time */
1145 return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
1146 }
1147
1148 VkResult radv_EnumerateDeviceLayerProperties(
1149 VkPhysicalDevice physicalDevice,
1150 uint32_t* pPropertyCount,
1151 VkLayerProperties* pProperties)
1152 {
1153 if (pProperties == NULL) {
1154 *pPropertyCount = 0;
1155 return VK_SUCCESS;
1156 }
1157
1158 /* None supported at this time */
1159 return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
1160 }
1161
1162 void radv_GetDeviceQueue(
1163 VkDevice _device,
1164 uint32_t queueFamilyIndex,
1165 uint32_t queueIndex,
1166 VkQueue* pQueue)
1167 {
1168 RADV_FROM_HANDLE(radv_device, device, _device);
1169
1170 *pQueue = radv_queue_to_handle(&device->queues[queueFamilyIndex][queueIndex]);
1171 }
1172
1173 static void radv_dump_trace(struct radv_device *device,
1174 struct radeon_winsys_cs *cs)
1175 {
1176 const char *filename = getenv("RADV_TRACE_FILE");
1177 FILE *f = fopen(filename, "w");
1178 if (!f) {
1179 fprintf(stderr, "Failed to write trace dump to %s\n", filename);
1180 return;
1181 }
1182
1183 fprintf(f, "Trace ID: %x\n", *device->trace_id_ptr);
1184 device->ws->cs_dump(cs, f, *device->trace_id_ptr);
1185 fclose(f);
1186 }
1187
1188 static void
1189 fill_geom_tess_rings(struct radv_queue *queue,
1190 uint32_t *map,
1191 uint32_t esgs_ring_size,
1192 struct radeon_winsys_bo *esgs_ring_bo,
1193 uint32_t gsvs_ring_size,
1194 struct radeon_winsys_bo *gsvs_ring_bo,
1195 uint32_t tess_factor_ring_size,
1196 struct radeon_winsys_bo *tess_factor_ring_bo,
1197 uint32_t tess_offchip_ring_size,
1198 struct radeon_winsys_bo *tess_offchip_ring_bo)
1199 {
1200 uint64_t esgs_va = 0, gsvs_va = 0;
1201 uint64_t tess_factor_va = 0, tess_offchip_va = 0;
1202 uint32_t *desc = &map[4];
1203
1204 if (esgs_ring_bo)
1205 esgs_va = queue->device->ws->buffer_get_va(esgs_ring_bo);
1206 if (gsvs_ring_bo)
1207 gsvs_va = queue->device->ws->buffer_get_va(gsvs_ring_bo);
1208 if (tess_factor_ring_bo)
1209 tess_factor_va = queue->device->ws->buffer_get_va(tess_factor_ring_bo);
1210 if (tess_offchip_ring_bo)
1211 tess_offchip_va = queue->device->ws->buffer_get_va(tess_offchip_ring_bo);
1212
1213 /* stride 0, num records - size, add tid, swizzle, elsize4,
1214 index stride 64 */
1215 desc[0] = esgs_va;
1216 desc[1] = S_008F04_BASE_ADDRESS_HI(esgs_va >> 32) |
1217 S_008F04_STRIDE(0) |
1218 S_008F04_SWIZZLE_ENABLE(true);
1219 desc[2] = esgs_ring_size;
1220 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1221 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1222 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1223 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1224 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1225 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1226 S_008F0C_ELEMENT_SIZE(1) |
1227 S_008F0C_INDEX_STRIDE(3) |
1228 S_008F0C_ADD_TID_ENABLE(true);
1229
1230 desc += 4;
1231 /* GS entry for ES->GS ring */
1232 /* stride 0, num records - size, elsize0,
1233 index stride 0 */
1234 desc[0] = esgs_va;
1235 desc[1] = S_008F04_BASE_ADDRESS_HI(esgs_va >> 32)|
1236 S_008F04_STRIDE(0) |
1237 S_008F04_SWIZZLE_ENABLE(false);
1238 desc[2] = esgs_ring_size;
1239 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1240 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1241 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1242 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1243 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1244 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1245 S_008F0C_ELEMENT_SIZE(0) |
1246 S_008F0C_INDEX_STRIDE(0) |
1247 S_008F0C_ADD_TID_ENABLE(false);
1248
1249 desc += 4;
1250 /* VS entry for GS->VS ring */
1251 /* stride 0, num records - size, elsize0,
1252 index stride 0 */
1253 desc[0] = gsvs_va;
1254 desc[1] = S_008F04_BASE_ADDRESS_HI(gsvs_va >> 32)|
1255 S_008F04_STRIDE(0) |
1256 S_008F04_SWIZZLE_ENABLE(false);
1257 desc[2] = gsvs_ring_size;
1258 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1259 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1260 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1261 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1262 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1263 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1264 S_008F0C_ELEMENT_SIZE(0) |
1265 S_008F0C_INDEX_STRIDE(0) |
1266 S_008F0C_ADD_TID_ENABLE(false);
1267 desc += 4;
1268
1269 /* stride gsvs_itemsize, num records 64
1270 elsize 4, index stride 16 */
1271 /* shader will patch stride and desc[2] */
1272 desc[0] = gsvs_va;
1273 desc[1] = S_008F04_BASE_ADDRESS_HI(gsvs_va >> 32)|
1274 S_008F04_STRIDE(0) |
1275 S_008F04_SWIZZLE_ENABLE(true);
1276 desc[2] = 0;
1277 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1278 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1279 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1280 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1281 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1282 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1283 S_008F0C_ELEMENT_SIZE(1) |
1284 S_008F0C_INDEX_STRIDE(1) |
1285 S_008F0C_ADD_TID_ENABLE(true);
1286 desc += 4;
1287
1288 desc[0] = tess_factor_va;
1289 desc[1] = S_008F04_BASE_ADDRESS_HI(tess_factor_va >> 32) |
1290 S_008F04_STRIDE(0) |
1291 S_008F04_SWIZZLE_ENABLE(false);
1292 desc[2] = tess_factor_ring_size;
1293 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1294 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1295 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1296 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1297 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1298 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1299 S_008F0C_ELEMENT_SIZE(0) |
1300 S_008F0C_INDEX_STRIDE(0) |
1301 S_008F0C_ADD_TID_ENABLE(false);
1302 desc += 4;
1303
1304 desc[0] = tess_offchip_va;
1305 desc[1] = S_008F04_BASE_ADDRESS_HI(tess_offchip_va >> 32) |
1306 S_008F04_STRIDE(0) |
1307 S_008F04_SWIZZLE_ENABLE(false);
1308 desc[2] = tess_offchip_ring_size;
1309 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1310 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1311 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1312 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1313 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1314 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1315 S_008F0C_ELEMENT_SIZE(0) |
1316 S_008F0C_INDEX_STRIDE(0) |
1317 S_008F0C_ADD_TID_ENABLE(false);
1318 }
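/* Layout of the descriptor BO written above (a sketch; map[0..1] is the
 * scratch rsrc filled in by the caller, map[2..3] is padding):
 *
 *   map[ 4.. 7]  ES->GS ring, ES (write) view
 *   map[ 8..11]  ES->GS ring, GS (read) view
 *   map[12..15]  GS->VS ring, VS (read) view
 *   map[16..19]  GS->VS ring, GS (write) view (stride/records patched by shader)
 *   map[20..23]  tess factor ring
 *   map[24..27]  tess offchip ring
 *
 * 2 + 2 + 6 * 4 = 28 dwords = 112 bytes, matching the descriptor BO
 * size chosen in radv_get_preamble_cs(). */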
1319
1320 static unsigned
1321 radv_get_hs_offchip_param(struct radv_device *device, uint32_t *max_offchip_buffers_p)
1322 {
1323 bool double_offchip_buffers = device->physical_device->rad_info.chip_class >= CIK &&
1324 device->physical_device->rad_info.family != CHIP_CARRIZO &&
1325 device->physical_device->rad_info.family != CHIP_STONEY;
1326 unsigned max_offchip_buffers_per_se = double_offchip_buffers ? 128 : 64;
1327 unsigned max_offchip_buffers = max_offchip_buffers_per_se *
1328 device->physical_device->rad_info.max_se;
1329 unsigned offchip_granularity;
1330 unsigned hs_offchip_param;
1331 switch (device->tess_offchip_block_dw_size) {
1332 default:
1333 assert(0);
1334 /* fall through */
1335 case 8192:
1336 offchip_granularity = V_03093C_X_8K_DWORDS;
1337 break;
1338 case 4096:
1339 offchip_granularity = V_03093C_X_4K_DWORDS;
1340 break;
1341 }
1342
1343 switch (device->physical_device->rad_info.chip_class) {
1344 case SI:
1345 max_offchip_buffers = MIN2(max_offchip_buffers, 126);
1346 break;
1347 case CIK:
1348 max_offchip_buffers = MIN2(max_offchip_buffers, 508);
1349 break;
1350 case VI:
1351 default:
1352 max_offchip_buffers = MIN2(max_offchip_buffers, 512);
1353 break;
1354 }
1355
1356 *max_offchip_buffers_p = max_offchip_buffers;
1357 if (device->physical_device->rad_info.chip_class >= CIK) {
1358 if (device->physical_device->rad_info.chip_class >= VI)
1359 --max_offchip_buffers;
1360 hs_offchip_param =
1361 S_03093C_OFFCHIP_BUFFERING(max_offchip_buffers) |
1362 S_03093C_OFFCHIP_GRANULARITY(offchip_granularity);
1363 } else {
1364 hs_offchip_param =
1365 S_0089B0_OFFCHIP_BUFFERING(max_offchip_buffers);
1366 }
1367 return hs_offchip_param;
1368 }
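/* Worked example, assuming a VI part with max_se = 4 and the default
 * 8K-dword offchip blocks: double_offchip_buffers is true, so
 * 128 * 4 = 512 buffers, clamped to 512. *max_offchip_buffers_p
 * reports 512, while the register field is programmed with 511
 * (VI encodes the count minus one) plus X_8K_DWORDS granularity. */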
1369
1370 static VkResult
1371 radv_get_preamble_cs(struct radv_queue *queue,
1372 uint32_t scratch_size,
1373 uint32_t compute_scratch_size,
1374 uint32_t esgs_ring_size,
1375 uint32_t gsvs_ring_size,
1376 bool needs_tess_rings,
1377 struct radeon_winsys_cs **initial_preamble_cs,
1378 struct radeon_winsys_cs **continue_preamble_cs)
1379 {
1380 struct radeon_winsys_bo *scratch_bo = NULL;
1381 struct radeon_winsys_bo *descriptor_bo = NULL;
1382 struct radeon_winsys_bo *compute_scratch_bo = NULL;
1383 struct radeon_winsys_bo *esgs_ring_bo = NULL;
1384 struct radeon_winsys_bo *gsvs_ring_bo = NULL;
1385 struct radeon_winsys_bo *tess_factor_ring_bo = NULL;
1386 struct radeon_winsys_bo *tess_offchip_ring_bo = NULL;
1387 struct radeon_winsys_cs *dest_cs[2] = {0};
1388 bool add_tess_rings = false;
1389 unsigned tess_factor_ring_size = 0, tess_offchip_ring_size = 0;
1390 unsigned max_offchip_buffers;
1391 unsigned hs_offchip_param = 0;
1392 if (!queue->has_tess_rings) {
1393 if (needs_tess_rings)
1394 add_tess_rings = true;
1395 }
1396 tess_factor_ring_size = 32768 * queue->device->physical_device->rad_info.max_se;
1397 hs_offchip_param = radv_get_hs_offchip_param(queue->device,
1398 &max_offchip_buffers);
1399 tess_offchip_ring_size = max_offchip_buffers *
1400 queue->device->tess_offchip_block_dw_size * 4;
1401
1402 if (scratch_size <= queue->scratch_size &&
1403 compute_scratch_size <= queue->compute_scratch_size &&
1404 esgs_ring_size <= queue->esgs_ring_size &&
1405 gsvs_ring_size <= queue->gsvs_ring_size &&
1406 !add_tess_rings &&
1407 queue->initial_preamble_cs) {
1408 *initial_preamble_cs = queue->initial_preamble_cs;
1409 *continue_preamble_cs = queue->continue_preamble_cs;
1410 if (!scratch_size && !compute_scratch_size && !esgs_ring_size && !gsvs_ring_size)
1411 *continue_preamble_cs = NULL;
1412 return VK_SUCCESS;
1413 }
1414
1415 if (scratch_size > queue->scratch_size) {
1416 scratch_bo = queue->device->ws->buffer_create(queue->device->ws,
1417 scratch_size,
1418 4096,
1419 RADEON_DOMAIN_VRAM,
1420 RADEON_FLAG_NO_CPU_ACCESS);
1421 if (!scratch_bo)
1422 goto fail;
1423 } else
1424 scratch_bo = queue->scratch_bo;
1425
1426 if (compute_scratch_size > queue->compute_scratch_size) {
1427 compute_scratch_bo = queue->device->ws->buffer_create(queue->device->ws,
1428 compute_scratch_size,
1429 4096,
1430 RADEON_DOMAIN_VRAM,
1431 RADEON_FLAG_NO_CPU_ACCESS);
1432 if (!compute_scratch_bo)
1433 goto fail;
1434
1435 } else
1436 compute_scratch_bo = queue->compute_scratch_bo;
1437
1438 if (esgs_ring_size > queue->esgs_ring_size) {
1439 esgs_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
1440 esgs_ring_size,
1441 4096,
1442 RADEON_DOMAIN_VRAM,
1443 RADEON_FLAG_NO_CPU_ACCESS);
1444 if (!esgs_ring_bo)
1445 goto fail;
1446 } else {
1447 esgs_ring_bo = queue->esgs_ring_bo;
1448 esgs_ring_size = queue->esgs_ring_size;
1449 }
1450
1451 if (gsvs_ring_size > queue->gsvs_ring_size) {
1452 gsvs_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
1453 gsvs_ring_size,
1454 4096,
1455 RADEON_DOMAIN_VRAM,
1456 RADEON_FLAG_NO_CPU_ACCESS);
1457 if (!gsvs_ring_bo)
1458 goto fail;
1459 } else {
1460 gsvs_ring_bo = queue->gsvs_ring_bo;
1461 gsvs_ring_size = queue->gsvs_ring_size;
1462 }
1463
1464 if (add_tess_rings) {
1465 tess_factor_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
1466 tess_factor_ring_size,
1467 256,
1468 RADEON_DOMAIN_VRAM,
1469 RADEON_FLAG_NO_CPU_ACCESS);
1470 if (!tess_factor_ring_bo)
1471 goto fail;
1472 tess_offchip_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
1473 tess_offchip_ring_size,
1474 256,
1475 RADEON_DOMAIN_VRAM,
1476 RADEON_FLAG_NO_CPU_ACCESS);
1477 if (!tess_offchip_ring_bo)
1478 goto fail;
1479 } else {
1480 tess_factor_ring_bo = queue->tess_factor_ring_bo;
1481 tess_offchip_ring_bo = queue->tess_offchip_ring_bo;
1482 }
1483
1484 if (scratch_bo != queue->scratch_bo ||
1485 esgs_ring_bo != queue->esgs_ring_bo ||
1486 gsvs_ring_bo != queue->gsvs_ring_bo ||
1487 tess_factor_ring_bo != queue->tess_factor_ring_bo ||
1488 tess_offchip_ring_bo != queue->tess_offchip_ring_bo) {
1489 uint32_t size = 0;
1490 if (gsvs_ring_bo || esgs_ring_bo ||
1491 tess_factor_ring_bo || tess_offchip_ring_bo)
1492 size = 112; /* 2 dword + 2 padding + 4 dword * 6 */
1493 else if (scratch_bo)
1494 size = 8; /* 2 dword */
1495
1496 descriptor_bo = queue->device->ws->buffer_create(queue->device->ws,
1497 size,
1498 4096,
1499 RADEON_DOMAIN_VRAM,
1500 RADEON_FLAG_CPU_ACCESS);
1501 if (!descriptor_bo)
1502 goto fail;
1503 } else
1504 descriptor_bo = queue->descriptor_bo;
1505
1506 for (int i = 0; i < 2; ++i) {
1507 struct radeon_winsys_cs *cs = NULL;
1508 cs = queue->device->ws->cs_create(queue->device->ws,
1509 queue->queue_family_index ? RING_COMPUTE : RING_GFX);
1510 if (!cs)
1511 goto fail;
1512
1513 dest_cs[i] = cs;
1514
1515 if (scratch_bo)
1516 queue->device->ws->cs_add_buffer(cs, scratch_bo, 8);
1517
1518 if (esgs_ring_bo)
1519 queue->device->ws->cs_add_buffer(cs, esgs_ring_bo, 8);
1520
1521 if (gsvs_ring_bo)
1522 queue->device->ws->cs_add_buffer(cs, gsvs_ring_bo, 8);
1523
1524 if (tess_factor_ring_bo)
1525 queue->device->ws->cs_add_buffer(cs, tess_factor_ring_bo, 8);
1526
1527 if (tess_offchip_ring_bo)
1528 queue->device->ws->cs_add_buffer(cs, tess_offchip_ring_bo, 8);
1529
1530 if (descriptor_bo)
1531 queue->device->ws->cs_add_buffer(cs, descriptor_bo, 8);
1532
1533 if (descriptor_bo != queue->descriptor_bo) {
1534 uint32_t *map = (uint32_t*)queue->device->ws->buffer_map(descriptor_bo);
1535
1536 if (scratch_bo) {
1537 uint64_t scratch_va = queue->device->ws->buffer_get_va(scratch_bo);
1538 uint32_t rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
1539 S_008F04_SWIZZLE_ENABLE(1);
1540 map[0] = scratch_va;
1541 map[1] = rsrc1;
1542 }
1543
1544 if (esgs_ring_bo || gsvs_ring_bo || tess_factor_ring_bo || tess_offchip_ring_bo)
1545 fill_geom_tess_rings(queue, map,
1546 esgs_ring_size, esgs_ring_bo,
1547 gsvs_ring_size, gsvs_ring_bo,
1548 tess_factor_ring_size, tess_factor_ring_bo,
1549 tess_offchip_ring_size, tess_offchip_ring_bo);
1550
1551 queue->device->ws->buffer_unmap(descriptor_bo);
1552 }
1553
1554 if (esgs_ring_bo || gsvs_ring_bo || tess_factor_ring_bo || tess_offchip_ring_bo) {
1555 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1556 radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1557 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1558 radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
1559 }
1560
1561 if (esgs_ring_bo || gsvs_ring_bo) {
1562 if (queue->device->physical_device->rad_info.chip_class >= CIK) {
1563 radeon_set_uconfig_reg_seq(cs, R_030900_VGT_ESGS_RING_SIZE, 2);
1564 radeon_emit(cs, esgs_ring_size >> 8);
1565 radeon_emit(cs, gsvs_ring_size >> 8);
1566 } else {
1567 radeon_set_config_reg_seq(cs, R_0088C8_VGT_ESGS_RING_SIZE, 2);
1568 radeon_emit(cs, esgs_ring_size >> 8);
1569 radeon_emit(cs, gsvs_ring_size >> 8);
1570 }
1571 }
1572
1573 if (tess_factor_ring_bo) {
1574 uint64_t tf_va = queue->device->ws->buffer_get_va(tess_factor_ring_bo);
1575 if (queue->device->physical_device->rad_info.chip_class >= CIK) {
1576 radeon_set_uconfig_reg(cs, R_030938_VGT_TF_RING_SIZE,
1577 S_030938_SIZE(tess_factor_ring_size / 4));
1578 radeon_set_uconfig_reg(cs, R_030940_VGT_TF_MEMORY_BASE,
1579 tf_va >> 8);
1580 radeon_set_uconfig_reg(cs, R_03093C_VGT_HS_OFFCHIP_PARAM, hs_offchip_param);
1581 } else {
1582 radeon_set_config_reg(cs, R_008988_VGT_TF_RING_SIZE,
1583 S_008988_SIZE(tess_factor_ring_size / 4));
1584 radeon_set_config_reg(cs, R_0089B8_VGT_TF_MEMORY_BASE,
1585 tf_va >> 8);
1586 radeon_set_config_reg(cs, R_0089B0_VGT_HS_OFFCHIP_PARAM,
1587 hs_offchip_param);
1588 }
1589 }
1590
1591 if (descriptor_bo) {
1592 uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0,
1593 R_00B130_SPI_SHADER_USER_DATA_VS_0,
1594 R_00B230_SPI_SHADER_USER_DATA_GS_0,
1595 R_00B330_SPI_SHADER_USER_DATA_ES_0,
1596 R_00B430_SPI_SHADER_USER_DATA_HS_0,
1597 R_00B530_SPI_SHADER_USER_DATA_LS_0};
1598
1599 uint64_t va = queue->device->ws->buffer_get_va(descriptor_bo);
1600
1601 for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
1602 radeon_set_sh_reg_seq(cs, regs[i], 2);
1603 radeon_emit(cs, va);
1604 radeon_emit(cs, va >> 32);
1605 }
1606 }
1607
1608 if (compute_scratch_bo) {
1609 uint64_t scratch_va = queue->device->ws->buffer_get_va(compute_scratch_bo);
1610 uint32_t rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
1611 S_008F04_SWIZZLE_ENABLE(1);
1612
1613 queue->device->ws->cs_add_buffer(cs, compute_scratch_bo, 8);
1614
1615 radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
1616 radeon_emit(cs, scratch_va);
1617 radeon_emit(cs, rsrc1);
1618 }
1619
1620 if (!i) {
1621 si_cs_emit_cache_flush(cs,
1622 queue->device->physical_device->rad_info.chip_class,
1623 queue->queue_family_index == RING_COMPUTE &&
1624 queue->device->physical_device->rad_info.chip_class >= CIK,
1625 RADV_CMD_FLAG_INV_ICACHE |
1626 RADV_CMD_FLAG_INV_SMEM_L1 |
1627 RADV_CMD_FLAG_INV_VMEM_L1 |
1628 RADV_CMD_FLAG_INV_GLOBAL_L2);
1629 }
1630
1631 if (!queue->device->ws->cs_finalize(cs))
1632 goto fail;
1633 }
1634
1635 if (queue->initial_preamble_cs)
1636 queue->device->ws->cs_destroy(queue->initial_preamble_cs);
1637
1638 if (queue->continue_preamble_cs)
1639 queue->device->ws->cs_destroy(queue->continue_preamble_cs);
1640
1641 queue->initial_preamble_cs = dest_cs[0];
1642 queue->continue_preamble_cs = dest_cs[1];
1643
1644 if (scratch_bo != queue->scratch_bo) {
1645 if (queue->scratch_bo)
1646 queue->device->ws->buffer_destroy(queue->scratch_bo);
1647 queue->scratch_bo = scratch_bo;
1648 queue->scratch_size = scratch_size;
1649 }
1650
1651 if (compute_scratch_bo != queue->compute_scratch_bo) {
1652 if (queue->compute_scratch_bo)
1653 queue->device->ws->buffer_destroy(queue->compute_scratch_bo);
1654 queue->compute_scratch_bo = compute_scratch_bo;
1655 queue->compute_scratch_size = compute_scratch_size;
1656 }
1657
1658 if (esgs_ring_bo != queue->esgs_ring_bo) {
1659 if (queue->esgs_ring_bo)
1660 queue->device->ws->buffer_destroy(queue->esgs_ring_bo);
1661 queue->esgs_ring_bo = esgs_ring_bo;
1662 queue->esgs_ring_size = esgs_ring_size;
1663 }
1664
1665 if (gsvs_ring_bo != queue->gsvs_ring_bo) {
1666 if (queue->gsvs_ring_bo)
1667 queue->device->ws->buffer_destroy(queue->gsvs_ring_bo);
1668 queue->gsvs_ring_bo = gsvs_ring_bo;
1669 queue->gsvs_ring_size = gsvs_ring_size;
1670 }
1671
1672 if (tess_factor_ring_bo != queue->tess_factor_ring_bo) {
1673 queue->tess_factor_ring_bo = tess_factor_ring_bo;
1674 }
1675
1676 if (tess_offchip_ring_bo != queue->tess_offchip_ring_bo) {
1677 queue->tess_offchip_ring_bo = tess_offchip_ring_bo;
1678 queue->has_tess_rings = true;
1679 }
1680
1681 if (descriptor_bo != queue->descriptor_bo) {
1682 if (queue->descriptor_bo)
1683 queue->device->ws->buffer_destroy(queue->descriptor_bo);
1684
1685 queue->descriptor_bo = descriptor_bo;
1686 }
1687
1688 *initial_preamble_cs = queue->initial_preamble_cs;
1689 *continue_preamble_cs = queue->continue_preamble_cs;
1690 if (!scratch_size && !compute_scratch_size && !esgs_ring_size && !gsvs_ring_size)
1691 *continue_preamble_cs = NULL;
1692 return VK_SUCCESS;
1693 fail:
1694 for (int i = 0; i < ARRAY_SIZE(dest_cs); ++i)
1695 if (dest_cs[i])
1696 queue->device->ws->cs_destroy(dest_cs[i]);
1697 if (descriptor_bo && descriptor_bo != queue->descriptor_bo)
1698 queue->device->ws->buffer_destroy(descriptor_bo);
1699 if (scratch_bo && scratch_bo != queue->scratch_bo)
1700 queue->device->ws->buffer_destroy(scratch_bo);
1701 if (compute_scratch_bo && compute_scratch_bo != queue->compute_scratch_bo)
1702 queue->device->ws->buffer_destroy(compute_scratch_bo);
1703 if (esgs_ring_bo && esgs_ring_bo != queue->esgs_ring_bo)
1704 queue->device->ws->buffer_destroy(esgs_ring_bo);
1705 if (gsvs_ring_bo && gsvs_ring_bo != queue->gsvs_ring_bo)
1706 queue->device->ws->buffer_destroy(gsvs_ring_bo);
1707 if (tess_factor_ring_bo && tess_factor_ring_bo != queue->tess_factor_ring_bo)
1708 queue->device->ws->buffer_destroy(tess_factor_ring_bo);
1709 if (tess_offchip_ring_bo && tess_offchip_ring_bo != queue->tess_offchip_ring_bo)
1710 queue->device->ws->buffer_destroy(tess_offchip_ring_bo);
1711 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
1712 }
1713
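/* Flatten each VkSubmitInfo into an array of winsys command streams (with an
 * optional leading flush CS) and submit it in chunks of max_cs_submission.
 * When a trace BO is present, every CS is submitted and waited on
 * individually so a hang can be pinned to the offending command stream. */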
1714 VkResult radv_QueueSubmit(
1715 VkQueue _queue,
1716 uint32_t submitCount,
1717 const VkSubmitInfo* pSubmits,
1718 VkFence _fence)
1719 {
1720 RADV_FROM_HANDLE(radv_queue, queue, _queue);
1721 RADV_FROM_HANDLE(radv_fence, fence, _fence);
1722 struct radeon_winsys_fence *base_fence = fence ? fence->fence : NULL;
1723 struct radeon_winsys_ctx *ctx = queue->hw_ctx;
1724 int ret;
1725 uint32_t max_cs_submission = queue->device->trace_bo ? 1 : UINT32_MAX;
1726 uint32_t scratch_size = 0;
1727 uint32_t compute_scratch_size = 0;
1728 uint32_t esgs_ring_size = 0, gsvs_ring_size = 0;
1729 struct radeon_winsys_cs *initial_preamble_cs = NULL, *continue_preamble_cs = NULL;
1730 VkResult result;
1731 bool fence_emitted = false;
1732 bool tess_rings_needed = false;
1733
	/* Do this first so failing to allocate scratch buffers or rings can't
	 * result in partially executed submissions. */
1736 for (uint32_t i = 0; i < submitCount; i++) {
1737 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
1738 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer,
1739 pSubmits[i].pCommandBuffers[j]);
1740
1741 scratch_size = MAX2(scratch_size, cmd_buffer->scratch_size_needed);
1742 compute_scratch_size = MAX2(compute_scratch_size,
1743 cmd_buffer->compute_scratch_size_needed);
1744 esgs_ring_size = MAX2(esgs_ring_size, cmd_buffer->esgs_ring_size_needed);
1745 gsvs_ring_size = MAX2(gsvs_ring_size, cmd_buffer->gsvs_ring_size_needed);
1746 tess_rings_needed |= cmd_buffer->tess_rings_needed;
1747 }
1748 }
1749
1750 result = radv_get_preamble_cs(queue, scratch_size, compute_scratch_size,
1751 esgs_ring_size, gsvs_ring_size, tess_rings_needed,
1752 &initial_preamble_cs, &continue_preamble_cs);
1753 if (result != VK_SUCCESS)
1754 return result;
1755
1756 for (uint32_t i = 0; i < submitCount; i++) {
1757 struct radeon_winsys_cs **cs_array;
1758 bool do_flush = !i;
1759 bool can_patch = !do_flush;
1760 uint32_t advance;
1761
1762 if (!pSubmits[i].commandBufferCount) {
1763 if (pSubmits[i].waitSemaphoreCount || pSubmits[i].signalSemaphoreCount) {
1764 ret = queue->device->ws->cs_submit(ctx, queue->queue_idx,
1765 &queue->device->empty_cs[queue->queue_family_index],
1766 1, NULL, NULL,
1767 (struct radeon_winsys_sem **)pSubmits[i].pWaitSemaphores,
1768 pSubmits[i].waitSemaphoreCount,
1769 (struct radeon_winsys_sem **)pSubmits[i].pSignalSemaphores,
1770 pSubmits[i].signalSemaphoreCount,
1771 false, base_fence);
1772 if (ret) {
1773 radv_loge("failed to submit CS %d\n", i);
1774 abort();
1775 }
1776 fence_emitted = true;
1777 }
1778 continue;
1779 }
1780
		cs_array = malloc(sizeof(struct radeon_winsys_cs *) *
				  (pSubmits[i].commandBufferCount + do_flush));
		if (!cs_array)
			return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

		if (do_flush)
1785 cs_array[0] = queue->device->flush_cs[queue->queue_family_index];
1786
1787 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
1788 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer,
1789 pSubmits[i].pCommandBuffers[j]);
1790 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1791
1792 cs_array[j + do_flush] = cmd_buffer->cs;
1793 if ((cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT))
1794 can_patch = false;
1795 }
1796
1797 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount + do_flush; j += advance) {
1798 advance = MIN2(max_cs_submission,
1799 pSubmits[i].commandBufferCount + do_flush - j);
			bool first = j == 0;
			bool last = j + advance == pSubmits[i].commandBufferCount + do_flush;
1802
1803 if (queue->device->trace_bo)
1804 *queue->device->trace_id_ptr = 0;
1805
1806 ret = queue->device->ws->cs_submit(ctx, queue->queue_idx, cs_array + j,
1807 advance, initial_preamble_cs, continue_preamble_cs,
1808 (struct radeon_winsys_sem **)pSubmits[i].pWaitSemaphores,
				first ? pSubmits[i].waitSemaphoreCount : 0,
1810 (struct radeon_winsys_sem **)pSubmits[i].pSignalSemaphores,
				last ? pSubmits[i].signalSemaphoreCount : 0,
1812 can_patch, base_fence);
1813
1814 if (ret) {
1815 radv_loge("failed to submit CS %d\n", i);
1816 abort();
1817 }
1818 fence_emitted = true;
1819 if (queue->device->trace_bo) {
1820 bool success = queue->device->ws->ctx_wait_idle(
1821 queue->hw_ctx,
1822 radv_queue_family_to_ring(
1823 queue->queue_family_index),
1824 queue->queue_idx);
1825
1826 if (!success) { /* Hang */
1827 radv_dump_trace(queue->device, cs_array[j]);
1828 abort();
1829 }
1830 }
1831 }
1832 free(cs_array);
1833 }
1834
1835 if (fence) {
1836 if (!fence_emitted)
1837 ret = queue->device->ws->cs_submit(ctx, queue->queue_idx,
1838 &queue->device->empty_cs[queue->queue_family_index],
1839 1, NULL, NULL, NULL, 0, NULL, 0,
1840 false, base_fence);
1841
1842 fence->submitted = true;
1843 }
1844
1845 return VK_SUCCESS;
1846 }
1847
1848 VkResult radv_QueueWaitIdle(
1849 VkQueue _queue)
1850 {
1851 RADV_FROM_HANDLE(radv_queue, queue, _queue);
1852
1853 queue->device->ws->ctx_wait_idle(queue->hw_ctx,
1854 radv_queue_family_to_ring(queue->queue_family_index),
1855 queue->queue_idx);
1856 return VK_SUCCESS;
1857 }
1858
1859 VkResult radv_DeviceWaitIdle(
1860 VkDevice _device)
1861 {
1862 RADV_FROM_HANDLE(radv_device, device, _device);
1863
1864 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
1865 for (unsigned q = 0; q < device->queue_count[i]; q++) {
1866 radv_QueueWaitIdle(radv_queue_to_handle(&device->queues[i][q]));
1867 }
1868 }
1869 return VK_SUCCESS;
1870 }
1871
1872 PFN_vkVoidFunction radv_GetInstanceProcAddr(
1873 VkInstance instance,
1874 const char* pName)
1875 {
1876 return radv_lookup_entrypoint(pName);
1877 }
1878
1879 /* The loader wants us to expose a second GetInstanceProcAddr function
1880 * to work around certain LD_PRELOAD issues seen in apps.
1881 */
1882 PUBLIC
1883 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
1884 VkInstance instance,
1885 const char* pName);
1886
1887 PUBLIC
1888 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
1889 VkInstance instance,
1890 const char* pName)
1891 {
1892 return radv_GetInstanceProcAddr(instance, pName);
1893 }
1894
1895 PFN_vkVoidFunction radv_GetDeviceProcAddr(
1896 VkDevice device,
1897 const char* pName)
1898 {
1899 return radv_lookup_entrypoint(pName);
1900 }
1901
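/* Export the BO backing a memory object as a file descriptor so it can be
 * shared with other processes (e.g. by the window system); for image-backed
 * allocations the tiling metadata is refreshed first. */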
1902 bool radv_get_memory_fd(struct radv_device *device,
1903 struct radv_device_memory *memory,
1904 int *pFD)
1905 {
1906 struct radeon_bo_metadata metadata;
1907
1908 if (memory->image) {
1909 radv_init_metadata(device, memory->image, &metadata);
1910 device->ws->buffer_set_metadata(memory->bo, &metadata);
1911 }
1912
1913 return device->ws->buffer_get_fd(device->ws, memory->bo,
1914 pFD);
1915 }
1916
1917 VkResult radv_AllocateMemory(
1918 VkDevice _device,
1919 const VkMemoryAllocateInfo* pAllocateInfo,
1920 const VkAllocationCallbacks* pAllocator,
1921 VkDeviceMemory* pMem)
1922 {
1923 RADV_FROM_HANDLE(radv_device, device, _device);
1924 struct radv_device_memory *mem;
1925 VkResult result;
1926 enum radeon_bo_domain domain;
1927 uint32_t flags = 0;
1928 const VkDedicatedAllocationMemoryAllocateInfoNV *dedicate_info = NULL;
1929 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
1930
1931 if (pAllocateInfo->allocationSize == 0) {
1932 /* Apparently, this is allowed */
1933 *pMem = VK_NULL_HANDLE;
1934 return VK_SUCCESS;
1935 }
1936
1937 vk_foreach_struct(ext, pAllocateInfo->pNext) {
1938 switch (ext->sType) {
1939 case VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV:
1940 dedicate_info = (const VkDedicatedAllocationMemoryAllocateInfoNV *)ext;
1941 break;
1942 default:
1943 break;
1944 }
1945 }
1946
1947 mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
1948 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1949 if (mem == NULL)
1950 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1951
1952 if (dedicate_info) {
1953 mem->image = radv_image_from_handle(dedicate_info->image);
1954 mem->buffer = radv_buffer_from_handle(dedicate_info->buffer);
1955 } else {
1956 mem->image = NULL;
1957 mem->buffer = NULL;
1958 }
1959
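	/* Derive the heap and access flags from the requested memory type:
	 * GTT types are placed in system memory, everything else in VRAM;
	 * only the pure device-local VRAM type is left CPU-inaccessible. */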
1960 uint64_t alloc_size = align_u64(pAllocateInfo->allocationSize, 4096);
1961 if (pAllocateInfo->memoryTypeIndex == RADV_MEM_TYPE_GTT_WRITE_COMBINE ||
1962 pAllocateInfo->memoryTypeIndex == RADV_MEM_TYPE_GTT_CACHED)
1963 domain = RADEON_DOMAIN_GTT;
1964 else
1965 domain = RADEON_DOMAIN_VRAM;
1966
1967 if (pAllocateInfo->memoryTypeIndex == RADV_MEM_TYPE_VRAM)
1968 flags |= RADEON_FLAG_NO_CPU_ACCESS;
1969 else
1970 flags |= RADEON_FLAG_CPU_ACCESS;
1971
1972 if (pAllocateInfo->memoryTypeIndex == RADV_MEM_TYPE_GTT_WRITE_COMBINE)
1973 flags |= RADEON_FLAG_GTT_WC;
1974
1975 mem->bo = device->ws->buffer_create(device->ws, alloc_size, 65536,
1976 domain, flags);
1977
1978 if (!mem->bo) {
1979 result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
1980 goto fail;
1981 }
1982 mem->type_index = pAllocateInfo->memoryTypeIndex;
1983
1984 *pMem = radv_device_memory_to_handle(mem);
1985
1986 return VK_SUCCESS;
1987
1988 fail:
1989 vk_free2(&device->alloc, pAllocator, mem);
1990
1991 return result;
1992 }
1993
1994 void radv_FreeMemory(
1995 VkDevice _device,
1996 VkDeviceMemory _mem,
1997 const VkAllocationCallbacks* pAllocator)
1998 {
1999 RADV_FROM_HANDLE(radv_device, device, _device);
2000 RADV_FROM_HANDLE(radv_device_memory, mem, _mem);
2001
2002 if (mem == NULL)
2003 return;
2004
2005 device->ws->buffer_destroy(mem->bo);
2006 mem->bo = NULL;
2007
2008 vk_free2(&device->alloc, pAllocator, mem);
2009 }
2010
2011 VkResult radv_MapMemory(
2012 VkDevice _device,
2013 VkDeviceMemory _memory,
2014 VkDeviceSize offset,
2015 VkDeviceSize size,
2016 VkMemoryMapFlags flags,
2017 void** ppData)
2018 {
2019 RADV_FROM_HANDLE(radv_device, device, _device);
2020 RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
2021
2022 if (mem == NULL) {
2023 *ppData = NULL;
2024 return VK_SUCCESS;
2025 }
2026
2027 *ppData = device->ws->buffer_map(mem->bo);
2028 if (*ppData) {
2029 *ppData += offset;
2030 return VK_SUCCESS;
2031 }
2032
2033 return VK_ERROR_MEMORY_MAP_FAILED;
2034 }
2035
2036 void radv_UnmapMemory(
2037 VkDevice _device,
2038 VkDeviceMemory _memory)
2039 {
2040 RADV_FROM_HANDLE(radv_device, device, _device);
2041 RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
2042
2043 if (mem == NULL)
2044 return;
2045
2046 device->ws->buffer_unmap(mem->bo);
2047 }
2048
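/* All host-visible memory types that radv exposes are also host-coherent,
 * so flushing and invalidating mapped ranges are no-ops. */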
2049 VkResult radv_FlushMappedMemoryRanges(
2050 VkDevice _device,
2051 uint32_t memoryRangeCount,
2052 const VkMappedMemoryRange* pMemoryRanges)
2053 {
2054 return VK_SUCCESS;
2055 }
2056
2057 VkResult radv_InvalidateMappedMemoryRanges(
2058 VkDevice _device,
2059 uint32_t memoryRangeCount,
2060 const VkMappedMemoryRange* pMemoryRanges)
2061 {
2062 return VK_SUCCESS;
2063 }
2064
2065 void radv_GetBufferMemoryRequirements(
2066 VkDevice device,
2067 VkBuffer _buffer,
2068 VkMemoryRequirements* pMemoryRequirements)
2069 {
2070 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
2071
2072 pMemoryRequirements->memoryTypeBits = (1u << RADV_MEM_TYPE_COUNT) - 1;
2073
2074 if (buffer->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)
2075 pMemoryRequirements->alignment = 4096;
2076 else
2077 pMemoryRequirements->alignment = 16;
2078
2079 pMemoryRequirements->size = align64(buffer->size, pMemoryRequirements->alignment);
2080 }
2081
2082 void radv_GetImageMemoryRequirements(
2083 VkDevice device,
2084 VkImage _image,
2085 VkMemoryRequirements* pMemoryRequirements)
2086 {
2087 RADV_FROM_HANDLE(radv_image, image, _image);
2088
2089 pMemoryRequirements->memoryTypeBits = (1u << RADV_MEM_TYPE_COUNT) - 1;
2090
2091 pMemoryRequirements->size = image->size;
2092 pMemoryRequirements->alignment = image->alignment;
2093 }
2094
2095 void radv_GetImageSparseMemoryRequirements(
2096 VkDevice device,
2097 VkImage image,
2098 uint32_t* pSparseMemoryRequirementCount,
2099 VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
2100 {
2101 stub();
2102 }
2103
2104 void radv_GetDeviceMemoryCommitment(
2105 VkDevice device,
2106 VkDeviceMemory memory,
2107 VkDeviceSize* pCommittedMemoryInBytes)
2108 {
2109 *pCommittedMemoryInBytes = 0;
2110 }
2111
2112 VkResult radv_BindBufferMemory(
2113 VkDevice device,
2114 VkBuffer _buffer,
2115 VkDeviceMemory _memory,
2116 VkDeviceSize memoryOffset)
2117 {
2118 RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
2119 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
2120
2121 if (mem) {
2122 buffer->bo = mem->bo;
2123 buffer->offset = memoryOffset;
2124 } else {
2125 buffer->bo = NULL;
2126 buffer->offset = 0;
2127 }
2128
2129 return VK_SUCCESS;
2130 }
2131
2132 VkResult radv_BindImageMemory(
2133 VkDevice device,
2134 VkImage _image,
2135 VkDeviceMemory _memory,
2136 VkDeviceSize memoryOffset)
2137 {
2138 RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
2139 RADV_FROM_HANDLE(radv_image, image, _image);
2140
2141 if (mem) {
2142 image->bo = mem->bo;
2143 image->offset = memoryOffset;
2144 } else {
2145 image->bo = NULL;
2146 image->offset = 0;
2147 }
2148
2149 return VK_SUCCESS;
2150 }
2151
2152
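/* Sparse binds are implemented by remapping ranges of the virtual BO
 * (created with RADEON_FLAG_VIRTUAL) to the backing memory, or to nothing
 * when the bind carries no memory. */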
2153 static void
2154 radv_sparse_buffer_bind_memory(struct radv_device *device,
2155 const VkSparseBufferMemoryBindInfo *bind)
2156 {
2157 RADV_FROM_HANDLE(radv_buffer, buffer, bind->buffer);
2158
2159 for (uint32_t i = 0; i < bind->bindCount; ++i) {
2160 struct radv_device_memory *mem = NULL;
2161
2162 if (bind->pBinds[i].memory != VK_NULL_HANDLE)
2163 mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
2164
2165 device->ws->buffer_virtual_bind(buffer->bo,
2166 bind->pBinds[i].resourceOffset,
2167 bind->pBinds[i].size,
2168 mem ? mem->bo : NULL,
2169 bind->pBinds[i].memoryOffset);
2170 }
2171 }
2172
2173 static void
2174 radv_sparse_image_opaque_bind_memory(struct radv_device *device,
2175 const VkSparseImageOpaqueMemoryBindInfo *bind)
2176 {
2177 RADV_FROM_HANDLE(radv_image, image, bind->image);
2178
2179 for (uint32_t i = 0; i < bind->bindCount; ++i) {
2180 struct radv_device_memory *mem = NULL;
2181
2182 if (bind->pBinds[i].memory != VK_NULL_HANDLE)
2183 mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
2184
2185 device->ws->buffer_virtual_bind(image->bo,
2186 bind->pBinds[i].resourceOffset,
2187 bind->pBinds[i].size,
2188 mem ? mem->bo : NULL,
2189 bind->pBinds[i].memoryOffset);
2190 }
2191 }
2192
2193 VkResult radv_QueueBindSparse(
2194 VkQueue _queue,
2195 uint32_t bindInfoCount,
2196 const VkBindSparseInfo* pBindInfo,
2197 VkFence _fence)
2198 {
2199 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2200 RADV_FROM_HANDLE(radv_queue, queue, _queue);
2201 struct radeon_winsys_fence *base_fence = fence ? fence->fence : NULL;
2202 bool fence_emitted = false;
2203
2204 for (uint32_t i = 0; i < bindInfoCount; ++i) {
2205 for (uint32_t j = 0; j < pBindInfo[i].bufferBindCount; ++j) {
2206 radv_sparse_buffer_bind_memory(queue->device,
2207 pBindInfo[i].pBufferBinds + j);
2208 }
2209
2210 for (uint32_t j = 0; j < pBindInfo[i].imageOpaqueBindCount; ++j) {
2211 radv_sparse_image_opaque_bind_memory(queue->device,
2212 pBindInfo[i].pImageOpaqueBinds + j);
2213 }
2214
2215 if (pBindInfo[i].waitSemaphoreCount || pBindInfo[i].signalSemaphoreCount) {
2216 queue->device->ws->cs_submit(queue->hw_ctx, queue->queue_idx,
2217 &queue->device->empty_cs[queue->queue_family_index],
2218 1, NULL, NULL,
2219 (struct radeon_winsys_sem **)pBindInfo[i].pWaitSemaphores,
2220 pBindInfo[i].waitSemaphoreCount,
2221 (struct radeon_winsys_sem **)pBindInfo[i].pSignalSemaphores,
2222 pBindInfo[i].signalSemaphoreCount,
2223 false, base_fence);
2224 fence_emitted = true;
2225 if (fence)
2226 fence->submitted = true;
2227 }
2228 }
2229
2230 if (fence && !fence_emitted) {
2231 fence->signalled = true;
2232 }
2233
2234 return VK_SUCCESS;
2235 }
2236
2237 VkResult radv_CreateFence(
2238 VkDevice _device,
2239 const VkFenceCreateInfo* pCreateInfo,
2240 const VkAllocationCallbacks* pAllocator,
2241 VkFence* pFence)
2242 {
2243 RADV_FROM_HANDLE(radv_device, device, _device);
2244 struct radv_fence *fence = vk_alloc2(&device->alloc, pAllocator,
2245 sizeof(*fence), 8,
2246 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2247
2248 if (!fence)
2249 return VK_ERROR_OUT_OF_HOST_MEMORY;
2250
2251 memset(fence, 0, sizeof(*fence));
2252 fence->submitted = false;
2253 fence->signalled = !!(pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT);
2254 fence->fence = device->ws->create_fence();
2255 if (!fence->fence) {
2256 vk_free2(&device->alloc, pAllocator, fence);
2257 return VK_ERROR_OUT_OF_HOST_MEMORY;
2258 }
2259
2260 *pFence = radv_fence_to_handle(fence);
2261
2262 return VK_SUCCESS;
2263 }
2264
2265 void radv_DestroyFence(
2266 VkDevice _device,
2267 VkFence _fence,
2268 const VkAllocationCallbacks* pAllocator)
2269 {
2270 RADV_FROM_HANDLE(radv_device, device, _device);
2271 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2272
2273 if (!fence)
2274 return;
2275 device->ws->destroy_fence(fence->fence);
2276 vk_free2(&device->alloc, pAllocator, fence);
2277 }
2278
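/* Convert a relative timeout into an absolute CLOCK_MONOTONIC time in
 * nanoseconds, clamping so that a UINT64_MAX ("wait forever") timeout
 * doesn't overflow the addition. */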
2279 static uint64_t radv_get_absolute_timeout(uint64_t timeout)
2280 {
2281 uint64_t current_time;
2282 struct timespec tv;
2283
2284 clock_gettime(CLOCK_MONOTONIC, &tv);
2285 current_time = tv.tv_nsec + tv.tv_sec*1000000000ull;
2286
2287 timeout = MIN2(UINT64_MAX - current_time, timeout);
2288
2289 return current_time + timeout;
2290 }
2291
2292 VkResult radv_WaitForFences(
2293 VkDevice _device,
2294 uint32_t fenceCount,
2295 const VkFence* pFences,
2296 VkBool32 waitAll,
2297 uint64_t timeout)
2298 {
2299 RADV_FROM_HANDLE(radv_device, device, _device);
2300 timeout = radv_get_absolute_timeout(timeout);
2301
2302 if (!waitAll && fenceCount > 1) {
2303 fprintf(stderr, "radv: WaitForFences without waitAll not implemented yet\n");
2304 }
2305
2306 for (uint32_t i = 0; i < fenceCount; ++i) {
2307 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
		bool signaled = false;
2309
2310 if (fence->signalled)
2311 continue;
2312
2313 if (!fence->submitted)
2314 return VK_TIMEOUT;
2315
		signaled = device->ws->fence_wait(device->ws, fence->fence, true, timeout);
		if (!signaled)
			return VK_TIMEOUT;
2319
2320 fence->signalled = true;
2321 }
2322
2323 return VK_SUCCESS;
2324 }
2325
2326 VkResult radv_ResetFences(VkDevice device,
2327 uint32_t fenceCount,
2328 const VkFence *pFences)
2329 {
2330 for (unsigned i = 0; i < fenceCount; ++i) {
2331 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
2332 fence->submitted = fence->signalled = false;
2333 }
2334
2335 return VK_SUCCESS;
2336 }
2337
2338 VkResult radv_GetFenceStatus(VkDevice _device, VkFence _fence)
2339 {
2340 RADV_FROM_HANDLE(radv_device, device, _device);
2341 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2342
2343 if (fence->signalled)
2344 return VK_SUCCESS;
2345 if (!fence->submitted)
2346 return VK_NOT_READY;
2347
2348 if (!device->ws->fence_wait(device->ws, fence->fence, false, 0))
2349 return VK_NOT_READY;
2350
2351 return VK_SUCCESS;
2352 }
2353
2354
/* Queue semaphore functions */
2356
2357 VkResult radv_CreateSemaphore(
2358 VkDevice _device,
2359 const VkSemaphoreCreateInfo* pCreateInfo,
2360 const VkAllocationCallbacks* pAllocator,
2361 VkSemaphore* pSemaphore)
2362 {
2363 RADV_FROM_HANDLE(radv_device, device, _device);
2364 struct radeon_winsys_sem *sem;
2365
2366 sem = device->ws->create_sem(device->ws);
2367 if (!sem)
2368 return VK_ERROR_OUT_OF_HOST_MEMORY;
2369
2370 *pSemaphore = radeon_winsys_sem_to_handle(sem);
2371 return VK_SUCCESS;
2372 }
2373
2374 void radv_DestroySemaphore(
2375 VkDevice _device,
2376 VkSemaphore _semaphore,
2377 const VkAllocationCallbacks* pAllocator)
2378 {
2379 RADV_FROM_HANDLE(radv_device, device, _device);
2380 RADV_FROM_HANDLE(radeon_winsys_sem, sem, _semaphore);
2381 if (!_semaphore)
2382 return;
2383
2384 device->ws->destroy_sem(sem);
2385 }
2386
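/* Events are backed by a single 8-byte, CPU-visible GTT buffer; set/reset
 * and status queries simply write or poll the mapped word. */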
2387 VkResult radv_CreateEvent(
2388 VkDevice _device,
2389 const VkEventCreateInfo* pCreateInfo,
2390 const VkAllocationCallbacks* pAllocator,
2391 VkEvent* pEvent)
2392 {
2393 RADV_FROM_HANDLE(radv_device, device, _device);
2394 struct radv_event *event = vk_alloc2(&device->alloc, pAllocator,
2395 sizeof(*event), 8,
2396 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2397
2398 if (!event)
2399 return VK_ERROR_OUT_OF_HOST_MEMORY;
2400
2401 event->bo = device->ws->buffer_create(device->ws, 8, 8,
2402 RADEON_DOMAIN_GTT,
2403 RADEON_FLAG_CPU_ACCESS);
2404 if (!event->bo) {
2405 vk_free2(&device->alloc, pAllocator, event);
2406 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2407 }
2408
2409 event->map = (uint64_t*)device->ws->buffer_map(event->bo);
2410
2411 *pEvent = radv_event_to_handle(event);
2412
2413 return VK_SUCCESS;
2414 }
2415
2416 void radv_DestroyEvent(
2417 VkDevice _device,
2418 VkEvent _event,
2419 const VkAllocationCallbacks* pAllocator)
2420 {
2421 RADV_FROM_HANDLE(radv_device, device, _device);
2422 RADV_FROM_HANDLE(radv_event, event, _event);
2423
2424 if (!event)
2425 return;
2426 device->ws->buffer_destroy(event->bo);
2427 vk_free2(&device->alloc, pAllocator, event);
2428 }
2429
2430 VkResult radv_GetEventStatus(
2431 VkDevice _device,
2432 VkEvent _event)
2433 {
2434 RADV_FROM_HANDLE(radv_event, event, _event);
2435
2436 if (*event->map == 1)
2437 return VK_EVENT_SET;
2438 return VK_EVENT_RESET;
2439 }
2440
2441 VkResult radv_SetEvent(
2442 VkDevice _device,
2443 VkEvent _event)
2444 {
2445 RADV_FROM_HANDLE(radv_event, event, _event);
2446 *event->map = 1;
2447
2448 return VK_SUCCESS;
2449 }
2450
2451 VkResult radv_ResetEvent(
2452 VkDevice _device,
2453 VkEvent _event)
2454 {
2455 RADV_FROM_HANDLE(radv_event, event, _event);
2456 *event->map = 0;
2457
2458 return VK_SUCCESS;
2459 }
2460
2461 VkResult radv_CreateBuffer(
2462 VkDevice _device,
2463 const VkBufferCreateInfo* pCreateInfo,
2464 const VkAllocationCallbacks* pAllocator,
2465 VkBuffer* pBuffer)
2466 {
2467 RADV_FROM_HANDLE(radv_device, device, _device);
2468 struct radv_buffer *buffer;
2469
2470 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
2471
2472 buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
2473 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2474 if (buffer == NULL)
2475 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2476
2477 buffer->size = pCreateInfo->size;
2478 buffer->usage = pCreateInfo->usage;
2479 buffer->bo = NULL;
2480 buffer->offset = 0;
2481 buffer->flags = pCreateInfo->flags;
2482
2483 if (pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) {
2484 buffer->bo = device->ws->buffer_create(device->ws,
2485 align64(buffer->size, 4096),
2486 4096, 0, RADEON_FLAG_VIRTUAL);
2487 if (!buffer->bo) {
2488 vk_free2(&device->alloc, pAllocator, buffer);
2489 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
2490 }
2491 }
2492
2493 *pBuffer = radv_buffer_to_handle(buffer);
2494
2495 return VK_SUCCESS;
2496 }
2497
2498 void radv_DestroyBuffer(
2499 VkDevice _device,
2500 VkBuffer _buffer,
2501 const VkAllocationCallbacks* pAllocator)
2502 {
2503 RADV_FROM_HANDLE(radv_device, device, _device);
2504 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
2505
2506 if (!buffer)
2507 return;
2508
2509 if (buffer->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)
2510 device->ws->buffer_destroy(buffer->bo);
2511
2512 vk_free2(&device->alloc, pAllocator, buffer);
2513 }
2514
2515 static inline unsigned
2516 si_tile_mode_index(const struct radv_image *image, unsigned level, bool stencil)
2517 {
2518 if (stencil)
2519 return image->surface.stencil_tiling_index[level];
2520 else
2521 return image->surface.tiling_index[level];
2522 }
2523
2524 static uint32_t radv_surface_layer_count(struct radv_image_view *iview)
2525 {
2526 return iview->type == VK_IMAGE_VIEW_TYPE_3D ? iview->extent.depth : iview->layer_count;
2527 }
2528
2529 static void
2530 radv_initialise_color_surface(struct radv_device *device,
2531 struct radv_color_buffer_info *cb,
2532 struct radv_image_view *iview)
2533 {
2534 const struct vk_format_description *desc;
2535 unsigned ntype, format, swap, endian;
2536 unsigned blend_clamp = 0, blend_bypass = 0;
2537 unsigned pitch_tile_max, slice_tile_max, tile_mode_index;
2538 uint64_t va;
2539 const struct radeon_surf *surf = &iview->image->surface;
2540 const struct radeon_surf_level *level_info = &surf->level[iview->base_mip];
2541
2542 desc = vk_format_description(iview->vk_format);
2543
2544 memset(cb, 0, sizeof(*cb));
2545
2546 va = device->ws->buffer_get_va(iview->bo) + iview->image->offset;
2547 va += level_info->offset;
2548 cb->cb_color_base = va >> 8;
2549
2550 /* CMASK variables */
2551 va = device->ws->buffer_get_va(iview->bo) + iview->image->offset;
2552 va += iview->image->cmask.offset;
2553 cb->cb_color_cmask = va >> 8;
2554 cb->cb_color_cmask_slice = iview->image->cmask.slice_tile_max;
2555
2556 va = device->ws->buffer_get_va(iview->bo) + iview->image->offset;
2557 va += iview->image->dcc_offset;
2558 cb->cb_dcc_base = va >> 8;
2559
2560 uint32_t max_slice = radv_surface_layer_count(iview);
2561 cb->cb_color_view = S_028C6C_SLICE_START(iview->base_layer) |
2562 S_028C6C_SLICE_MAX(iview->base_layer + max_slice - 1);
2563
2564 cb->micro_tile_mode = iview->image->surface.micro_tile_mode;
2565 pitch_tile_max = level_info->nblk_x / 8 - 1;
2566 slice_tile_max = (level_info->nblk_x * level_info->nblk_y) / 64 - 1;
2567 tile_mode_index = si_tile_mode_index(iview->image, iview->base_mip, false);
2568
2569 cb->cb_color_pitch = S_028C64_TILE_MAX(pitch_tile_max);
2570 cb->cb_color_slice = S_028C68_TILE_MAX(slice_tile_max);
2571
2572 /* Intensity is implemented as Red, so treat it that way. */
2573 cb->cb_color_attrib = S_028C74_FORCE_DST_ALPHA_1(desc->swizzle[3] == VK_SWIZZLE_1) |
2574 S_028C74_TILE_MODE_INDEX(tile_mode_index);
2575
2576 if (iview->image->samples > 1) {
2577 unsigned log_samples = util_logbase2(iview->image->samples);
2578
2579 cb->cb_color_attrib |= S_028C74_NUM_SAMPLES(log_samples) |
2580 S_028C74_NUM_FRAGMENTS(log_samples);
2581 }
2582
2583 if (iview->image->fmask.size) {
2584 va = device->ws->buffer_get_va(iview->bo) + iview->image->offset + iview->image->fmask.offset;
2585 if (device->physical_device->rad_info.chip_class >= CIK)
2586 cb->cb_color_pitch |= S_028C64_FMASK_TILE_MAX(iview->image->fmask.pitch_in_pixels / 8 - 1);
2587 cb->cb_color_attrib |= S_028C74_FMASK_TILE_MODE_INDEX(iview->image->fmask.tile_mode_index);
2588 cb->cb_color_fmask = va >> 8;
2589 cb->cb_color_fmask_slice = S_028C88_TILE_MAX(iview->image->fmask.slice_tile_max);
2590 } else {
2591 /* This must be set for fast clear to work without FMASK. */
2592 if (device->physical_device->rad_info.chip_class >= CIK)
2593 cb->cb_color_pitch |= S_028C64_FMASK_TILE_MAX(pitch_tile_max);
2594 cb->cb_color_attrib |= S_028C74_FMASK_TILE_MODE_INDEX(tile_mode_index);
2595 cb->cb_color_fmask = cb->cb_color_base;
2596 cb->cb_color_fmask_slice = S_028C88_TILE_MAX(slice_tile_max);
2597 }
2598
2599 ntype = radv_translate_color_numformat(iview->vk_format,
2600 desc,
2601 vk_format_get_first_non_void_channel(iview->vk_format));
2602 format = radv_translate_colorformat(iview->vk_format);
2603 if (format == V_028C70_COLOR_INVALID || ntype == ~0u)
2604 radv_finishme("Illegal color\n");
	swap = radv_translate_colorswap(iview->vk_format, false);
2606 endian = radv_colorformat_endian_swap(format);
2607
2608 /* blend clamp should be set for all NORM/SRGB types */
2609 if (ntype == V_028C70_NUMBER_UNORM ||
2610 ntype == V_028C70_NUMBER_SNORM ||
2611 ntype == V_028C70_NUMBER_SRGB)
2612 blend_clamp = 1;
2613
2614 /* set blend bypass according to docs if SINT/UINT or
2615 8/24 COLOR variants */
2616 if (ntype == V_028C70_NUMBER_UINT || ntype == V_028C70_NUMBER_SINT ||
2617 format == V_028C70_COLOR_8_24 || format == V_028C70_COLOR_24_8 ||
2618 format == V_028C70_COLOR_X24_8_32_FLOAT) {
2619 blend_clamp = 0;
2620 blend_bypass = 1;
2621 }
	/* radeonsi records the use of 8-bit integer color formats here
	 * (color_is_int8); radv derives that information at pipeline creation
	 * time instead, so nothing needs to be tracked on the surface. */
2629 cb->cb_color_info = S_028C70_FORMAT(format) |
2630 S_028C70_COMP_SWAP(swap) |
2631 S_028C70_BLEND_CLAMP(blend_clamp) |
2632 S_028C70_BLEND_BYPASS(blend_bypass) |
2633 S_028C70_SIMPLE_FLOAT(1) |
2634 S_028C70_ROUND_MODE(ntype != V_028C70_NUMBER_UNORM &&
2635 ntype != V_028C70_NUMBER_SNORM &&
2636 ntype != V_028C70_NUMBER_SRGB &&
2637 format != V_028C70_COLOR_8_24 &&
2638 format != V_028C70_COLOR_24_8) |
2639 S_028C70_NUMBER_TYPE(ntype) |
2640 S_028C70_ENDIAN(endian);
	if (iview->image->samples > 1 && iview->image->fmask.size)
		cb->cb_color_info |= S_028C70_COMPRESSION(1);
2644
2645 if (iview->image->cmask.size &&
2646 !(device->debug_flags & RADV_DEBUG_NO_FAST_CLEARS))
2647 cb->cb_color_info |= S_028C70_FAST_CLEAR(1);
2648
2649 if (iview->image->surface.dcc_size && level_info->dcc_enabled)
2650 cb->cb_color_info |= S_028C70_DCC_ENABLE(1);
2651
2652 if (device->physical_device->rad_info.chip_class >= VI) {
2653 unsigned max_uncompressed_block_size = 2;
2654 if (iview->image->samples > 1) {
2655 if (iview->image->surface.bpe == 1)
2656 max_uncompressed_block_size = 0;
2657 else if (iview->image->surface.bpe == 2)
2658 max_uncompressed_block_size = 1;
2659 }
2660
2661 cb->cb_dcc_control = S_028C78_MAX_UNCOMPRESSED_BLOCK_SIZE(max_uncompressed_block_size) |
2662 S_028C78_INDEPENDENT_64B_BLOCKS(1);
2663 }
2664
2665 /* This must be set for fast clear to work without FMASK. */
2666 if (!iview->image->fmask.size &&
2667 device->physical_device->rad_info.chip_class == SI) {
2668 unsigned bankh = util_logbase2(iview->image->surface.bankh);
2669 cb->cb_color_attrib |= S_028C74_FMASK_BANK_HEIGHT(bankh);
2670 }
2671 }
2672
2673 static void
2674 radv_initialise_ds_surface(struct radv_device *device,
2675 struct radv_ds_buffer_info *ds,
2676 struct radv_image_view *iview)
2677 {
2678 unsigned level = iview->base_mip;
2679 unsigned format;
2680 uint64_t va, s_offs, z_offs;
2681 const struct radeon_surf_level *level_info = &iview->image->surface.level[level];
2682 memset(ds, 0, sizeof(*ds));
2683 switch (iview->vk_format) {
2684 case VK_FORMAT_D24_UNORM_S8_UINT:
2685 case VK_FORMAT_X8_D24_UNORM_PACK32:
2686 ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-24);
2687 ds->offset_scale = 2.0f;
2688 break;
2689 case VK_FORMAT_D16_UNORM:
2690 case VK_FORMAT_D16_UNORM_S8_UINT:
2691 ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-16);
2692 ds->offset_scale = 4.0f;
2693 break;
2694 case VK_FORMAT_D32_SFLOAT:
2695 case VK_FORMAT_D32_SFLOAT_S8_UINT:
2696 ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-23) |
2697 S_028B78_POLY_OFFSET_DB_IS_FLOAT_FMT(1);
2698 ds->offset_scale = 1.0f;
2699 break;
2700 default:
2701 break;
2702 }
2703
2704 format = radv_translate_dbformat(iview->vk_format);
2705
2706 va = device->ws->buffer_get_va(iview->bo) + iview->image->offset;
2707 s_offs = z_offs = va;
2708 z_offs += iview->image->surface.level[level].offset;
2709 s_offs += iview->image->surface.stencil_level[level].offset;
2710
2711 uint32_t max_slice = radv_surface_layer_count(iview);
2712 ds->db_depth_view = S_028008_SLICE_START(iview->base_layer) |
2713 S_028008_SLICE_MAX(iview->base_layer + max_slice - 1);
2714 ds->db_depth_info = S_02803C_ADDR5_SWIZZLE_MASK(1);
2715 ds->db_z_info = S_028040_FORMAT(format) | S_028040_ZRANGE_PRECISION(1);
2716
2717 if (iview->image->samples > 1)
2718 ds->db_z_info |= S_028040_NUM_SAMPLES(util_logbase2(iview->image->samples));
2719
2720 if (iview->image->surface.flags & RADEON_SURF_SBUFFER)
2721 ds->db_stencil_info = S_028044_FORMAT(V_028044_STENCIL_8);
2722 else
2723 ds->db_stencil_info = S_028044_FORMAT(V_028044_STENCIL_INVALID);
2724
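	/* On CIK+ the kernel-provided tile mode arrays describe the depth and
	 * stencil tiling; on SI only the tile mode index is programmed. */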
2725 if (device->physical_device->rad_info.chip_class >= CIK) {
2726 struct radeon_info *info = &device->physical_device->rad_info;
2727 unsigned tiling_index = iview->image->surface.tiling_index[level];
2728 unsigned stencil_index = iview->image->surface.stencil_tiling_index[level];
2729 unsigned macro_index = iview->image->surface.macro_tile_index;
2730 unsigned tile_mode = info->si_tile_mode_array[tiling_index];
2731 unsigned stencil_tile_mode = info->si_tile_mode_array[stencil_index];
2732 unsigned macro_mode = info->cik_macrotile_mode_array[macro_index];
2733
2734 ds->db_depth_info |=
2735 S_02803C_ARRAY_MODE(G_009910_ARRAY_MODE(tile_mode)) |
2736 S_02803C_PIPE_CONFIG(G_009910_PIPE_CONFIG(tile_mode)) |
2737 S_02803C_BANK_WIDTH(G_009990_BANK_WIDTH(macro_mode)) |
2738 S_02803C_BANK_HEIGHT(G_009990_BANK_HEIGHT(macro_mode)) |
2739 S_02803C_MACRO_TILE_ASPECT(G_009990_MACRO_TILE_ASPECT(macro_mode)) |
2740 S_02803C_NUM_BANKS(G_009990_NUM_BANKS(macro_mode));
2741 ds->db_z_info |= S_028040_TILE_SPLIT(G_009910_TILE_SPLIT(tile_mode));
2742 ds->db_stencil_info |= S_028044_TILE_SPLIT(G_009910_TILE_SPLIT(stencil_tile_mode));
2743 } else {
2744 unsigned tile_mode_index = si_tile_mode_index(iview->image, level, false);
2745 ds->db_z_info |= S_028040_TILE_MODE_INDEX(tile_mode_index);
2746 tile_mode_index = si_tile_mode_index(iview->image, level, true);
2747 ds->db_stencil_info |= S_028044_TILE_MODE_INDEX(tile_mode_index);
2748 }
2749
2750 if (iview->image->surface.htile_size && !level) {
2751 ds->db_z_info |= S_028040_TILE_SURFACE_ENABLE(1) |
2752 S_028040_ALLOW_EXPCLEAR(1);
2753
2754 if (iview->image->surface.flags & RADEON_SURF_SBUFFER) {
2755 /* Workaround: For a not yet understood reason, the
2756 * combination of MSAA, fast stencil clear and stencil
2757 * decompress messes with subsequent stencil buffer
2758 * uses. Problem was reproduced on Verde, Bonaire,
2759 * Tonga, and Carrizo.
2760 *
2761 * Disabling EXPCLEAR works around the problem.
2762 *
2763 * Check piglit's arb_texture_multisample-stencil-clear
2764 * test if you want to try changing this.
2765 */
2766 if (iview->image->samples <= 1)
2767 ds->db_stencil_info |= S_028044_ALLOW_EXPCLEAR(1);
2768 } else
2769 /* Use all of the htile_buffer for depth if there's no stencil. */
2770 ds->db_stencil_info |= S_028044_TILE_STENCIL_DISABLE(1);
2771
2772 va = device->ws->buffer_get_va(iview->bo) + iview->image->offset +
2773 iview->image->htile_offset;
2774 ds->db_htile_data_base = va >> 8;
2775 ds->db_htile_surface = S_028ABC_FULL_CACHE(1);
2776 } else {
2777 ds->db_htile_data_base = 0;
2778 ds->db_htile_surface = 0;
2779 }
2780
2781 ds->db_z_read_base = ds->db_z_write_base = z_offs >> 8;
2782 ds->db_stencil_read_base = ds->db_stencil_write_base = s_offs >> 8;
2783
2784 ds->db_depth_size = S_028058_PITCH_TILE_MAX((level_info->nblk_x / 8) - 1) |
2785 S_028058_HEIGHT_TILE_MAX((level_info->nblk_y / 8) - 1);
2786 ds->db_depth_slice = S_02805C_SLICE_TILE_MAX((level_info->nblk_x * level_info->nblk_y) / 64 - 1);
2787 }
2788
2789 VkResult radv_CreateFramebuffer(
2790 VkDevice _device,
2791 const VkFramebufferCreateInfo* pCreateInfo,
2792 const VkAllocationCallbacks* pAllocator,
2793 VkFramebuffer* pFramebuffer)
2794 {
2795 RADV_FROM_HANDLE(radv_device, device, _device);
2796 struct radv_framebuffer *framebuffer;
2797
2798 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
2799
2800 size_t size = sizeof(*framebuffer) +
2801 sizeof(struct radv_attachment_info) * pCreateInfo->attachmentCount;
2802 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
2803 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2804 if (framebuffer == NULL)
2805 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2806
2807 framebuffer->attachment_count = pCreateInfo->attachmentCount;
2808 framebuffer->width = pCreateInfo->width;
2809 framebuffer->height = pCreateInfo->height;
2810 framebuffer->layers = pCreateInfo->layers;
2811 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
2812 VkImageView _iview = pCreateInfo->pAttachments[i];
2813 struct radv_image_view *iview = radv_image_view_from_handle(_iview);
2814 framebuffer->attachments[i].attachment = iview;
2815 if (iview->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) {
2816 radv_initialise_color_surface(device, &framebuffer->attachments[i].cb, iview);
2817 } else if (iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
2818 radv_initialise_ds_surface(device, &framebuffer->attachments[i].ds, iview);
2819 }
2820 framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
2821 framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
2822 framebuffer->layers = MIN2(framebuffer->layers, radv_surface_layer_count(iview));
2823 }
2824
2825 *pFramebuffer = radv_framebuffer_to_handle(framebuffer);
2826 return VK_SUCCESS;
2827 }
2828
2829 void radv_DestroyFramebuffer(
2830 VkDevice _device,
2831 VkFramebuffer _fb,
2832 const VkAllocationCallbacks* pAllocator)
2833 {
2834 RADV_FROM_HANDLE(radv_device, device, _device);
2835 RADV_FROM_HANDLE(radv_framebuffer, fb, _fb);
2836
2837 if (!fb)
2838 return;
2839 vk_free2(&device->alloc, pAllocator, fb);
2840 }
2841
2842 static unsigned radv_tex_wrap(VkSamplerAddressMode address_mode)
2843 {
2844 switch (address_mode) {
2845 case VK_SAMPLER_ADDRESS_MODE_REPEAT:
2846 return V_008F30_SQ_TEX_WRAP;
2847 case VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT:
2848 return V_008F30_SQ_TEX_MIRROR;
2849 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE:
2850 return V_008F30_SQ_TEX_CLAMP_LAST_TEXEL;
2851 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER:
2852 return V_008F30_SQ_TEX_CLAMP_BORDER;
2853 case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE:
2854 return V_008F30_SQ_TEX_MIRROR_ONCE_LAST_TEXEL;
2855 default:
2856 unreachable("illegal tex wrap mode");
2857 break;
2858 }
2859 }
2860
2861 static unsigned
2862 radv_tex_compare(VkCompareOp op)
2863 {
2864 switch (op) {
2865 case VK_COMPARE_OP_NEVER:
2866 return V_008F30_SQ_TEX_DEPTH_COMPARE_NEVER;
2867 case VK_COMPARE_OP_LESS:
2868 return V_008F30_SQ_TEX_DEPTH_COMPARE_LESS;
2869 case VK_COMPARE_OP_EQUAL:
2870 return V_008F30_SQ_TEX_DEPTH_COMPARE_EQUAL;
2871 case VK_COMPARE_OP_LESS_OR_EQUAL:
2872 return V_008F30_SQ_TEX_DEPTH_COMPARE_LESSEQUAL;
2873 case VK_COMPARE_OP_GREATER:
2874 return V_008F30_SQ_TEX_DEPTH_COMPARE_GREATER;
2875 case VK_COMPARE_OP_NOT_EQUAL:
2876 return V_008F30_SQ_TEX_DEPTH_COMPARE_NOTEQUAL;
2877 case VK_COMPARE_OP_GREATER_OR_EQUAL:
2878 return V_008F30_SQ_TEX_DEPTH_COMPARE_GREATEREQUAL;
2879 case VK_COMPARE_OP_ALWAYS:
2880 return V_008F30_SQ_TEX_DEPTH_COMPARE_ALWAYS;
2881 default:
2882 unreachable("illegal compare mode");
2883 break;
2884 }
2885 }
2886
2887 static unsigned
radv_tex_filter(VkFilter filter, unsigned max_aniso)
{
	switch (filter) {
	case VK_FILTER_NEAREST:
		return (max_aniso > 1 ? V_008F38_SQ_TEX_XY_FILTER_ANISO_POINT :
			V_008F38_SQ_TEX_XY_FILTER_POINT);
	case VK_FILTER_LINEAR:
		return (max_aniso > 1 ? V_008F38_SQ_TEX_XY_FILTER_ANISO_BILINEAR :
			V_008F38_SQ_TEX_XY_FILTER_BILINEAR);
	case VK_FILTER_CUBIC_IMG:
	default:
		fprintf(stderr, "illegal texture filter\n");
2900 return 0;
2901 }
2902 }
2903
2904 static unsigned
2905 radv_tex_mipfilter(VkSamplerMipmapMode mode)
2906 {
2907 switch (mode) {
2908 case VK_SAMPLER_MIPMAP_MODE_NEAREST:
2909 return V_008F38_SQ_TEX_Z_FILTER_POINT;
2910 case VK_SAMPLER_MIPMAP_MODE_LINEAR:
2911 return V_008F38_SQ_TEX_Z_FILTER_LINEAR;
2912 default:
2913 return V_008F38_SQ_TEX_Z_FILTER_NONE;
2914 }
2915 }
2916
2917 static unsigned
2918 radv_tex_bordercolor(VkBorderColor bcolor)
2919 {
2920 switch (bcolor) {
2921 case VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK:
2922 case VK_BORDER_COLOR_INT_TRANSPARENT_BLACK:
2923 return V_008F3C_SQ_TEX_BORDER_COLOR_TRANS_BLACK;
2924 case VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK:
2925 case VK_BORDER_COLOR_INT_OPAQUE_BLACK:
2926 return V_008F3C_SQ_TEX_BORDER_COLOR_OPAQUE_BLACK;
2927 case VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE:
2928 case VK_BORDER_COLOR_INT_OPAQUE_WHITE:
2929 return V_008F3C_SQ_TEX_BORDER_COLOR_OPAQUE_WHITE;
2930 default:
2931 break;
2932 }
2933 return 0;
2934 }
2935
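/* Map the sampler's maxAnisotropy (1, 2, 4, 8, 16) onto the log2-encoded
 * ratio the hardware expects (0-4). */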
2936 static unsigned
2937 radv_tex_aniso_filter(unsigned filter)
2938 {
2939 if (filter < 2)
2940 return 0;
2941 if (filter < 4)
2942 return 1;
2943 if (filter < 8)
2944 return 2;
2945 if (filter < 16)
2946 return 3;
2947 return 4;
2948 }
2949
2950 static void
2951 radv_init_sampler(struct radv_device *device,
2952 struct radv_sampler *sampler,
2953 const VkSamplerCreateInfo *pCreateInfo)
2954 {
2955 uint32_t max_aniso = pCreateInfo->anisotropyEnable && pCreateInfo->maxAnisotropy > 1.0 ?
2956 (uint32_t) pCreateInfo->maxAnisotropy : 0;
2957 uint32_t max_aniso_ratio = radv_tex_aniso_filter(max_aniso);
2958 bool is_vi = (device->physical_device->rad_info.chip_class >= VI);
2959
2960 sampler->state[0] = (S_008F30_CLAMP_X(radv_tex_wrap(pCreateInfo->addressModeU)) |
2961 S_008F30_CLAMP_Y(radv_tex_wrap(pCreateInfo->addressModeV)) |
2962 S_008F30_CLAMP_Z(radv_tex_wrap(pCreateInfo->addressModeW)) |
2963 S_008F30_MAX_ANISO_RATIO(max_aniso_ratio) |
2964 S_008F30_DEPTH_COMPARE_FUNC(radv_tex_compare(pCreateInfo->compareOp)) |
2965 S_008F30_FORCE_UNNORMALIZED(pCreateInfo->unnormalizedCoordinates ? 1 : 0) |
2966 S_008F30_ANISO_THRESHOLD(max_aniso_ratio >> 1) |
2967 S_008F30_ANISO_BIAS(max_aniso_ratio) |
2968 S_008F30_DISABLE_CUBE_WRAP(0) |
2969 S_008F30_COMPAT_MODE(is_vi));
2970 sampler->state[1] = (S_008F34_MIN_LOD(S_FIXED(CLAMP(pCreateInfo->minLod, 0, 15), 8)) |
2971 S_008F34_MAX_LOD(S_FIXED(CLAMP(pCreateInfo->maxLod, 0, 15), 8)) |
2972 S_008F34_PERF_MIP(max_aniso_ratio ? max_aniso_ratio + 6 : 0));
2973 sampler->state[2] = (S_008F38_LOD_BIAS(S_FIXED(CLAMP(pCreateInfo->mipLodBias, -16, 16), 8)) |
2974 S_008F38_XY_MAG_FILTER(radv_tex_filter(pCreateInfo->magFilter, max_aniso)) |
2975 S_008F38_XY_MIN_FILTER(radv_tex_filter(pCreateInfo->minFilter, max_aniso)) |
2976 S_008F38_MIP_FILTER(radv_tex_mipfilter(pCreateInfo->mipmapMode)) |
2977 S_008F38_MIP_POINT_PRECLAMP(0) |
2978 S_008F38_DISABLE_LSB_CEIL(1) |
2979 S_008F38_FILTER_PREC_FIX(1) |
2980 S_008F38_ANISO_OVERRIDE(is_vi));
2981 sampler->state[3] = (S_008F3C_BORDER_COLOR_PTR(0) |
2982 S_008F3C_BORDER_COLOR_TYPE(radv_tex_bordercolor(pCreateInfo->borderColor)));
2983 }
2984
2985 VkResult radv_CreateSampler(
2986 VkDevice _device,
2987 const VkSamplerCreateInfo* pCreateInfo,
2988 const VkAllocationCallbacks* pAllocator,
2989 VkSampler* pSampler)
2990 {
2991 RADV_FROM_HANDLE(radv_device, device, _device);
2992 struct radv_sampler *sampler;
2993
2994 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
2995
2996 sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
2997 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2998 if (!sampler)
2999 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3000
3001 radv_init_sampler(device, sampler, pCreateInfo);
3002 *pSampler = radv_sampler_to_handle(sampler);
3003
3004 return VK_SUCCESS;
3005 }
3006
3007 void radv_DestroySampler(
3008 VkDevice _device,
3009 VkSampler _sampler,
3010 const VkAllocationCallbacks* pAllocator)
3011 {
3012 RADV_FROM_HANDLE(radv_device, device, _device);
3013 RADV_FROM_HANDLE(radv_sampler, sampler, _sampler);
3014
3015 if (!sampler)
3016 return;
3017 vk_free2(&device->alloc, pAllocator, sampler);
3018 }
3019
3020
3021 /* vk_icd.h does not declare this function, so we declare it here to
3022 * suppress Wmissing-prototypes.
3023 */
3024 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
3025 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
3026
3027 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
3028 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
3029 {
3030 /* For the full details on loader interface versioning, see
3031 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
3032 * What follows is a condensed summary, to help you navigate the large and
3033 * confusing official doc.
3034 *
3035 * - Loader interface v0 is incompatible with later versions. We don't
3036 * support it.
3037 *
3038 * - In loader interface v1:
3039 * - The first ICD entrypoint called by the loader is
3040 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
3041 * entrypoint.
3042 * - The ICD must statically expose no other Vulkan symbol unless it is
3043 * linked with -Bsymbolic.
3044 * - Each dispatchable Vulkan handle created by the ICD must be
3045 * a pointer to a struct whose first member is VK_LOADER_DATA. The
3046 * ICD must initialize VK_LOADER_DATA.loadMagic to ICD_LOADER_MAGIC.
3047 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
3048 * vkDestroySurfaceKHR(). The ICD must be capable of working with
3049 * such loader-managed surfaces.
3050 *
3051 * - Loader interface v2 differs from v1 in:
3052 * - The first ICD entrypoint called by the loader is
3053 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
3054 * statically expose this entrypoint.
3055 *
3056 * - Loader interface v3 differs from v2 in:
3057 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
3058 * vkDestroySurfaceKHR(), and other API which uses VKSurfaceKHR,
3059 * because the loader no longer does so.
3060 */
3061 *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
3062 return VK_SUCCESS;
3063 }