radv: Fix truncation issue hexifying the cache uuid for the disk cache.
[mesa.git] / src / amd / vulkan / radv_device.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 */
27
28 #include <stdbool.h>
29 #include <string.h>
30 #include <unistd.h>
31 #include <fcntl.h>
32 #include "radv_debug.h"
33 #include "radv_private.h"
34 #include "radv_shader.h"
35 #include "radv_cs.h"
36 #include "util/disk_cache.h"
37 #include "util/strtod.h"
38 #include "vk_util.h"
39 #include <xf86drm.h>
40 #include <amdgpu.h>
41 #include <amdgpu_drm.h>
42 #include "amdgpu_id.h"
43 #include "winsys/amdgpu/radv_amdgpu_winsys_public.h"
44 #include "ac_llvm_util.h"
45 #include "vk_format.h"
46 #include "sid.h"
47 #include "gfx9d.h"
48 #include "util/debug.h"
49
50 static int
51 radv_device_get_cache_uuid(enum radeon_family family, void *uuid)
52 {
53 uint32_t mesa_timestamp, llvm_timestamp;
54 uint16_t f = family;
55 memset(uuid, 0, VK_UUID_SIZE);
56 if (!disk_cache_get_function_timestamp(radv_device_get_cache_uuid, &mesa_timestamp) ||
57 !disk_cache_get_function_timestamp(LLVMInitializeAMDGPUTargetInfo, &llvm_timestamp))
58 return -1;
59
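/* Resulting UUID layout (a summary of the stores below): bytes 0-3 hold the
 * Mesa build timestamp, bytes 4-7 the LLVM build timestamp, bytes 8-9 the
 * chip family id, and bytes 10+ the literal string "radv". */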
60 memcpy(uuid, &mesa_timestamp, 4);
61 memcpy((char*)uuid + 4, &llvm_timestamp, 4);
62 memcpy((char*)uuid + 8, &f, 2);
63 snprintf((char*)uuid + 10, VK_UUID_SIZE - 10, "radv");
64 return 0;
65 }
66
67 static void
68 radv_get_driver_uuid(void *uuid)
69 {
70 ac_compute_driver_uuid(uuid, VK_UUID_SIZE);
71 }
72
73 static void
74 radv_get_device_uuid(struct radeon_info *info, void *uuid)
75 {
76 ac_compute_device_uuid(info, uuid, VK_UUID_SIZE);
77 }
78
79 static const char *
80 get_chip_name(enum radeon_family family)
81 {
82 switch (family) {
83 case CHIP_TAHITI: return "AMD RADV TAHITI";
84 case CHIP_PITCAIRN: return "AMD RADV PITCAIRN";
85 case CHIP_VERDE: return "AMD RADV CAPE VERDE";
86 case CHIP_OLAND: return "AMD RADV OLAND";
87 case CHIP_HAINAN: return "AMD RADV HAINAN";
88 case CHIP_BONAIRE: return "AMD RADV BONAIRE";
89 case CHIP_KAVERI: return "AMD RADV KAVERI";
90 case CHIP_KABINI: return "AMD RADV KABINI";
91 case CHIP_HAWAII: return "AMD RADV HAWAII";
92 case CHIP_MULLINS: return "AMD RADV MULLINS";
93 case CHIP_TONGA: return "AMD RADV TONGA";
94 case CHIP_ICELAND: return "AMD RADV ICELAND";
95 case CHIP_CARRIZO: return "AMD RADV CARRIZO";
96 case CHIP_FIJI: return "AMD RADV FIJI";
97 case CHIP_POLARIS10: return "AMD RADV POLARIS10";
98 case CHIP_POLARIS11: return "AMD RADV POLARIS11";
99 case CHIP_POLARIS12: return "AMD RADV POLARIS12";
100 case CHIP_STONEY: return "AMD RADV STONEY";
101 case CHIP_VEGA10: return "AMD RADV VEGA";
102 case CHIP_RAVEN: return "AMD RADV RAVEN";
103 default: return "AMD RADV unknown";
104 }
105 }
106
107 static VkResult
108 radv_physical_device_init(struct radv_physical_device *device,
109 struct radv_instance *instance,
110 drmDevicePtr drm_device)
111 {
112 const char *path = drm_device->nodes[DRM_NODE_RENDER];
113 VkResult result;
114 drmVersionPtr version;
115 int fd;
116
117 fd = open(path, O_RDWR | O_CLOEXEC);
118 if (fd < 0)
119 return VK_ERROR_INCOMPATIBLE_DRIVER;
120
121 version = drmGetVersion(fd);
122 if (!version) {
123 close(fd);
124 return vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
125 "failed to get version %s: %m", path);
126 }
127
128 if (strcmp(version->name, "amdgpu")) {
129 drmFreeVersion(version);
130 close(fd);
131 return VK_ERROR_INCOMPATIBLE_DRIVER;
132 }
133 drmFreeVersion(version);
134
135 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
136 device->instance = instance;
137 assert(strlen(path) < ARRAY_SIZE(device->path));
138 strncpy(device->path, path, ARRAY_SIZE(device->path));
139
140 device->ws = radv_amdgpu_winsys_create(fd, instance->debug_flags,
141 instance->perftest_flags);
142 if (!device->ws) {
143 result = VK_ERROR_INCOMPATIBLE_DRIVER;
144 goto fail;
145 }
146
147 device->local_fd = fd;
148 device->ws->query_info(device->ws, &device->rad_info);
149 result = radv_init_wsi(device);
150 if (result != VK_SUCCESS) {
151 device->ws->destroy(device->ws);
152 goto fail;
153 }
154
155 device->name = get_chip_name(device->rad_info.family);
156
157 if (radv_device_get_cache_uuid(device->rad_info.family, device->cache_uuid)) {
158 radv_finish_wsi(device);
159 device->ws->destroy(device->ws);
160 result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
161 "cannot generate UUID");
162 goto fail;
163 }
164
165 /* These flags affect shader compilation. */
166 uint64_t shader_env_flags =
167 (device->instance->perftest_flags & RADV_PERFTEST_SISCHED ? 0x1 : 0) |
168 (device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH ? 0x2 : 0);
169
170 /* The gpu id is already embedded in the uuid so we just pass "radv"
171 * when creating the cache.
172 */
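/* Two hex characters per UUID byte, plus room for a terminating NUL. */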
173 char buf[VK_UUID_SIZE * 2 + 1];
174 disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
175 device->disk_cache = disk_cache_create(device->name, buf, shader_env_flags);
176
177 fprintf(stderr, "WARNING: radv is not a conformant vulkan implementation, testing use only.\n");
178
179 radv_get_driver_uuid(&device->driver_uuid);
180 radv_get_device_uuid(&device->rad_info, &device->device_uuid);
181
182 if (device->rad_info.family == CHIP_STONEY ||
183 device->rad_info.chip_class >= GFX9) {
184 device->has_rbplus = true;
185 device->rbplus_allowed = device->rad_info.family == CHIP_STONEY;
186 }
187
188 /* The mere presence of CLEAR_STATE in the IB causes random GPU hangs
189 * on SI.
190 */
191 device->has_clear_state = device->rad_info.chip_class >= CIK;
192
193 return VK_SUCCESS;
194
195 fail:
196 close(fd);
197 return result;
198 }
199
200 static void
201 radv_physical_device_finish(struct radv_physical_device *device)
202 {
203 radv_finish_wsi(device);
204 device->ws->destroy(device->ws);
205 disk_cache_destroy(device->disk_cache);
206 close(device->local_fd);
207 }
208
209 static void *
210 default_alloc_func(void *pUserData, size_t size, size_t align,
211 VkSystemAllocationScope allocationScope)
212 {
213 return malloc(size);
214 }
215
216 static void *
217 default_realloc_func(void *pUserData, void *pOriginal, size_t size,
218 size_t align, VkSystemAllocationScope allocationScope)
219 {
220 return realloc(pOriginal, size);
221 }
222
223 static void
224 default_free_func(void *pUserData, void *pMemory)
225 {
226 free(pMemory);
227 }
228
229 static const VkAllocationCallbacks default_alloc = {
230 .pUserData = NULL,
231 .pfnAllocation = default_alloc_func,
232 .pfnReallocation = default_realloc_func,
233 .pfnFree = default_free_func,
234 };
235
236 static const struct debug_control radv_debug_options[] = {
237 {"nofastclears", RADV_DEBUG_NO_FAST_CLEARS},
238 {"nodcc", RADV_DEBUG_NO_DCC},
239 {"shaders", RADV_DEBUG_DUMP_SHADERS},
240 {"nocache", RADV_DEBUG_NO_CACHE},
241 {"shaderstats", RADV_DEBUG_DUMP_SHADER_STATS},
242 {"nohiz", RADV_DEBUG_NO_HIZ},
243 {"nocompute", RADV_DEBUG_NO_COMPUTE_QUEUE},
244 {"unsafemath", RADV_DEBUG_UNSAFE_MATH},
245 {"allbos", RADV_DEBUG_ALL_BOS},
246 {"noibs", RADV_DEBUG_NO_IBS},
247 {"spirv", RADV_DEBUG_DUMP_SPIRV},
248 {"vmfaults", RADV_DEBUG_VM_FAULTS},
249 {"zerovram", RADV_DEBUG_ZERO_VRAM},
250 {"syncshaders", RADV_DEBUG_SYNC_SHADERS},
251 {NULL, 0}
252 };
253
254 const char *
255 radv_get_debug_option_name(int id)
256 {
257 assert(id < ARRAY_SIZE(radv_debug_options) - 1);
258 return radv_debug_options[id].string;
259 }
260
261 static const struct debug_control radv_perftest_options[] = {
262 {"nobatchchain", RADV_PERFTEST_NO_BATCHCHAIN},
263 {"sisched", RADV_PERFTEST_SISCHED},
264 {NULL, 0}
265 };
266
267 const char *
268 radv_get_perftest_option_name(int id)
269 {
270 assert(id < ARRAY_SIZE(radv_perftest_options) - 1);
271 return radv_perftest_options[id].string;
272 }
273
274 VkResult radv_CreateInstance(
275 const VkInstanceCreateInfo* pCreateInfo,
276 const VkAllocationCallbacks* pAllocator,
277 VkInstance* pInstance)
278 {
279 struct radv_instance *instance;
280
281 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
282
283 uint32_t client_version;
284 if (pCreateInfo->pApplicationInfo &&
285 pCreateInfo->pApplicationInfo->apiVersion != 0) {
286 client_version = pCreateInfo->pApplicationInfo->apiVersion;
287 } else {
288 client_version = VK_MAKE_VERSION(1, 0, 0);
289 }
290
291 if (VK_MAKE_VERSION(1, 0, 0) > client_version ||
292 client_version > VK_MAKE_VERSION(1, 0, 0xfff)) {
293 return vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
294 "Client requested version %d.%d.%d",
295 VK_VERSION_MAJOR(client_version),
296 VK_VERSION_MINOR(client_version),
297 VK_VERSION_PATCH(client_version));
298 }
299
300 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
301 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
302 if (!radv_instance_extension_supported(ext_name))
303 return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
304 }
305
306 instance = vk_alloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
307 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
308 if (!instance)
309 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
310
311 memset(instance, 0, sizeof(*instance));
312
313 instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
314
315 if (pAllocator)
316 instance->alloc = *pAllocator;
317 else
318 instance->alloc = default_alloc;
319
320 instance->apiVersion = client_version;
321 instance->physicalDeviceCount = -1;
322
323 _mesa_locale_init();
324
325 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
326
327 instance->debug_flags = parse_debug_string(getenv("RADV_DEBUG"),
328 radv_debug_options);
329
330 instance->perftest_flags = parse_debug_string(getenv("RADV_PERFTEST"),
331 radv_perftest_options);
332
333 *pInstance = radv_instance_to_handle(instance);
334
335 return VK_SUCCESS;
336 }
337
338 void radv_DestroyInstance(
339 VkInstance _instance,
340 const VkAllocationCallbacks* pAllocator)
341 {
342 RADV_FROM_HANDLE(radv_instance, instance, _instance);
343
344 if (!instance)
345 return;
346
347 for (int i = 0; i < instance->physicalDeviceCount; ++i) {
348 radv_physical_device_finish(instance->physicalDevices + i);
349 }
350
351 VG(VALGRIND_DESTROY_MEMPOOL(instance));
352
353 _mesa_locale_fini();
354
355 vk_free(&instance->alloc, instance);
356 }
357
358 static VkResult
359 radv_enumerate_devices(struct radv_instance *instance)
360 {
361 /* TODO: Check for more devices? */
362 drmDevicePtr devices[8];
363 VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
364 int max_devices;
365
366 instance->physicalDeviceCount = 0;
367
368 max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
369 if (max_devices < 1)
370 return VK_ERROR_INCOMPATIBLE_DRIVER;
371
372 for (unsigned i = 0; i < (unsigned)max_devices; i++) {
373 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
374 devices[i]->bustype == DRM_BUS_PCI &&
375 devices[i]->deviceinfo.pci->vendor_id == ATI_VENDOR_ID) {
376
377 result = radv_physical_device_init(instance->physicalDevices +
378 instance->physicalDeviceCount,
379 instance,
380 devices[i]);
381 if (result == VK_SUCCESS)
382 ++instance->physicalDeviceCount;
383 else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
384 break;
385 }
386 }
387 drmFreeDevices(devices, max_devices);
388
389 return result;
390 }
391
392 VkResult radv_EnumeratePhysicalDevices(
393 VkInstance _instance,
394 uint32_t* pPhysicalDeviceCount,
395 VkPhysicalDevice* pPhysicalDevices)
396 {
397 RADV_FROM_HANDLE(radv_instance, instance, _instance);
398 VkResult result;
399
400 if (instance->physicalDeviceCount < 0) {
401 result = radv_enumerate_devices(instance);
402 if (result != VK_SUCCESS &&
403 result != VK_ERROR_INCOMPATIBLE_DRIVER)
404 return result;
405 }
406
407 if (!pPhysicalDevices) {
408 *pPhysicalDeviceCount = instance->physicalDeviceCount;
409 } else {
410 *pPhysicalDeviceCount = MIN2(*pPhysicalDeviceCount, instance->physicalDeviceCount);
411 for (unsigned i = 0; i < *pPhysicalDeviceCount; ++i)
412 pPhysicalDevices[i] = radv_physical_device_to_handle(instance->physicalDevices + i);
413 }
414
415 return *pPhysicalDeviceCount < instance->physicalDeviceCount ? VK_INCOMPLETE
416 : VK_SUCCESS;
417 }
418
419 void radv_GetPhysicalDeviceFeatures(
420 VkPhysicalDevice physicalDevice,
421 VkPhysicalDeviceFeatures* pFeatures)
422 {
423 memset(pFeatures, 0, sizeof(*pFeatures));
424
425 *pFeatures = (VkPhysicalDeviceFeatures) {
426 .robustBufferAccess = true,
427 .fullDrawIndexUint32 = true,
428 .imageCubeArray = true,
429 .independentBlend = true,
430 .geometryShader = true,
431 .tessellationShader = true,
432 .sampleRateShading = true,
433 .dualSrcBlend = true,
434 .logicOp = true,
435 .multiDrawIndirect = true,
436 .drawIndirectFirstInstance = true,
437 .depthClamp = true,
438 .depthBiasClamp = true,
439 .fillModeNonSolid = true,
440 .depthBounds = true,
441 .wideLines = true,
442 .largePoints = true,
443 .alphaToOne = true,
444 .multiViewport = true,
445 .samplerAnisotropy = true,
446 .textureCompressionETC2 = false,
447 .textureCompressionASTC_LDR = false,
448 .textureCompressionBC = true,
449 .occlusionQueryPrecise = true,
450 .pipelineStatisticsQuery = true,
451 .vertexPipelineStoresAndAtomics = true,
452 .fragmentStoresAndAtomics = true,
453 .shaderTessellationAndGeometryPointSize = true,
454 .shaderImageGatherExtended = true,
455 .shaderStorageImageExtendedFormats = true,
456 .shaderStorageImageMultisample = false,
457 .shaderUniformBufferArrayDynamicIndexing = true,
458 .shaderSampledImageArrayDynamicIndexing = true,
459 .shaderStorageBufferArrayDynamicIndexing = true,
460 .shaderStorageImageArrayDynamicIndexing = true,
461 .shaderStorageImageReadWithoutFormat = true,
462 .shaderStorageImageWriteWithoutFormat = true,
463 .shaderClipDistance = true,
464 .shaderCullDistance = true,
465 .shaderFloat64 = true,
466 .shaderInt64 = true,
467 .shaderInt16 = false,
468 .sparseBinding = true,
469 .variableMultisampleRate = true,
470 .inheritedQueries = true,
471 };
472 }
473
474 void radv_GetPhysicalDeviceFeatures2KHR(
475 VkPhysicalDevice physicalDevice,
476 VkPhysicalDeviceFeatures2KHR *pFeatures)
477 {
478 vk_foreach_struct(ext, pFeatures->pNext) {
479 switch (ext->sType) {
480 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
481 VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *)ext;
482 features->variablePointersStorageBuffer = true;
483 features->variablePointers = false;
484 break;
485 }
486 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHX: {
487 VkPhysicalDeviceMultiviewFeaturesKHX *features = (VkPhysicalDeviceMultiviewFeaturesKHX*)ext;
488 features->multiview = true;
489 features->multiviewGeometryShader = true;
490 features->multiviewTessellationShader = true;
491 break;
492 }
493 default:
494 break;
495 }
496 }
497 radv_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
498 }
499
500 void radv_GetPhysicalDeviceProperties(
501 VkPhysicalDevice physicalDevice,
502 VkPhysicalDeviceProperties* pProperties)
503 {
504 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
505 VkSampleCountFlags sample_counts = 0xf;
506
507 /* make sure that the entire descriptor set is addressable with a signed
508 * 32-bit int. So the sum of all limits scaled by descriptor size has to
509 * be at most 2 GiB. A combined image & sampler object counts as one of
510 * both. This limit is for the pipeline layout, not for the set layout, but
511 * there is no set limit, so we just set a pipeline limit. I don't think
512 * any app is going to hit this soon. */
513 size_t max_descriptor_set_size = ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
514 (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
515 32 /* storage buffer, 32 due to potential space wasted on alignment */ +
516 32 /* sampler, largest when combined with image */ +
517 64 /* sampled image */ +
518 64 /* storage image */);
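/* With the divisor above summing to 224 bytes per descriptor, this works
 * out to roughly 9.6 million descriptors. */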
519
520 VkPhysicalDeviceLimits limits = {
521 .maxImageDimension1D = (1 << 14),
522 .maxImageDimension2D = (1 << 14),
523 .maxImageDimension3D = (1 << 11),
524 .maxImageDimensionCube = (1 << 14),
525 .maxImageArrayLayers = (1 << 11),
526 .maxTexelBufferElements = 128 * 1024 * 1024,
527 .maxUniformBufferRange = UINT32_MAX,
528 .maxStorageBufferRange = UINT32_MAX,
529 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
530 .maxMemoryAllocationCount = UINT32_MAX,
531 .maxSamplerAllocationCount = 64 * 1024,
532 .bufferImageGranularity = 64, /* A cache line */
533 .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
534 .maxBoundDescriptorSets = MAX_SETS,
535 .maxPerStageDescriptorSamplers = max_descriptor_set_size,
536 .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
537 .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
538 .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
539 .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
540 .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
541 .maxPerStageResources = max_descriptor_set_size,
542 .maxDescriptorSetSamplers = max_descriptor_set_size,
543 .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
544 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2,
545 .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
546 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2,
547 .maxDescriptorSetSampledImages = max_descriptor_set_size,
548 .maxDescriptorSetStorageImages = max_descriptor_set_size,
549 .maxDescriptorSetInputAttachments = max_descriptor_set_size,
550 .maxVertexInputAttributes = 32,
551 .maxVertexInputBindings = 32,
552 .maxVertexInputAttributeOffset = 2047,
553 .maxVertexInputBindingStride = 2048,
554 .maxVertexOutputComponents = 128,
555 .maxTessellationGenerationLevel = 64,
556 .maxTessellationPatchSize = 32,
557 .maxTessellationControlPerVertexInputComponents = 128,
558 .maxTessellationControlPerVertexOutputComponents = 128,
559 .maxTessellationControlPerPatchOutputComponents = 120,
560 .maxTessellationControlTotalOutputComponents = 4096,
561 .maxTessellationEvaluationInputComponents = 128,
562 .maxTessellationEvaluationOutputComponents = 128,
563 .maxGeometryShaderInvocations = 127,
564 .maxGeometryInputComponents = 64,
565 .maxGeometryOutputComponents = 128,
566 .maxGeometryOutputVertices = 256,
567 .maxGeometryTotalOutputComponents = 1024,
568 .maxFragmentInputComponents = 128,
569 .maxFragmentOutputAttachments = 8,
570 .maxFragmentDualSrcAttachments = 1,
571 .maxFragmentCombinedOutputResources = 8,
572 .maxComputeSharedMemorySize = 32768,
573 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
574 .maxComputeWorkGroupInvocations = 2048,
575 .maxComputeWorkGroupSize = {
576 2048,
577 2048,
578 2048
579 },
580 .subPixelPrecisionBits = 4 /* FIXME */,
581 .subTexelPrecisionBits = 4 /* FIXME */,
582 .mipmapPrecisionBits = 4 /* FIXME */,
583 .maxDrawIndexedIndexValue = UINT32_MAX,
584 .maxDrawIndirectCount = UINT32_MAX,
585 .maxSamplerLodBias = 16,
586 .maxSamplerAnisotropy = 16,
587 .maxViewports = MAX_VIEWPORTS,
588 .maxViewportDimensions = { (1 << 14), (1 << 14) },
589 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
590 .viewportSubPixelBits = 13, /* We take a float? */
591 .minMemoryMapAlignment = 4096, /* A page */
592 .minTexelBufferOffsetAlignment = 1,
593 .minUniformBufferOffsetAlignment = 4,
594 .minStorageBufferOffsetAlignment = 4,
595 .minTexelOffset = -32,
596 .maxTexelOffset = 31,
597 .minTexelGatherOffset = -32,
598 .maxTexelGatherOffset = 31,
599 .minInterpolationOffset = -2,
600 .maxInterpolationOffset = 2,
601 .subPixelInterpolationOffsetBits = 8,
602 .maxFramebufferWidth = (1 << 14),
603 .maxFramebufferHeight = (1 << 14),
604 .maxFramebufferLayers = (1 << 10),
605 .framebufferColorSampleCounts = sample_counts,
606 .framebufferDepthSampleCounts = sample_counts,
607 .framebufferStencilSampleCounts = sample_counts,
608 .framebufferNoAttachmentsSampleCounts = sample_counts,
609 .maxColorAttachments = MAX_RTS,
610 .sampledImageColorSampleCounts = sample_counts,
611 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
612 .sampledImageDepthSampleCounts = sample_counts,
613 .sampledImageStencilSampleCounts = sample_counts,
614 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
615 .maxSampleMaskWords = 1,
616 .timestampComputeAndGraphics = true,
617 .timestampPeriod = 1000000.0 / pdevice->rad_info.clock_crystal_freq,
618 .maxClipDistances = 8,
619 .maxCullDistances = 8,
620 .maxCombinedClipAndCullDistances = 8,
621 .discreteQueuePriorities = 1,
622 .pointSizeRange = { 0.125, 255.875 },
623 .lineWidthRange = { 0.0, 7.9921875 },
624 .pointSizeGranularity = (1.0 / 8.0),
625 .lineWidthGranularity = (1.0 / 128.0),
626 .strictLines = false, /* FINISHME */
627 .standardSampleLocations = true,
628 .optimalBufferCopyOffsetAlignment = 128,
629 .optimalBufferCopyRowPitchAlignment = 128,
630 .nonCoherentAtomSize = 64,
631 };
632
633 *pProperties = (VkPhysicalDeviceProperties) {
634 .apiVersion = radv_physical_device_api_version(pdevice),
635 .driverVersion = vk_get_driver_version(),
636 .vendorID = ATI_VENDOR_ID,
637 .deviceID = pdevice->rad_info.pci_id,
638 .deviceType = pdevice->rad_info.has_dedicated_vram ? VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU : VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
639 .limits = limits,
640 .sparseProperties = {0},
641 };
642
643 strcpy(pProperties->deviceName, pdevice->name);
644 memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
645 }
646
647 void radv_GetPhysicalDeviceProperties2KHR(
648 VkPhysicalDevice physicalDevice,
649 VkPhysicalDeviceProperties2KHR *pProperties)
650 {
651 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
652 radv_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
653
654 vk_foreach_struct(ext, pProperties->pNext) {
655 switch (ext->sType) {
656 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
657 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
658 (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
659 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
660 break;
661 }
662 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
663 VkPhysicalDeviceIDPropertiesKHR *properties = (VkPhysicalDeviceIDPropertiesKHR*)ext;
664 memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
665 memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
666 properties->deviceLUIDValid = false;
667 break;
668 }
669 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHX: {
670 VkPhysicalDeviceMultiviewPropertiesKHX *properties = (VkPhysicalDeviceMultiviewPropertiesKHX*)ext;
671 properties->maxMultiviewViewCount = MAX_VIEWS;
672 properties->maxMultiviewInstanceIndex = INT_MAX;
673 break;
674 }
675 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
676 VkPhysicalDevicePointClippingPropertiesKHR *properties =
677 (VkPhysicalDevicePointClippingPropertiesKHR*)ext;
678 properties->pointClippingBehavior = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
679 break;
680 }
681 default:
682 break;
683 }
684 }
685 }
686
687 static void radv_get_physical_device_queue_family_properties(
688 struct radv_physical_device* pdevice,
689 uint32_t* pCount,
690 VkQueueFamilyProperties** pQueueFamilyProperties)
691 {
692 int num_queue_families = 1;
693 int idx;
694 if (pdevice->rad_info.num_compute_rings > 0 &&
695 pdevice->rad_info.chip_class >= CIK &&
696 !(pdevice->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE))
697 num_queue_families++;
698
699 if (pQueueFamilyProperties == NULL) {
700 *pCount = num_queue_families;
701 return;
702 }
703
704 if (!*pCount)
705 return;
706
707 idx = 0;
708 if (*pCount >= 1) {
709 *pQueueFamilyProperties[idx] = (VkQueueFamilyProperties) {
710 .queueFlags = VK_QUEUE_GRAPHICS_BIT |
711 VK_QUEUE_COMPUTE_BIT |
712 VK_QUEUE_TRANSFER_BIT |
713 VK_QUEUE_SPARSE_BINDING_BIT,
714 .queueCount = 1,
715 .timestampValidBits = 64,
716 .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
717 };
718 idx++;
719 }
720
721 if (pdevice->rad_info.num_compute_rings > 0 &&
722 pdevice->rad_info.chip_class >= CIK &&
723 !(pdevice->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE)) {
724 if (*pCount > idx) {
725 *pQueueFamilyProperties[idx] = (VkQueueFamilyProperties) {
726 .queueFlags = VK_QUEUE_COMPUTE_BIT |
727 VK_QUEUE_TRANSFER_BIT |
728 VK_QUEUE_SPARSE_BINDING_BIT,
729 .queueCount = pdevice->rad_info.num_compute_rings,
730 .timestampValidBits = 64,
731 .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
732 };
733 idx++;
734 }
735 }
736 *pCount = idx;
737 }
738
739 void radv_GetPhysicalDeviceQueueFamilyProperties(
740 VkPhysicalDevice physicalDevice,
741 uint32_t* pCount,
742 VkQueueFamilyProperties* pQueueFamilyProperties)
743 {
744 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
745 if (!pQueueFamilyProperties) {
746 radv_get_physical_device_queue_family_properties(pdevice, pCount, NULL);
747 return;
748 }
749 VkQueueFamilyProperties *properties[] = {
750 pQueueFamilyProperties + 0,
751 pQueueFamilyProperties + 1,
752 pQueueFamilyProperties + 2,
753 };
754 radv_get_physical_device_queue_family_properties(pdevice, pCount, properties);
755 assert(*pCount <= 3);
756 }
757
758 void radv_GetPhysicalDeviceQueueFamilyProperties2KHR(
759 VkPhysicalDevice physicalDevice,
760 uint32_t* pCount,
761 VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
762 {
763 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
764 if (!pQueueFamilyProperties) {
765 radv_get_physical_device_queue_family_properties(pdevice, pCount, NULL);
766 return;
767 }
768 VkQueueFamilyProperties *properties[] = {
769 &pQueueFamilyProperties[0].queueFamilyProperties,
770 &pQueueFamilyProperties[1].queueFamilyProperties,
771 &pQueueFamilyProperties[2].queueFamilyProperties,
772 };
773 radv_get_physical_device_queue_family_properties(pdevice, pCount, properties);
774 assert(*pCount <= 3);
775 }
776
777 void radv_GetPhysicalDeviceMemoryProperties(
778 VkPhysicalDevice physicalDevice,
779 VkPhysicalDeviceMemoryProperties *pMemoryProperties)
780 {
781 RADV_FROM_HANDLE(radv_physical_device, physical_device, physicalDevice);
782
783 STATIC_ASSERT(RADV_MEM_TYPE_COUNT <= VK_MAX_MEMORY_TYPES);
784
785 pMemoryProperties->memoryTypeCount = RADV_MEM_TYPE_COUNT;
786 pMemoryProperties->memoryTypes[RADV_MEM_TYPE_VRAM] = (VkMemoryType) {
787 .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
788 .heapIndex = RADV_MEM_HEAP_VRAM,
789 };
790 pMemoryProperties->memoryTypes[RADV_MEM_TYPE_GTT_WRITE_COMBINE] = (VkMemoryType) {
791 .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
792 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
793 .heapIndex = RADV_MEM_HEAP_GTT,
794 };
795 pMemoryProperties->memoryTypes[RADV_MEM_TYPE_VRAM_CPU_ACCESS] = (VkMemoryType) {
796 .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
797 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
798 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
799 .heapIndex = RADV_MEM_HEAP_VRAM_CPU_ACCESS,
800 };
801 pMemoryProperties->memoryTypes[RADV_MEM_TYPE_GTT_CACHED] = (VkMemoryType) {
802 .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
803 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
804 VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
805 .heapIndex = RADV_MEM_HEAP_GTT,
806 };
807
808 STATIC_ASSERT(RADV_MEM_HEAP_COUNT <= VK_MAX_MEMORY_HEAPS);
809 uint64_t visible_vram_size = MIN2(physical_device->rad_info.vram_size,
810 physical_device->rad_info.vram_vis_size);
811
812 pMemoryProperties->memoryHeapCount = RADV_MEM_HEAP_COUNT;
813 pMemoryProperties->memoryHeaps[RADV_MEM_HEAP_VRAM] = (VkMemoryHeap) {
814 .size = physical_device->rad_info.vram_size -
815 visible_vram_size,
816 .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
817 };
818 pMemoryProperties->memoryHeaps[RADV_MEM_HEAP_VRAM_CPU_ACCESS] = (VkMemoryHeap) {
819 .size = visible_vram_size,
820 .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
821 };
822 pMemoryProperties->memoryHeaps[RADV_MEM_HEAP_GTT] = (VkMemoryHeap) {
823 .size = physical_device->rad_info.gart_size,
824 .flags = 0,
825 };
826 }
827
828 void radv_GetPhysicalDeviceMemoryProperties2KHR(
829 VkPhysicalDevice physicalDevice,
830 VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
831 {
832 radv_GetPhysicalDeviceMemoryProperties(physicalDevice,
833 &pMemoryProperties->memoryProperties);
834 }
835
836 static enum radeon_ctx_priority
837 radv_get_queue_global_priority(const VkDeviceQueueGlobalPriorityCreateInfoEXT *pObj)
838 {
839 /* Default to MEDIUM when a specific global priority isn't requested */
840 if (!pObj)
841 return RADEON_CTX_PRIORITY_MEDIUM;
842
843 switch (pObj->globalPriority) {
844 case VK_QUEUE_GLOBAL_PRIORITY_REALTIME:
845 return RADEON_CTX_PRIORITY_REALTIME;
846 case VK_QUEUE_GLOBAL_PRIORITY_HIGH:
847 return RADEON_CTX_PRIORITY_HIGH;
848 case VK_QUEUE_GLOBAL_PRIORITY_MEDIUM:
849 return RADEON_CTX_PRIORITY_MEDIUM;
850 case VK_QUEUE_GLOBAL_PRIORITY_LOW:
851 return RADEON_CTX_PRIORITY_LOW;
852 default:
853 unreachable("Illegal global priority value");
854 return RADEON_CTX_PRIORITY_INVALID;
855 }
856 }
857
858 static VkResult
859 radv_queue_init(struct radv_device *device, struct radv_queue *queue,
860 int queue_family_index, int idx,
861 const VkDeviceQueueGlobalPriorityCreateInfoEXT *global_priority)
862 {
863 queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
864 queue->device = device;
865 queue->queue_family_index = queue_family_index;
866 queue->queue_idx = idx;
867 queue->priority = radv_get_queue_global_priority(global_priority);
868
869 queue->hw_ctx = device->ws->ctx_create(device->ws, queue->priority);
870 if (!queue->hw_ctx)
871 return VK_ERROR_OUT_OF_HOST_MEMORY;
872
873 return VK_SUCCESS;
874 }
875
876 static void
877 radv_queue_finish(struct radv_queue *queue)
878 {
879 if (queue->hw_ctx)
880 queue->device->ws->ctx_destroy(queue->hw_ctx);
881
882 if (queue->initial_full_flush_preamble_cs)
883 queue->device->ws->cs_destroy(queue->initial_full_flush_preamble_cs);
884 if (queue->initial_preamble_cs)
885 queue->device->ws->cs_destroy(queue->initial_preamble_cs);
886 if (queue->continue_preamble_cs)
887 queue->device->ws->cs_destroy(queue->continue_preamble_cs);
888 if (queue->descriptor_bo)
889 queue->device->ws->buffer_destroy(queue->descriptor_bo);
890 if (queue->scratch_bo)
891 queue->device->ws->buffer_destroy(queue->scratch_bo);
892 if (queue->esgs_ring_bo)
893 queue->device->ws->buffer_destroy(queue->esgs_ring_bo);
894 if (queue->gsvs_ring_bo)
895 queue->device->ws->buffer_destroy(queue->gsvs_ring_bo);
896 if (queue->tess_factor_ring_bo)
897 queue->device->ws->buffer_destroy(queue->tess_factor_ring_bo);
898 if (queue->tess_offchip_ring_bo)
899 queue->device->ws->buffer_destroy(queue->tess_offchip_ring_bo);
900 if (queue->compute_scratch_bo)
901 queue->device->ws->buffer_destroy(queue->compute_scratch_bo);
902 }
903
904 static void
905 radv_device_init_gs_info(struct radv_device *device)
906 {
907 switch (device->physical_device->rad_info.family) {
908 case CHIP_OLAND:
909 case CHIP_HAINAN:
910 case CHIP_KAVERI:
911 case CHIP_KABINI:
912 case CHIP_MULLINS:
913 case CHIP_ICELAND:
914 case CHIP_CARRIZO:
915 case CHIP_STONEY:
916 device->gs_table_depth = 16;
917 return;
918 case CHIP_TAHITI:
919 case CHIP_PITCAIRN:
920 case CHIP_VERDE:
921 case CHIP_BONAIRE:
922 case CHIP_HAWAII:
923 case CHIP_TONGA:
924 case CHIP_FIJI:
925 case CHIP_POLARIS10:
926 case CHIP_POLARIS11:
927 case CHIP_POLARIS12:
928 case CHIP_VEGA10:
929 case CHIP_RAVEN:
930 device->gs_table_depth = 32;
931 return;
932 default:
933 unreachable("unknown GPU");
934 }
935 }
936
937 VkResult radv_CreateDevice(
938 VkPhysicalDevice physicalDevice,
939 const VkDeviceCreateInfo* pCreateInfo,
940 const VkAllocationCallbacks* pAllocator,
941 VkDevice* pDevice)
942 {
943 RADV_FROM_HANDLE(radv_physical_device, physical_device, physicalDevice);
944 VkResult result;
945 struct radv_device *device;
946
947 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
948 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
949 if (!radv_physical_device_extension_supported(physical_device, ext_name))
950 return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
951 }
952
953 /* Check enabled features */
954 if (pCreateInfo->pEnabledFeatures) {
955 VkPhysicalDeviceFeatures supported_features;
956 radv_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
957 VkBool32 *supported_feature = (VkBool32 *)&supported_features;
958 VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
959 unsigned num_features = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
960 for (uint32_t i = 0; i < num_features; i++) {
961 if (enabled_feature[i] && !supported_feature[i])
962 return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
963 }
964 }
965
966 device = vk_alloc2(&physical_device->instance->alloc, pAllocator,
967 sizeof(*device), 8,
968 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
969 if (!device)
970 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
971
972 memset(device, 0, sizeof(*device));
973
974 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
975 device->instance = physical_device->instance;
976 device->physical_device = physical_device;
977
978 device->ws = physical_device->ws;
979 if (pAllocator)
980 device->alloc = *pAllocator;
981 else
982 device->alloc = physical_device->instance->alloc;
983
984 mtx_init(&device->shader_slab_mutex, mtx_plain);
985 list_inithead(&device->shader_slabs);
986
987 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
988 const VkDeviceQueueCreateInfo *queue_create = &pCreateInfo->pQueueCreateInfos[i];
989 uint32_t qfi = queue_create->queueFamilyIndex;
990 const VkDeviceQueueGlobalPriorityCreateInfoEXT *global_priority =
991 vk_find_struct_const(queue_create->pNext, DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT);
992
993 assert(!global_priority || device->physical_device->rad_info.has_ctx_priority);
994
995 device->queues[qfi] = vk_alloc(&device->alloc,
996 queue_create->queueCount * sizeof(struct radv_queue), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
997 if (!device->queues[qfi]) {
998 result = VK_ERROR_OUT_OF_HOST_MEMORY;
999 goto fail;
1000 }
1001
1002 memset(device->queues[qfi], 0, queue_create->queueCount * sizeof(struct radv_queue));
1003
1004 device->queue_count[qfi] = queue_create->queueCount;
1005
1006 for (unsigned q = 0; q < queue_create->queueCount; q++) {
1007 result = radv_queue_init(device, &device->queues[qfi][q], qfi, q, global_priority);
1008 if (result != VK_SUCCESS)
1009 goto fail;
1010 }
1011 }
1012
1013 #if HAVE_LLVM < 0x0400
1014 device->llvm_supports_spill = false;
1015 #else
1016 device->llvm_supports_spill = true;
1017 #endif
1018
1019 /* The maximum number of scratch waves. Scratch space isn't divided
1020 * evenly between CUs. The number is only a function of the number of CUs.
1021 * We can decrease the constant to decrease the scratch buffer size.
1022 *
1023 * device->scratch_waves must be >= the maximum possible size of
1024 * 1 threadgroup, so that the hw doesn't hang from being unable
1025 * to start any.
1026 *
1027 * The recommended value is 4 per CU at most. Higher numbers don't
1028 * bring much benefit, but they still occupy chip resources (think
1029 * async compute). I've seen ~2% performance difference between 4 and 32.
1030 */
1031 uint32_t max_threads_per_block = 2048;
1032 device->scratch_waves = MAX2(32 * physical_device->rad_info.num_good_compute_units,
1033 max_threads_per_block / 64);
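/* For example, a 64-CU chip gets 32 * 64 = 2048 scratch waves; the MAX2
 * with 2048 / 64 = 32 guarantees room for at least one maximally sized
 * threadgroup. */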
1034
1035 radv_device_init_gs_info(device);
1036
1037 device->tess_offchip_block_dw_size =
1038 device->physical_device->rad_info.family == CHIP_HAWAII ? 4096 : 8192;
1039 device->has_distributed_tess =
1040 device->physical_device->rad_info.chip_class >= VI &&
1041 device->physical_device->rad_info.max_se >= 2;
1042
1043 if (getenv("RADV_TRACE_FILE")) {
1044 if (!radv_init_trace(device)) {
result = VK_ERROR_INITIALIZATION_FAILED;
1045 goto fail;
}
1046 }
1047
1048 result = radv_device_init_meta(device);
1049 if (result != VK_SUCCESS)
1050 goto fail;
1051
1052 radv_device_init_msaa(device);
1053
1054 for (int family = 0; family < RADV_MAX_QUEUE_FAMILIES; ++family) {
1055 device->empty_cs[family] = device->ws->cs_create(device->ws, family);
1056 switch (family) {
1057 case RADV_QUEUE_GENERAL:
1058 radeon_emit(device->empty_cs[family], PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
1059 radeon_emit(device->empty_cs[family], CONTEXT_CONTROL_LOAD_ENABLE(1));
1060 radeon_emit(device->empty_cs[family], CONTEXT_CONTROL_SHADOW_ENABLE(1));
1061 break;
1062 case RADV_QUEUE_COMPUTE:
1063 radeon_emit(device->empty_cs[family], PKT3(PKT3_NOP, 0, 0));
1064 radeon_emit(device->empty_cs[family], 0);
1065 break;
1066 }
1067 device->ws->cs_finalize(device->empty_cs[family]);
1068 }
1069
1070 if (device->physical_device->rad_info.chip_class >= CIK)
1071 cik_create_gfx_config(device);
1072
1073 VkPipelineCacheCreateInfo ci;
1074 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
1075 ci.pNext = NULL;
1076 ci.flags = 0;
1077 ci.pInitialData = NULL;
1078 ci.initialDataSize = 0;
1079 VkPipelineCache pc;
1080 result = radv_CreatePipelineCache(radv_device_to_handle(device),
1081 &ci, NULL, &pc);
1082 if (result != VK_SUCCESS)
1083 goto fail;
1084
1085 device->mem_cache = radv_pipeline_cache_from_handle(pc);
1086
1087 *pDevice = radv_device_to_handle(device);
1088 return VK_SUCCESS;
1089
1090 fail:
1091 if (device->trace_bo)
1092 device->ws->buffer_destroy(device->trace_bo);
1093
1094 if (device->gfx_init)
1095 device->ws->buffer_destroy(device->gfx_init);
1096
1097 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
1098 for (unsigned q = 0; q < device->queue_count[i]; q++)
1099 radv_queue_finish(&device->queues[i][q]);
1100 if (device->queue_count[i])
1101 vk_free(&device->alloc, device->queues[i]);
1102 }
1103
1104 vk_free(&device->alloc, device);
1105 return result;
1106 }
1107
1108 void radv_DestroyDevice(
1109 VkDevice _device,
1110 const VkAllocationCallbacks* pAllocator)
1111 {
1112 RADV_FROM_HANDLE(radv_device, device, _device);
1113
1114 if (!device)
1115 return;
1116
1117 if (device->trace_bo)
1118 device->ws->buffer_destroy(device->trace_bo);
1119
1120 if (device->gfx_init)
1121 device->ws->buffer_destroy(device->gfx_init);
1122
1123 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
1124 for (unsigned q = 0; q < device->queue_count[i]; q++)
1125 radv_queue_finish(&device->queues[i][q]);
1126 if (device->queue_count[i])
1127 vk_free(&device->alloc, device->queues[i]);
1128 if (device->empty_cs[i])
1129 device->ws->cs_destroy(device->empty_cs[i]);
1130 }
1131 radv_device_finish_meta(device);
1132
1133 VkPipelineCache pc = radv_pipeline_cache_to_handle(device->mem_cache);
1134 radv_DestroyPipelineCache(radv_device_to_handle(device), pc, NULL);
1135
1136 radv_destroy_shader_slabs(device);
1137
1138 vk_free(&device->alloc, device);
1139 }
1140
1141 VkResult radv_EnumerateInstanceLayerProperties(
1142 uint32_t* pPropertyCount,
1143 VkLayerProperties* pProperties)
1144 {
1145 if (pProperties == NULL) {
1146 *pPropertyCount = 0;
1147 return VK_SUCCESS;
1148 }
1149
1150 /* None supported at this time */
1151 return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
1152 }
1153
1154 VkResult radv_EnumerateDeviceLayerProperties(
1155 VkPhysicalDevice physicalDevice,
1156 uint32_t* pPropertyCount,
1157 VkLayerProperties* pProperties)
1158 {
1159 if (pProperties == NULL) {
1160 *pPropertyCount = 0;
1161 return VK_SUCCESS;
1162 }
1163
1164 /* None supported at this time */
1165 return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
1166 }
1167
1168 void radv_GetDeviceQueue(
1169 VkDevice _device,
1170 uint32_t queueFamilyIndex,
1171 uint32_t queueIndex,
1172 VkQueue* pQueue)
1173 {
1174 RADV_FROM_HANDLE(radv_device, device, _device);
1175
1176 *pQueue = radv_queue_to_handle(&device->queues[queueFamilyIndex][queueIndex]);
1177 }
1178
1179 static void
1180 fill_geom_tess_rings(struct radv_queue *queue,
1181 uint32_t *map,
1182 bool add_sample_positions,
1183 uint32_t esgs_ring_size,
1184 struct radeon_winsys_bo *esgs_ring_bo,
1185 uint32_t gsvs_ring_size,
1186 struct radeon_winsys_bo *gsvs_ring_bo,
1187 uint32_t tess_factor_ring_size,
1188 struct radeon_winsys_bo *tess_factor_ring_bo,
1189 uint32_t tess_offchip_ring_size,
1190 struct radeon_winsys_bo *tess_offchip_ring_bo)
1191 {
1192 uint64_t esgs_va = 0, gsvs_va = 0;
1193 uint64_t tess_factor_va = 0, tess_offchip_va = 0;
1194 uint32_t *desc = &map[4];
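/* The first four dwords of the descriptor BO are reserved for the scratch
 * rsrc written by the caller (2 dwords used, 2 padding); the ring
 * descriptors below are 4 dwords each, starting at map[4]. */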
1195
1196 if (esgs_ring_bo)
1197 esgs_va = radv_buffer_get_va(esgs_ring_bo);
1198 if (gsvs_ring_bo)
1199 gsvs_va = radv_buffer_get_va(gsvs_ring_bo);
1200 if (tess_factor_ring_bo)
1201 tess_factor_va = radv_buffer_get_va(tess_factor_ring_bo);
1202 if (tess_offchip_ring_bo)
1203 tess_offchip_va = radv_buffer_get_va(tess_offchip_ring_bo);
1204
1205 /* stride 0, num records - size, add tid, swizzle, elsize4,
1206 index stride 64 */
1207 desc[0] = esgs_va;
1208 desc[1] = S_008F04_BASE_ADDRESS_HI(esgs_va >> 32) |
1209 S_008F04_STRIDE(0) |
1210 S_008F04_SWIZZLE_ENABLE(true);
1211 desc[2] = esgs_ring_size;
1212 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1213 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1214 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1215 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1216 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1217 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1218 S_008F0C_ELEMENT_SIZE(1) |
1219 S_008F0C_INDEX_STRIDE(3) |
1220 S_008F0C_ADD_TID_ENABLE(true);
1221
1222 desc += 4;
1223 /* GS entry for ES->GS ring */
1224 /* stride 0, num records - size, elsize0,
1225 index stride 0 */
1226 desc[0] = esgs_va;
1227 desc[1] = S_008F04_BASE_ADDRESS_HI(esgs_va >> 32)|
1228 S_008F04_STRIDE(0) |
1229 S_008F04_SWIZZLE_ENABLE(false);
1230 desc[2] = esgs_ring_size;
1231 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1232 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1233 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1234 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1235 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1236 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1237 S_008F0C_ELEMENT_SIZE(0) |
1238 S_008F0C_INDEX_STRIDE(0) |
1239 S_008F0C_ADD_TID_ENABLE(false);
1240
1241 desc += 4;
1242 /* VS entry for GS->VS ring */
1243 /* stride 0, num records - size, elsize0,
1244 index stride 0 */
1245 desc[0] = gsvs_va;
1246 desc[1] = S_008F04_BASE_ADDRESS_HI(gsvs_va >> 32)|
1247 S_008F04_STRIDE(0) |
1248 S_008F04_SWIZZLE_ENABLE(false);
1249 desc[2] = gsvs_ring_size;
1250 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1251 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1252 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1253 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1254 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1255 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1256 S_008F0C_ELEMENT_SIZE(0) |
1257 S_008F0C_INDEX_STRIDE(0) |
1258 S_008F0C_ADD_TID_ENABLE(false);
1259 desc += 4;
1260
1261 /* stride gsvs_itemsize, num records 64
1262 elsize 4, index stride 16 */
1263 /* shader will patch stride and desc[2] */
1264 desc[0] = gsvs_va;
1265 desc[1] = S_008F04_BASE_ADDRESS_HI(gsvs_va >> 32)|
1266 S_008F04_STRIDE(0) |
1267 S_008F04_SWIZZLE_ENABLE(true);
1268 desc[2] = 0;
1269 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1270 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1271 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1272 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1273 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1274 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1275 S_008F0C_ELEMENT_SIZE(1) |
1276 S_008F0C_INDEX_STRIDE(1) |
1277 S_008F0C_ADD_TID_ENABLE(true);
1278 desc += 4;
1279
1280 desc[0] = tess_factor_va;
1281 desc[1] = S_008F04_BASE_ADDRESS_HI(tess_factor_va >> 32) |
1282 S_008F04_STRIDE(0) |
1283 S_008F04_SWIZZLE_ENABLE(false);
1284 desc[2] = tess_factor_ring_size;
1285 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1286 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1287 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1288 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1289 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1290 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1291 S_008F0C_ELEMENT_SIZE(0) |
1292 S_008F0C_INDEX_STRIDE(0) |
1293 S_008F0C_ADD_TID_ENABLE(false);
1294 desc += 4;
1295
1296 desc[0] = tess_offchip_va;
1297 desc[1] = S_008F04_BASE_ADDRESS_HI(tess_offchip_va >> 32) |
1298 S_008F04_STRIDE(0) |
1299 S_008F04_SWIZZLE_ENABLE(false);
1300 desc[2] = tess_offchip_ring_size;
1301 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1302 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1303 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1304 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1305 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1306 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1307 S_008F0C_ELEMENT_SIZE(0) |
1308 S_008F0C_INDEX_STRIDE(0) |
1309 S_008F0C_ADD_TID_ENABLE(false);
1310 desc += 4;
1311
1312 /* add sample positions after all rings */
1313 memcpy(desc, queue->device->sample_locations_1x, 8);
1314 desc += 2;
1315 memcpy(desc, queue->device->sample_locations_2x, 16);
1316 desc += 4;
1317 memcpy(desc, queue->device->sample_locations_4x, 32);
1318 desc += 8;
1319 memcpy(desc, queue->device->sample_locations_8x, 64);
1320 desc += 16;
1321 memcpy(desc, queue->device->sample_locations_16x, 128);
1322 }
1323
1324 static unsigned
1325 radv_get_hs_offchip_param(struct radv_device *device, uint32_t *max_offchip_buffers_p)
1326 {
1327 bool double_offchip_buffers = device->physical_device->rad_info.chip_class >= CIK &&
1328 device->physical_device->rad_info.family != CHIP_CARRIZO &&
1329 device->physical_device->rad_info.family != CHIP_STONEY;
1330 unsigned max_offchip_buffers_per_se = double_offchip_buffers ? 128 : 64;
1331 unsigned max_offchip_buffers = max_offchip_buffers_per_se *
1332 device->physical_device->rad_info.max_se;
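/* For example, a 4-SE part with doubled offchip buffers starts at
 * 128 * 4 = 512, which the per-generation clamp below reduces to 508
 * (126 on SI). */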
1333 unsigned offchip_granularity;
1334 unsigned hs_offchip_param;
1335 switch (device->tess_offchip_block_dw_size) {
1336 default:
1337 assert(0);
1338 /* fall through */
1339 case 8192:
1340 offchip_granularity = V_03093C_X_8K_DWORDS;
1341 break;
1342 case 4096:
1343 offchip_granularity = V_03093C_X_4K_DWORDS;
1344 break;
1345 }
1346
1347 switch (device->physical_device->rad_info.chip_class) {
1348 case SI:
1349 max_offchip_buffers = MIN2(max_offchip_buffers, 126);
1350 break;
1351 case CIK:
1352 case VI:
1353 case GFX9:
1354 default:
1355 max_offchip_buffers = MIN2(max_offchip_buffers, 508);
1356 break;
1357 }
1358
1359 *max_offchip_buffers_p = max_offchip_buffers;
1360 if (device->physical_device->rad_info.chip_class >= CIK) {
1361 if (device->physical_device->rad_info.chip_class >= VI)
1362 --max_offchip_buffers;
1363 hs_offchip_param =
1364 S_03093C_OFFCHIP_BUFFERING(max_offchip_buffers) |
1365 S_03093C_OFFCHIP_GRANULARITY(offchip_granularity);
1366 } else {
1367 hs_offchip_param =
1368 S_0089B0_OFFCHIP_BUFFERING(max_offchip_buffers);
1369 }
1370 return hs_offchip_param;
1371 }
1372
1373 static VkResult
1374 radv_get_preamble_cs(struct radv_queue *queue,
1375 uint32_t scratch_size,
1376 uint32_t compute_scratch_size,
1377 uint32_t esgs_ring_size,
1378 uint32_t gsvs_ring_size,
1379 bool needs_tess_rings,
1380 bool needs_sample_positions,
1381 struct radeon_winsys_cs **initial_full_flush_preamble_cs,
1382 struct radeon_winsys_cs **initial_preamble_cs,
1383 struct radeon_winsys_cs **continue_preamble_cs)
1384 {
1385 struct radeon_winsys_bo *scratch_bo = NULL;
1386 struct radeon_winsys_bo *descriptor_bo = NULL;
1387 struct radeon_winsys_bo *compute_scratch_bo = NULL;
1388 struct radeon_winsys_bo *esgs_ring_bo = NULL;
1389 struct radeon_winsys_bo *gsvs_ring_bo = NULL;
1390 struct radeon_winsys_bo *tess_factor_ring_bo = NULL;
1391 struct radeon_winsys_bo *tess_offchip_ring_bo = NULL;
1392 struct radeon_winsys_cs *dest_cs[3] = {0};
1393 bool add_tess_rings = false, add_sample_positions = false;
1394 unsigned tess_factor_ring_size = 0, tess_offchip_ring_size = 0;
1395 unsigned max_offchip_buffers;
1396 unsigned hs_offchip_param = 0;
1397 if (!queue->has_tess_rings) {
1398 if (needs_tess_rings)
1399 add_tess_rings = true;
1400 }
1401 if (!queue->has_sample_positions) {
1402 if (needs_sample_positions)
1403 add_sample_positions = true;
1404 }
1405 tess_factor_ring_size = 32768 * queue->device->physical_device->rad_info.max_se;
1406 hs_offchip_param = radv_get_hs_offchip_param(queue->device,
1407 &max_offchip_buffers);
1408 tess_offchip_ring_size = max_offchip_buffers *
1409 queue->device->tess_offchip_block_dw_size * 4;
1410
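/* If the buffers already attached to the queue are big enough and no new
 * tess rings or sample positions are required, reuse the existing
 * preambles instead of rebuilding them. */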
1411 if (scratch_size <= queue->scratch_size &&
1412 compute_scratch_size <= queue->compute_scratch_size &&
1413 esgs_ring_size <= queue->esgs_ring_size &&
1414 gsvs_ring_size <= queue->gsvs_ring_size &&
1415 !add_tess_rings && !add_sample_positions &&
1416 queue->initial_preamble_cs) {
1417 *initial_full_flush_preamble_cs = queue->initial_full_flush_preamble_cs;
1418 *initial_preamble_cs = queue->initial_preamble_cs;
1419 *continue_preamble_cs = queue->continue_preamble_cs;
1420 if (!scratch_size && !compute_scratch_size && !esgs_ring_size && !gsvs_ring_size)
1421 *continue_preamble_cs = NULL;
1422 return VK_SUCCESS;
1423 }
1424
1425 if (scratch_size > queue->scratch_size) {
1426 scratch_bo = queue->device->ws->buffer_create(queue->device->ws,
1427 scratch_size,
1428 4096,
1429 RADEON_DOMAIN_VRAM,
1430 RADEON_FLAG_NO_CPU_ACCESS);
1431 if (!scratch_bo)
1432 goto fail;
1433 } else
1434 scratch_bo = queue->scratch_bo;
1435
1436 if (compute_scratch_size > queue->compute_scratch_size) {
1437 compute_scratch_bo = queue->device->ws->buffer_create(queue->device->ws,
1438 compute_scratch_size,
1439 4096,
1440 RADEON_DOMAIN_VRAM,
1441 RADEON_FLAG_NO_CPU_ACCESS);
1442 if (!compute_scratch_bo)
1443 goto fail;
1444
1445 } else
1446 compute_scratch_bo = queue->compute_scratch_bo;
1447
1448 if (esgs_ring_size > queue->esgs_ring_size) {
1449 esgs_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
1450 esgs_ring_size,
1451 4096,
1452 RADEON_DOMAIN_VRAM,
1453 RADEON_FLAG_NO_CPU_ACCESS);
1454 if (!esgs_ring_bo)
1455 goto fail;
1456 } else {
1457 esgs_ring_bo = queue->esgs_ring_bo;
1458 esgs_ring_size = queue->esgs_ring_size;
1459 }
1460
1461 if (gsvs_ring_size > queue->gsvs_ring_size) {
1462 gsvs_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
1463 gsvs_ring_size,
1464 4096,
1465 RADEON_DOMAIN_VRAM,
1466 RADEON_FLAG_NO_CPU_ACCESS);
1467 if (!gsvs_ring_bo)
1468 goto fail;
1469 } else {
1470 gsvs_ring_bo = queue->gsvs_ring_bo;
1471 gsvs_ring_size = queue->gsvs_ring_size;
1472 }
1473
1474 if (add_tess_rings) {
1475 tess_factor_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
1476 tess_factor_ring_size,
1477 256,
1478 RADEON_DOMAIN_VRAM,
1479 RADEON_FLAG_NO_CPU_ACCESS);
1480 if (!tess_factor_ring_bo)
1481 goto fail;
1482 tess_offchip_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
1483 tess_offchip_ring_size,
1484 256,
1485 RADEON_DOMAIN_VRAM,
1486 RADEON_FLAG_NO_CPU_ACCESS);
1487 if (!tess_offchip_ring_bo)
1488 goto fail;
1489 } else {
1490 tess_factor_ring_bo = queue->tess_factor_ring_bo;
1491 tess_offchip_ring_bo = queue->tess_offchip_ring_bo;
1492 }
1493
1494 if (scratch_bo != queue->scratch_bo ||
1495 esgs_ring_bo != queue->esgs_ring_bo ||
1496 gsvs_ring_bo != queue->gsvs_ring_bo ||
1497 tess_factor_ring_bo != queue->tess_factor_ring_bo ||
1498 tess_offchip_ring_bo != queue->tess_offchip_ring_bo || add_sample_positions) {
1499 uint32_t size = 0;
1500 if (gsvs_ring_bo || esgs_ring_bo ||
1501 tess_factor_ring_bo || tess_offchip_ring_bo || add_sample_positions) {
1502 size = 112; /* 2 dword + 2 padding + 4 dword * 6 */
1503 if (add_sample_positions)
1504 size += 256; /* (1+2+4+8+16) samples * 2 floats * 4 bytes = 248 bytes, rounded up. */
1505 }
1506 else if (scratch_bo)
1507 size = 8; /* 2 dword */
1508
1509 descriptor_bo = queue->device->ws->buffer_create(queue->device->ws,
1510 size,
1511 4096,
1512 RADEON_DOMAIN_VRAM,
1513 RADEON_FLAG_CPU_ACCESS);
1514 if (!descriptor_bo)
1515 goto fail;
1516 } else
1517 descriptor_bo = queue->descriptor_bo;
1518
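/* Build three preamble variants: dest_cs[0] is the initial preamble with a
 * full cache flush, dest_cs[1] the initial preamble with cache invalidation
 * only, and dest_cs[2] the continue preamble with no flush. */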
1519 for (int i = 0; i < 3; ++i) {
1520 struct radeon_winsys_cs *cs = NULL;
1521 cs = queue->device->ws->cs_create(queue->device->ws,
1522 queue->queue_family_index ? RING_COMPUTE : RING_GFX);
1523 if (!cs)
1524 goto fail;
1525
1526 dest_cs[i] = cs;
1527
1528 if (scratch_bo)
1529 queue->device->ws->cs_add_buffer(cs, scratch_bo, 8);
1530
1531 if (esgs_ring_bo)
1532 queue->device->ws->cs_add_buffer(cs, esgs_ring_bo, 8);
1533
1534 if (gsvs_ring_bo)
1535 queue->device->ws->cs_add_buffer(cs, gsvs_ring_bo, 8);
1536
1537 if (tess_factor_ring_bo)
1538 queue->device->ws->cs_add_buffer(cs, tess_factor_ring_bo, 8);
1539
1540 if (tess_offchip_ring_bo)
1541 queue->device->ws->cs_add_buffer(cs, tess_offchip_ring_bo, 8);
1542
1543 if (descriptor_bo)
1544 queue->device->ws->cs_add_buffer(cs, descriptor_bo, 8);
1545
1546 if (descriptor_bo != queue->descriptor_bo) {
1547 uint32_t *map = (uint32_t*)queue->device->ws->buffer_map(descriptor_bo);
1548
1549 if (scratch_bo) {
1550 uint64_t scratch_va = radv_buffer_get_va(scratch_bo);
1551 uint32_t rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
1552 S_008F04_SWIZZLE_ENABLE(1);
1553 map[0] = scratch_va;
1554 map[1] = rsrc1;
1555 }
1556
1557 if (esgs_ring_bo || gsvs_ring_bo || tess_factor_ring_bo || tess_offchip_ring_bo ||
1558 add_sample_positions)
1559 fill_geom_tess_rings(queue, map, add_sample_positions,
1560 esgs_ring_size, esgs_ring_bo,
1561 gsvs_ring_size, gsvs_ring_bo,
1562 tess_factor_ring_size, tess_factor_ring_bo,
1563 tess_offchip_ring_size, tess_offchip_ring_bo);
1564
1565 queue->device->ws->buffer_unmap(descriptor_bo);
1566 }
1567
1568 if (esgs_ring_bo || gsvs_ring_bo || tess_factor_ring_bo || tess_offchip_ring_bo) {
1569 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1570 radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1571 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1572 radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
1573 }
1574
1575 if (esgs_ring_bo || gsvs_ring_bo) {
1576 if (queue->device->physical_device->rad_info.chip_class >= CIK) {
1577 radeon_set_uconfig_reg_seq(cs, R_030900_VGT_ESGS_RING_SIZE, 2);
1578 radeon_emit(cs, esgs_ring_size >> 8);
1579 radeon_emit(cs, gsvs_ring_size >> 8);
1580 } else {
1581 radeon_set_config_reg_seq(cs, R_0088C8_VGT_ESGS_RING_SIZE, 2);
1582 radeon_emit(cs, esgs_ring_size >> 8);
1583 radeon_emit(cs, gsvs_ring_size >> 8);
1584 }
1585 }
1586
1587 if (tess_factor_ring_bo) {
1588 uint64_t tf_va = radv_buffer_get_va(tess_factor_ring_bo);
1589 if (queue->device->physical_device->rad_info.chip_class >= CIK) {
1590 radeon_set_uconfig_reg(cs, R_030938_VGT_TF_RING_SIZE,
1591 S_030938_SIZE(tess_factor_ring_size / 4));
1592 radeon_set_uconfig_reg(cs, R_030940_VGT_TF_MEMORY_BASE,
1593 tf_va >> 8);
1594 if (queue->device->physical_device->rad_info.chip_class >= GFX9) {
1595 radeon_set_uconfig_reg(cs, R_030944_VGT_TF_MEMORY_BASE_HI,
1596 tf_va >> 40);
1597 }
1598 radeon_set_uconfig_reg(cs, R_03093C_VGT_HS_OFFCHIP_PARAM, hs_offchip_param);
1599 } else {
1600 radeon_set_config_reg(cs, R_008988_VGT_TF_RING_SIZE,
1601 S_008988_SIZE(tess_factor_ring_size / 4));
1602 radeon_set_config_reg(cs, R_0089B8_VGT_TF_MEMORY_BASE,
1603 tf_va >> 8);
1604 radeon_set_config_reg(cs, R_0089B0_VGT_HS_OFFCHIP_PARAM,
1605 hs_offchip_param);
1606 }
1607 }
1608
1609 if (descriptor_bo) {
1610 uint64_t va = radv_buffer_get_va(descriptor_bo);
1611 if (queue->device->physical_device->rad_info.chip_class >= GFX9) {
1612 uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0,
1613 R_00B130_SPI_SHADER_USER_DATA_VS_0,
1614 R_00B208_SPI_SHADER_USER_DATA_ADDR_LO_GS,
1615 R_00B408_SPI_SHADER_USER_DATA_ADDR_LO_HS};
1616
1617 for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
1618 radeon_set_sh_reg_seq(cs, regs[i], 2);
1619 radeon_emit(cs, va);
1620 radeon_emit(cs, va >> 32);
1621 }
1622 } else {
1623 uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0,
1624 R_00B130_SPI_SHADER_USER_DATA_VS_0,
1625 R_00B230_SPI_SHADER_USER_DATA_GS_0,
1626 R_00B330_SPI_SHADER_USER_DATA_ES_0,
1627 R_00B430_SPI_SHADER_USER_DATA_HS_0,
1628 R_00B530_SPI_SHADER_USER_DATA_LS_0};
1629
1630 for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
1631 radeon_set_sh_reg_seq(cs, regs[i], 2);
1632 radeon_emit(cs, va);
1633 radeon_emit(cs, va >> 32);
1634 }
1635 }
1636 }
1637
1638 if (compute_scratch_bo) {
1639 uint64_t scratch_va = radv_buffer_get_va(compute_scratch_bo);
1640 uint32_t rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
1641 S_008F04_SWIZZLE_ENABLE(1);
1642
1643 queue->device->ws->cs_add_buffer(cs, compute_scratch_bo, 8);
1644
1645 radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
1646 radeon_emit(cs, scratch_va);
1647 radeon_emit(cs, rsrc1);
1648 }
1649
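	/* Preamble 0 is the full-flush variant (wait for earlier work and
	 * invalidate all caches), preamble 1 only invalidates caches, and the
	 * continue preamble (i == 2) emits no flush at all. */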
1650 if (i == 0) {
1651 si_cs_emit_cache_flush(cs,
1652 false,
1653 queue->device->physical_device->rad_info.chip_class,
1654 NULL, 0,
1655 queue->queue_family_index == RING_COMPUTE &&
1656 queue->device->physical_device->rad_info.chip_class >= CIK,
1657 (queue->queue_family_index == RADV_QUEUE_COMPUTE ? RADV_CMD_FLAG_CS_PARTIAL_FLUSH : (RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH)) |
1658 RADV_CMD_FLAG_INV_ICACHE |
1659 RADV_CMD_FLAG_INV_SMEM_L1 |
1660 RADV_CMD_FLAG_INV_VMEM_L1 |
1661 RADV_CMD_FLAG_INV_GLOBAL_L2);
1662 } else if (i == 1) {
1663 si_cs_emit_cache_flush(cs,
1664 false,
1665 queue->device->physical_device->rad_info.chip_class,
1666 NULL, 0,
1667 queue->queue_family_index == RING_COMPUTE &&
1668 queue->device->physical_device->rad_info.chip_class >= CIK,
1669 RADV_CMD_FLAG_INV_ICACHE |
1670 RADV_CMD_FLAG_INV_SMEM_L1 |
1671 RADV_CMD_FLAG_INV_VMEM_L1 |
1672 RADV_CMD_FLAG_INV_GLOBAL_L2);
1673 }
1674
1675 if (!queue->device->ws->cs_finalize(cs))
1676 goto fail;
1677 }
1678
1679 if (queue->initial_full_flush_preamble_cs)
1680 queue->device->ws->cs_destroy(queue->initial_full_flush_preamble_cs);
1681
1682 if (queue->initial_preamble_cs)
1683 queue->device->ws->cs_destroy(queue->initial_preamble_cs);
1684
1685 if (queue->continue_preamble_cs)
1686 queue->device->ws->cs_destroy(queue->continue_preamble_cs);
1687
1688 queue->initial_full_flush_preamble_cs = dest_cs[0];
1689 queue->initial_preamble_cs = dest_cs[1];
1690 queue->continue_preamble_cs = dest_cs[2];
1691
1692 if (scratch_bo != queue->scratch_bo) {
1693 if (queue->scratch_bo)
1694 queue->device->ws->buffer_destroy(queue->scratch_bo);
1695 queue->scratch_bo = scratch_bo;
1696 queue->scratch_size = scratch_size;
1697 }
1698
1699 if (compute_scratch_bo != queue->compute_scratch_bo) {
1700 if (queue->compute_scratch_bo)
1701 queue->device->ws->buffer_destroy(queue->compute_scratch_bo);
1702 queue->compute_scratch_bo = compute_scratch_bo;
1703 queue->compute_scratch_size = compute_scratch_size;
1704 }
1705
1706 if (esgs_ring_bo != queue->esgs_ring_bo) {
1707 if (queue->esgs_ring_bo)
1708 queue->device->ws->buffer_destroy(queue->esgs_ring_bo);
1709 queue->esgs_ring_bo = esgs_ring_bo;
1710 queue->esgs_ring_size = esgs_ring_size;
1711 }
1712
1713 if (gsvs_ring_bo != queue->gsvs_ring_bo) {
1714 if (queue->gsvs_ring_bo)
1715 queue->device->ws->buffer_destroy(queue->gsvs_ring_bo);
1716 queue->gsvs_ring_bo = gsvs_ring_bo;
1717 queue->gsvs_ring_size = gsvs_ring_size;
1718 }
1719
1720 if (tess_factor_ring_bo != queue->tess_factor_ring_bo) {
1721 queue->tess_factor_ring_bo = tess_factor_ring_bo;
1722 }
1723
1724 if (tess_offchip_ring_bo != queue->tess_offchip_ring_bo) {
1725 queue->tess_offchip_ring_bo = tess_offchip_ring_bo;
1726 queue->has_tess_rings = true;
1727 }
1728
1729 if (descriptor_bo != queue->descriptor_bo) {
1730 if (queue->descriptor_bo)
1731 queue->device->ws->buffer_destroy(queue->descriptor_bo);
1732
1733 queue->descriptor_bo = descriptor_bo;
1734 }
1735
1736 if (add_sample_positions)
1737 queue->has_sample_positions = true;
1738
1739 *initial_full_flush_preamble_cs = queue->initial_full_flush_preamble_cs;
1740 *initial_preamble_cs = queue->initial_preamble_cs;
1741 *continue_preamble_cs = queue->continue_preamble_cs;
1742 if (!scratch_size && !compute_scratch_size && !esgs_ring_size && !gsvs_ring_size)
1743 *continue_preamble_cs = NULL;
1744 return VK_SUCCESS;
1745 fail:
1746 for (int i = 0; i < ARRAY_SIZE(dest_cs); ++i)
1747 if (dest_cs[i])
1748 queue->device->ws->cs_destroy(dest_cs[i]);
1749 if (descriptor_bo && descriptor_bo != queue->descriptor_bo)
1750 queue->device->ws->buffer_destroy(descriptor_bo);
1751 if (scratch_bo && scratch_bo != queue->scratch_bo)
1752 queue->device->ws->buffer_destroy(scratch_bo);
1753 if (compute_scratch_bo && compute_scratch_bo != queue->compute_scratch_bo)
1754 queue->device->ws->buffer_destroy(compute_scratch_bo);
1755 if (esgs_ring_bo && esgs_ring_bo != queue->esgs_ring_bo)
1756 queue->device->ws->buffer_destroy(esgs_ring_bo);
1757 if (gsvs_ring_bo && gsvs_ring_bo != queue->gsvs_ring_bo)
1758 queue->device->ws->buffer_destroy(gsvs_ring_bo);
1759 if (tess_factor_ring_bo && tess_factor_ring_bo != queue->tess_factor_ring_bo)
1760 queue->device->ws->buffer_destroy(tess_factor_ring_bo);
1761 if (tess_offchip_ring_bo && tess_offchip_ring_bo != queue->tess_offchip_ring_bo)
1762 queue->device->ws->buffer_destroy(tess_offchip_ring_bo);
1763 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
1764 }
1765
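/* Split the semaphores of a submission into kernel syncobjs and legacy
 * winsys semaphores and collect them into the arrays the winsys expects. */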
1766 static VkResult radv_alloc_sem_counts(struct radv_winsys_sem_counts *counts,
1767 int num_sems,
1768 const VkSemaphore *sems,
1769 bool reset_temp)
1770 {
1771 int syncobj_idx = 0, sem_idx = 0;
1772
1773 if (num_sems == 0)
1774 return VK_SUCCESS;
1775 for (uint32_t i = 0; i < num_sems; i++) {
1776 RADV_FROM_HANDLE(radv_semaphore, sem, sems[i]);
1777
1778 if (sem->temp_syncobj || sem->syncobj)
1779 counts->syncobj_count++;
1780 else
1781 counts->sem_count++;
1782 }
1783
1784 if (counts->syncobj_count) {
1785 counts->syncobj = (uint32_t *)malloc(sizeof(uint32_t) * counts->syncobj_count);
1786 if (!counts->syncobj)
1787 return VK_ERROR_OUT_OF_HOST_MEMORY;
1788 }
1789
1790 if (counts->sem_count) {
1791 counts->sem = (struct radeon_winsys_sem **)malloc(sizeof(struct radeon_winsys_sem *) * counts->sem_count);
1792 if (!counts->sem) {
1793 free(counts->syncobj);
1794 return VK_ERROR_OUT_OF_HOST_MEMORY;
1795 }
1796 }
1797
1798 for (uint32_t i = 0; i < num_sems; i++) {
1799 RADV_FROM_HANDLE(radv_semaphore, sem, sems[i]);
1800
1801 if (sem->temp_syncobj) {
1802 counts->syncobj[syncobj_idx++] = sem->temp_syncobj;
1803 if (reset_temp) {
1804 /* After waiting on a temporarily imported syncobj, drop it. */
1805 sem->temp_syncobj = 0;
1806 }
1807 }
1808 else if (sem->syncobj)
1809 counts->syncobj[syncobj_idx++] = sem->syncobj;
1810 else {
1811 assert(sem->sem);
1812 counts->sem[sem_idx++] = sem->sem;
1813 }
1814 }
1815
1816 return VK_SUCCESS;
1817 }
1818
1819 void radv_free_sem_info(struct radv_winsys_sem_info *sem_info)
1820 {
1821 free(sem_info->wait.syncobj);
1822 free(sem_info->wait.sem);
1823 free(sem_info->signal.syncobj);
1824 free(sem_info->signal.sem);
1825 }
1826
1827 VkResult radv_alloc_sem_info(struct radv_winsys_sem_info *sem_info,
1828 int num_wait_sems,
1829 const VkSemaphore *wait_sems,
1830 int num_signal_sems,
1831 const VkSemaphore *signal_sems)
1832 {
1833 VkResult ret;
1834 memset(sem_info, 0, sizeof(*sem_info));
1835
1836 ret = radv_alloc_sem_counts(&sem_info->wait, num_wait_sems, wait_sems, true);
1837 if (ret)
1838 return ret;
1839 ret = radv_alloc_sem_counts(&sem_info->signal, num_signal_sems, signal_sems, false);
1840 if (ret)
1841 radv_free_sem_info(sem_info);
1842
1843 /* caller can override these */
1844 sem_info->cs_emit_wait = true;
1845 sem_info->cs_emit_signal = true;
1846 return ret;
1847 }
1848
1849 VkResult radv_QueueSubmit(
1850 VkQueue _queue,
1851 uint32_t submitCount,
1852 const VkSubmitInfo* pSubmits,
1853 VkFence _fence)
1854 {
1855 RADV_FROM_HANDLE(radv_queue, queue, _queue);
1856 RADV_FROM_HANDLE(radv_fence, fence, _fence);
1857 struct radeon_winsys_fence *base_fence = fence ? fence->fence : NULL;
1858 struct radeon_winsys_ctx *ctx = queue->hw_ctx;
1859 int ret;
1860 uint32_t max_cs_submission = queue->device->trace_bo ? 1 : UINT32_MAX;
1861 uint32_t scratch_size = 0;
1862 uint32_t compute_scratch_size = 0;
1863 uint32_t esgs_ring_size = 0, gsvs_ring_size = 0;
1864 struct radeon_winsys_cs *initial_preamble_cs = NULL, *initial_flush_preamble_cs = NULL, *continue_preamble_cs = NULL;
1865 VkResult result;
1866 bool fence_emitted = false;
1867 bool tess_rings_needed = false;
1868 bool sample_positions_needed = false;
1869
1870 /* Do this first so failing to allocate scratch buffers can't result in
1871 * partially executed submissions. */
1872 for (uint32_t i = 0; i < submitCount; i++) {
1873 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
1874 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer,
1875 pSubmits[i].pCommandBuffers[j]);
1876
1877 scratch_size = MAX2(scratch_size, cmd_buffer->scratch_size_needed);
1878 compute_scratch_size = MAX2(compute_scratch_size,
1879 cmd_buffer->compute_scratch_size_needed);
1880 esgs_ring_size = MAX2(esgs_ring_size, cmd_buffer->esgs_ring_size_needed);
1881 gsvs_ring_size = MAX2(gsvs_ring_size, cmd_buffer->gsvs_ring_size_needed);
1882 tess_rings_needed |= cmd_buffer->tess_rings_needed;
1883 sample_positions_needed |= cmd_buffer->sample_positions_needed;
1884 }
1885 }
1886
1887 result = radv_get_preamble_cs(queue, scratch_size, compute_scratch_size,
1888 esgs_ring_size, gsvs_ring_size, tess_rings_needed,
1889 sample_positions_needed, &initial_flush_preamble_cs,
1890 &initial_preamble_cs, &continue_preamble_cs);
1891 if (result != VK_SUCCESS)
1892 return result;
1893
1894 for (uint32_t i = 0; i < submitCount; i++) {
1895 struct radeon_winsys_cs **cs_array;
1896 bool do_flush = !i || pSubmits[i].pWaitDstStageMask;
1897 bool can_patch = true;
1898 uint32_t advance;
1899 struct radv_winsys_sem_info sem_info;
1900
1901 result = radv_alloc_sem_info(&sem_info,
1902 pSubmits[i].waitSemaphoreCount,
1903 pSubmits[i].pWaitSemaphores,
1904 pSubmits[i].signalSemaphoreCount,
1905 pSubmits[i].pSignalSemaphores);
1906 if (result != VK_SUCCESS)
1907 return result;
1908
1909 if (!pSubmits[i].commandBufferCount) {
1910 if (pSubmits[i].waitSemaphoreCount || pSubmits[i].signalSemaphoreCount) {
1911 ret = queue->device->ws->cs_submit(ctx, queue->queue_idx,
1912 &queue->device->empty_cs[queue->queue_family_index],
1913 1, NULL, NULL,
1914 &sem_info,
1915 false, base_fence);
1916 if (ret) {
1917 radv_loge("failed to submit CS %d\n", i);
1918 abort();
1919 }
1920 fence_emitted = true;
1921 }
1922 radv_free_sem_info(&sem_info);
1923 continue;
1924 }
1925
1926 cs_array = malloc(sizeof(struct radeon_winsys_cs *) *
1927 (pSubmits[i].commandBufferCount));
1928
1929 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
1930 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer,
1931 pSubmits[i].pCommandBuffers[j]);
1932 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1933
1934 cs_array[j] = cmd_buffer->cs;
1935 if ((cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT))
1936 can_patch = false;
1937 }
1938
1939 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j += advance) {
1940 struct radeon_winsys_cs *initial_preamble = (do_flush && !j) ? initial_flush_preamble_cs : initial_preamble_cs;
1941 advance = MIN2(max_cs_submission,
1942 pSubmits[i].commandBufferCount - j);
1943
1944 if (queue->device->trace_bo)
1945 *queue->device->trace_id_ptr = 0;
1946
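			/* Only the first chunk of this submit waits on the wait
			 * semaphores and only the last chunk signals, since one
			 * VkSubmitInfo may be split into several winsys submissions. */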
1947 sem_info.cs_emit_wait = j == 0;
1948 sem_info.cs_emit_signal = j + advance == pSubmits[i].commandBufferCount;
1949
1950 ret = queue->device->ws->cs_submit(ctx, queue->queue_idx, cs_array + j,
1951 advance, initial_preamble, continue_preamble_cs,
1952 &sem_info,
1953 can_patch, base_fence);
1954
1955 if (ret) {
1956 radv_loge("failed to submit CS %d\n", i);
1957 abort();
1958 }
1959 fence_emitted = true;
1960 if (queue->device->trace_bo) {
1961 radv_check_gpu_hangs(queue, cs_array[j]);
1962 }
1963 }
1964
1965 radv_free_sem_info(&sem_info);
1966 free(cs_array);
1967 }
1968
1969 if (fence) {
1970 if (!fence_emitted) {
1971 struct radv_winsys_sem_info sem_info = {0};
1972 ret = queue->device->ws->cs_submit(ctx, queue->queue_idx,
1973 &queue->device->empty_cs[queue->queue_family_index],
1974 1, NULL, NULL, &sem_info,
1975 false, base_fence);
1976 }
1977 fence->submitted = true;
1978 }
1979
1980 return VK_SUCCESS;
1981 }
1982
1983 VkResult radv_QueueWaitIdle(
1984 VkQueue _queue)
1985 {
1986 RADV_FROM_HANDLE(radv_queue, queue, _queue);
1987
1988 queue->device->ws->ctx_wait_idle(queue->hw_ctx,
1989 radv_queue_family_to_ring(queue->queue_family_index),
1990 queue->queue_idx);
1991 return VK_SUCCESS;
1992 }
1993
1994 VkResult radv_DeviceWaitIdle(
1995 VkDevice _device)
1996 {
1997 RADV_FROM_HANDLE(radv_device, device, _device);
1998
1999 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
2000 for (unsigned q = 0; q < device->queue_count[i]; q++) {
2001 radv_QueueWaitIdle(radv_queue_to_handle(&device->queues[i][q]));
2002 }
2003 }
2004 return VK_SUCCESS;
2005 }
2006
2007 PFN_vkVoidFunction radv_GetInstanceProcAddr(
2008 VkInstance instance,
2009 const char* pName)
2010 {
2011 return radv_lookup_entrypoint(pName);
2012 }
2013
2014 /* The loader wants us to expose a second GetInstanceProcAddr function
2015 * to work around certain LD_PRELOAD issues seen in apps.
2016 */
2017 PUBLIC
2018 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
2019 VkInstance instance,
2020 const char* pName);
2021
2022 PUBLIC
2023 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
2024 VkInstance instance,
2025 const char* pName)
2026 {
2027 return radv_GetInstanceProcAddr(instance, pName);
2028 }
2029
2030 PFN_vkVoidFunction radv_GetDeviceProcAddr(
2031 VkDevice device,
2032 const char* pName)
2033 {
2034 return radv_lookup_entrypoint(pName);
2035 }
2036
2037 bool radv_get_memory_fd(struct radv_device *device,
2038 struct radv_device_memory *memory,
2039 int *pFD)
2040 {
2041 struct radeon_bo_metadata metadata;
2042
2043 if (memory->image) {
2044 radv_init_metadata(device, memory->image, &metadata);
2045 device->ws->buffer_set_metadata(memory->bo, &metadata);
2046 }
2047
2048 return device->ws->buffer_get_fd(device->ws, memory->bo,
2049 pFD);
2050 }
2051
2052 VkResult radv_alloc_memory(VkDevice _device,
2053 const VkMemoryAllocateInfo* pAllocateInfo,
2054 const VkAllocationCallbacks* pAllocator,
2055 enum radv_mem_flags_bits mem_flags,
2056 VkDeviceMemory* pMem)
2057 {
2058 RADV_FROM_HANDLE(radv_device, device, _device);
2059 struct radv_device_memory *mem;
2060 VkResult result;
2061 enum radeon_bo_domain domain;
2062 uint32_t flags = 0;
2063
2064 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
2065
2066 if (pAllocateInfo->allocationSize == 0) {
2067 /* Apparently, this is allowed */
2068 *pMem = VK_NULL_HANDLE;
2069 return VK_SUCCESS;
2070 }
2071
2072 const VkImportMemoryFdInfoKHR *import_info =
2073 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
2074 const VkMemoryDedicatedAllocateInfoKHR *dedicate_info =
2075 vk_find_struct_const(pAllocateInfo->pNext, MEMORY_DEDICATED_ALLOCATE_INFO_KHR);
2076
2077 mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
2078 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2079 if (mem == NULL)
2080 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2081
2082 if (dedicate_info) {
2083 mem->image = radv_image_from_handle(dedicate_info->image);
2084 mem->buffer = radv_buffer_from_handle(dedicate_info->buffer);
2085 } else {
2086 mem->image = NULL;
2087 mem->buffer = NULL;
2088 }
2089
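	/* An opaque fd import wraps the existing BO; ownership of the fd is
	 * transferred to the driver, so close it and skip the normal
	 * allocation path. */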
2090 if (import_info) {
2091 assert(import_info->handleType ==
2092 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
2093 mem->bo = device->ws->buffer_from_fd(device->ws, import_info->fd,
2094 NULL, NULL);
2095 if (!mem->bo) {
2096 result = VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR;
2097 goto fail;
2098 } else {
2099 close(import_info->fd);
2100 goto out_success;
2101 }
2102 }
2103
2104 uint64_t alloc_size = align_u64(pAllocateInfo->allocationSize, 4096);
2105 if (pAllocateInfo->memoryTypeIndex == RADV_MEM_TYPE_GTT_WRITE_COMBINE ||
2106 pAllocateInfo->memoryTypeIndex == RADV_MEM_TYPE_GTT_CACHED)
2107 domain = RADEON_DOMAIN_GTT;
2108 else
2109 domain = RADEON_DOMAIN_VRAM;
2110
2111 if (pAllocateInfo->memoryTypeIndex == RADV_MEM_TYPE_VRAM)
2112 flags |= RADEON_FLAG_NO_CPU_ACCESS;
2113 else
2114 flags |= RADEON_FLAG_CPU_ACCESS;
2115
2116 if (pAllocateInfo->memoryTypeIndex == RADV_MEM_TYPE_GTT_WRITE_COMBINE)
2117 flags |= RADEON_FLAG_GTT_WC;
2118
2119 if (mem_flags & RADV_MEM_IMPLICIT_SYNC)
2120 flags |= RADEON_FLAG_IMPLICIT_SYNC;
2121
2122 mem->bo = device->ws->buffer_create(device->ws, alloc_size, device->physical_device->rad_info.max_alignment,
2123 domain, flags);
2124
2125 if (!mem->bo) {
2126 result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
2127 goto fail;
2128 }
2129 mem->type_index = pAllocateInfo->memoryTypeIndex;
2130 out_success:
2131 *pMem = radv_device_memory_to_handle(mem);
2132
2133 return VK_SUCCESS;
2134
2135 fail:
2136 vk_free2(&device->alloc, pAllocator, mem);
2137
2138 return result;
2139 }
2140
2141 VkResult radv_AllocateMemory(
2142 VkDevice _device,
2143 const VkMemoryAllocateInfo* pAllocateInfo,
2144 const VkAllocationCallbacks* pAllocator,
2145 VkDeviceMemory* pMem)
2146 {
2147 return radv_alloc_memory(_device, pAllocateInfo, pAllocator, 0, pMem);
2148 }
2149
2150 void radv_FreeMemory(
2151 VkDevice _device,
2152 VkDeviceMemory _mem,
2153 const VkAllocationCallbacks* pAllocator)
2154 {
2155 RADV_FROM_HANDLE(radv_device, device, _device);
2156 RADV_FROM_HANDLE(radv_device_memory, mem, _mem);
2157
2158 if (mem == NULL)
2159 return;
2160
2161 device->ws->buffer_destroy(mem->bo);
2162 mem->bo = NULL;
2163
2164 vk_free2(&device->alloc, pAllocator, mem);
2165 }
2166
2167 VkResult radv_MapMemory(
2168 VkDevice _device,
2169 VkDeviceMemory _memory,
2170 VkDeviceSize offset,
2171 VkDeviceSize size,
2172 VkMemoryMapFlags flags,
2173 void** ppData)
2174 {
2175 RADV_FROM_HANDLE(radv_device, device, _device);
2176 RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
2177
2178 if (mem == NULL) {
2179 *ppData = NULL;
2180 return VK_SUCCESS;
2181 }
2182
2183 *ppData = device->ws->buffer_map(mem->bo);
2184 if (*ppData) {
2185 *ppData += offset;
2186 return VK_SUCCESS;
2187 }
2188
2189 return VK_ERROR_MEMORY_MAP_FAILED;
2190 }
2191
2192 void radv_UnmapMemory(
2193 VkDevice _device,
2194 VkDeviceMemory _memory)
2195 {
2196 RADV_FROM_HANDLE(radv_device, device, _device);
2197 RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
2198
2199 if (mem == NULL)
2200 return;
2201
2202 device->ws->buffer_unmap(mem->bo);
2203 }
2204
2205 VkResult radv_FlushMappedMemoryRanges(
2206 VkDevice _device,
2207 uint32_t memoryRangeCount,
2208 const VkMappedMemoryRange* pMemoryRanges)
2209 {
2210 return VK_SUCCESS;
2211 }
2212
2213 VkResult radv_InvalidateMappedMemoryRanges(
2214 VkDevice _device,
2215 uint32_t memoryRangeCount,
2216 const VkMappedMemoryRange* pMemoryRanges)
2217 {
2218 return VK_SUCCESS;
2219 }
2220
2221 void radv_GetBufferMemoryRequirements(
2222 VkDevice device,
2223 VkBuffer _buffer,
2224 VkMemoryRequirements* pMemoryRequirements)
2225 {
2226 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
2227
2228 pMemoryRequirements->memoryTypeBits = (1u << RADV_MEM_TYPE_COUNT) - 1;
2229
2230 if (buffer->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)
2231 pMemoryRequirements->alignment = 4096;
2232 else
2233 pMemoryRequirements->alignment = 16;
2234
2235 pMemoryRequirements->size = align64(buffer->size, pMemoryRequirements->alignment);
2236 }
2237
2238 void radv_GetBufferMemoryRequirements2KHR(
2239 VkDevice device,
2240 const VkBufferMemoryRequirementsInfo2KHR* pInfo,
2241 VkMemoryRequirements2KHR* pMemoryRequirements)
2242 {
2243 radv_GetBufferMemoryRequirements(device, pInfo->buffer,
2244 &pMemoryRequirements->memoryRequirements);
2245
2246 vk_foreach_struct(ext, pMemoryRequirements->pNext) {
2247 switch (ext->sType) {
2248 case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR: {
2249 VkMemoryDedicatedRequirementsKHR *req =
2250 (VkMemoryDedicatedRequirementsKHR *) ext;
2251 req->requiresDedicatedAllocation = false;
2252 req->prefersDedicatedAllocation = req->requiresDedicatedAllocation;
2253 break;
2254 }
2255 default:
2256 break;
2257 }
2258 }
2259 }
2260
2261 void radv_GetImageMemoryRequirements(
2262 VkDevice device,
2263 VkImage _image,
2264 VkMemoryRequirements* pMemoryRequirements)
2265 {
2266 RADV_FROM_HANDLE(radv_image, image, _image);
2267
2268 pMemoryRequirements->memoryTypeBits = (1u << RADV_MEM_TYPE_COUNT) - 1;
2269
2270 pMemoryRequirements->size = image->size;
2271 pMemoryRequirements->alignment = image->alignment;
2272 }
2273
2274 void radv_GetImageMemoryRequirements2KHR(
2275 VkDevice device,
2276 const VkImageMemoryRequirementsInfo2KHR* pInfo,
2277 VkMemoryRequirements2KHR* pMemoryRequirements)
2278 {
2279 radv_GetImageMemoryRequirements(device, pInfo->image,
2280 &pMemoryRequirements->memoryRequirements);
2281
2282 RADV_FROM_HANDLE(radv_image, image, pInfo->image);
2283
2284 vk_foreach_struct(ext, pMemoryRequirements->pNext) {
2285 switch (ext->sType) {
2286 case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR: {
2287 VkMemoryDedicatedRequirementsKHR *req =
2288 (VkMemoryDedicatedRequirementsKHR *) ext;
2289 req->requiresDedicatedAllocation = image->shareable;
2290 req->prefersDedicatedAllocation = req->requiresDedicatedAllocation;
2291 break;
2292 }
2293 default:
2294 break;
2295 }
2296 }
2297 }
2298
2299 void radv_GetImageSparseMemoryRequirements(
2300 VkDevice device,
2301 VkImage image,
2302 uint32_t* pSparseMemoryRequirementCount,
2303 VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
2304 {
2305 stub();
2306 }
2307
2308 void radv_GetImageSparseMemoryRequirements2KHR(
2309 VkDevice device,
2310 const VkImageSparseMemoryRequirementsInfo2KHR* pInfo,
2311 uint32_t* pSparseMemoryRequirementCount,
2312 VkSparseImageMemoryRequirements2KHR* pSparseMemoryRequirements)
2313 {
2314 stub();
2315 }
2316
2317 void radv_GetDeviceMemoryCommitment(
2318 VkDevice device,
2319 VkDeviceMemory memory,
2320 VkDeviceSize* pCommittedMemoryInBytes)
2321 {
2322 *pCommittedMemoryInBytes = 0;
2323 }
2324
2325 VkResult radv_BindBufferMemory2KHR(VkDevice device,
2326 uint32_t bindInfoCount,
2327 const VkBindBufferMemoryInfoKHR *pBindInfos)
2328 {
2329 for (uint32_t i = 0; i < bindInfoCount; ++i) {
2330 RADV_FROM_HANDLE(radv_device_memory, mem, pBindInfos[i].memory);
2331 RADV_FROM_HANDLE(radv_buffer, buffer, pBindInfos[i].buffer);
2332
2333 if (mem) {
2334 buffer->bo = mem->bo;
2335 buffer->offset = pBindInfos[i].memoryOffset;
2336 } else {
2337 buffer->bo = NULL;
2338 }
2339 }
2340 return VK_SUCCESS;
2341 }
2342
2343 VkResult radv_BindBufferMemory(
2344 VkDevice device,
2345 VkBuffer buffer,
2346 VkDeviceMemory memory,
2347 VkDeviceSize memoryOffset)
2348 {
2349 const VkBindBufferMemoryInfoKHR info = {
2350 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
2351 .buffer = buffer,
2352 .memory = memory,
2353 .memoryOffset = memoryOffset
2354 };
2355
2356 return radv_BindBufferMemory2KHR(device, 1, &info);
2357 }
2358
2359 VkResult radv_BindImageMemory2KHR(VkDevice device,
2360 uint32_t bindInfoCount,
2361 const VkBindImageMemoryInfoKHR *pBindInfos)
2362 {
2363 for (uint32_t i = 0; i < bindInfoCount; ++i) {
2364 RADV_FROM_HANDLE(radv_device_memory, mem, pBindInfos[i].memory);
2365 RADV_FROM_HANDLE(radv_image, image, pBindInfos[i].image);
2366
2367 if (mem) {
2368 image->bo = mem->bo;
2369 image->offset = pBindInfos[i].memoryOffset;
2370 } else {
2371 image->bo = NULL;
2372 image->offset = 0;
2373 }
2374 }
2375 return VK_SUCCESS;
2376 }
2377
2378
2379 VkResult radv_BindImageMemory(
2380 VkDevice device,
2381 VkImage image,
2382 VkDeviceMemory memory,
2383 VkDeviceSize memoryOffset)
2384 {
2385 const VkBindImageMemoryInfoKHR info = {
2386 .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR,
2387 .image = image,
2388 .memory = memory,
2389 .memoryOffset = memoryOffset
2390 };
2391
2392 return radv_BindImageMemory2KHR(device, 1, &info);
2393 }
2394
2395
2396 static void
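/* Bind (or unbind, when no memory is given) ranges of a sparse buffer's
 * virtual address space to pages of the backing allocation. */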
2397 radv_sparse_buffer_bind_memory(struct radv_device *device,
2398 const VkSparseBufferMemoryBindInfo *bind)
2399 {
2400 RADV_FROM_HANDLE(radv_buffer, buffer, bind->buffer);
2401
2402 for (uint32_t i = 0; i < bind->bindCount; ++i) {
2403 struct radv_device_memory *mem = NULL;
2404
2405 if (bind->pBinds[i].memory != VK_NULL_HANDLE)
2406 mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
2407
2408 device->ws->buffer_virtual_bind(buffer->bo,
2409 bind->pBinds[i].resourceOffset,
2410 bind->pBinds[i].size,
2411 mem ? mem->bo : NULL,
2412 bind->pBinds[i].memoryOffset);
2413 }
2414 }
2415
2416 static void
2417 radv_sparse_image_opaque_bind_memory(struct radv_device *device,
2418 const VkSparseImageOpaqueMemoryBindInfo *bind)
2419 {
2420 RADV_FROM_HANDLE(radv_image, image, bind->image);
2421
2422 for (uint32_t i = 0; i < bind->bindCount; ++i) {
2423 struct radv_device_memory *mem = NULL;
2424
2425 if (bind->pBinds[i].memory != VK_NULL_HANDLE)
2426 mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
2427
2428 device->ws->buffer_virtual_bind(image->bo,
2429 bind->pBinds[i].resourceOffset,
2430 bind->pBinds[i].size,
2431 mem ? mem->bo : NULL,
2432 bind->pBinds[i].memoryOffset);
2433 }
2434 }
2435
2436 VkResult radv_QueueBindSparse(
2437 VkQueue _queue,
2438 uint32_t bindInfoCount,
2439 const VkBindSparseInfo* pBindInfo,
2440 VkFence _fence)
2441 {
2442 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2443 RADV_FROM_HANDLE(radv_queue, queue, _queue);
2444 struct radeon_winsys_fence *base_fence = fence ? fence->fence : NULL;
2445 bool fence_emitted = false;
2446
2447 for (uint32_t i = 0; i < bindInfoCount; ++i) {
2448 struct radv_winsys_sem_info sem_info;
2449 for (uint32_t j = 0; j < pBindInfo[i].bufferBindCount; ++j) {
2450 radv_sparse_buffer_bind_memory(queue->device,
2451 pBindInfo[i].pBufferBinds + j);
2452 }
2453
2454 for (uint32_t j = 0; j < pBindInfo[i].imageOpaqueBindCount; ++j) {
2455 radv_sparse_image_opaque_bind_memory(queue->device,
2456 pBindInfo[i].pImageOpaqueBinds + j);
2457 }
2458
2459 VkResult result;
2460 result = radv_alloc_sem_info(&sem_info,
2461 pBindInfo[i].waitSemaphoreCount,
2462 pBindInfo[i].pWaitSemaphores,
2463 pBindInfo[i].signalSemaphoreCount,
2464 pBindInfo[i].pSignalSemaphores);
2465 if (result != VK_SUCCESS)
2466 return result;
2467
2468 if (pBindInfo[i].waitSemaphoreCount || pBindInfo[i].signalSemaphoreCount) {
2469 queue->device->ws->cs_submit(queue->hw_ctx, queue->queue_idx,
2470 &queue->device->empty_cs[queue->queue_family_index],
2471 1, NULL, NULL,
2472 &sem_info,
2473 false, base_fence);
2474 fence_emitted = true;
2475 if (fence)
2476 fence->submitted = true;
2477 }
2478
2479 radv_free_sem_info(&sem_info);
2480
2481 }
2482
2483 if (fence && !fence_emitted) {
2484 fence->signalled = true;
2485 }
2486
2487 return VK_SUCCESS;
2488 }
2489
2490 VkResult radv_CreateFence(
2491 VkDevice _device,
2492 const VkFenceCreateInfo* pCreateInfo,
2493 const VkAllocationCallbacks* pAllocator,
2494 VkFence* pFence)
2495 {
2496 RADV_FROM_HANDLE(radv_device, device, _device);
2497 struct radv_fence *fence = vk_alloc2(&device->alloc, pAllocator,
2498 sizeof(*fence), 8,
2499 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2500
2501 if (!fence)
2502 return VK_ERROR_OUT_OF_HOST_MEMORY;
2503
2504 memset(fence, 0, sizeof(*fence));
2505 fence->submitted = false;
2506 fence->signalled = !!(pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT);
2507 fence->fence = device->ws->create_fence();
2508 if (!fence->fence) {
2509 vk_free2(&device->alloc, pAllocator, fence);
2510 return VK_ERROR_OUT_OF_HOST_MEMORY;
2511 }
2512
2513 *pFence = radv_fence_to_handle(fence);
2514
2515 return VK_SUCCESS;
2516 }
2517
2518 void radv_DestroyFence(
2519 VkDevice _device,
2520 VkFence _fence,
2521 const VkAllocationCallbacks* pAllocator)
2522 {
2523 RADV_FROM_HANDLE(radv_device, device, _device);
2524 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2525
2526 if (!fence)
2527 return;
2528 device->ws->destroy_fence(fence->fence);
2529 vk_free2(&device->alloc, pAllocator, fence);
2530 }
2531
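/* Convert a relative timeout in nanoseconds into an absolute CLOCK_MONOTONIC
 * deadline, clamping so the addition cannot overflow. */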
2532 static uint64_t radv_get_absolute_timeout(uint64_t timeout)
2533 {
2534 uint64_t current_time;
2535 struct timespec tv;
2536
2537 clock_gettime(CLOCK_MONOTONIC, &tv);
2538 current_time = tv.tv_nsec + tv.tv_sec*1000000000ull;
2539
2540 timeout = MIN2(UINT64_MAX - current_time, timeout);
2541
2542 return current_time + timeout;
2543 }
2544
2545 VkResult radv_WaitForFences(
2546 VkDevice _device,
2547 uint32_t fenceCount,
2548 const VkFence* pFences,
2549 VkBool32 waitAll,
2550 uint64_t timeout)
2551 {
2552 RADV_FROM_HANDLE(radv_device, device, _device);
2553 timeout = radv_get_absolute_timeout(timeout);
2554
2555 if (!waitAll && fenceCount > 1) {
2556 fprintf(stderr, "radv: WaitForFences without waitAll not implemented yet\n");
2557 }
2558
2559 for (uint32_t i = 0; i < fenceCount; ++i) {
2560 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
2561 bool expired = false;
2562
2563 if (fence->signalled)
2564 continue;
2565
2566 if (!fence->submitted)
2567 return VK_TIMEOUT;
2568
2569 expired = device->ws->fence_wait(device->ws, fence->fence, true, timeout);
2570 if (!expired)
2571 return VK_TIMEOUT;
2572
2573 fence->signalled = true;
2574 }
2575
2576 return VK_SUCCESS;
2577 }
2578
2579 VkResult radv_ResetFences(VkDevice device,
2580 uint32_t fenceCount,
2581 const VkFence *pFences)
2582 {
2583 for (unsigned i = 0; i < fenceCount; ++i) {
2584 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
2585 fence->submitted = fence->signalled = false;
2586 }
2587
2588 return VK_SUCCESS;
2589 }
2590
2591 VkResult radv_GetFenceStatus(VkDevice _device, VkFence _fence)
2592 {
2593 RADV_FROM_HANDLE(radv_device, device, _device);
2594 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2595
2596 if (fence->signalled)
2597 return VK_SUCCESS;
2598 if (!fence->submitted)
2599 return VK_NOT_READY;
2600
2601 if (!device->ws->fence_wait(device->ws, fence->fence, false, 0))
2602 return VK_NOT_READY;
2603
2604 return VK_SUCCESS;
2605 }
2606
2607
2608 // Queue semaphore functions
2609
2610 VkResult radv_CreateSemaphore(
2611 VkDevice _device,
2612 const VkSemaphoreCreateInfo* pCreateInfo,
2613 const VkAllocationCallbacks* pAllocator,
2614 VkSemaphore* pSemaphore)
2615 {
2616 RADV_FROM_HANDLE(radv_device, device, _device);
2617 const VkExportSemaphoreCreateInfoKHR *export =
2618 vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO_KHR);
2619 VkExternalSemaphoreHandleTypeFlagsKHR handleTypes =
2620 export ? export->handleTypes : 0;
2621
2622 struct radv_semaphore *sem = vk_alloc2(&device->alloc, pAllocator,
2623 sizeof(*sem), 8,
2624 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2625 if (!sem)
2626 return VK_ERROR_OUT_OF_HOST_MEMORY;
2627
2628 sem->temp_syncobj = 0;
2629 /* create a syncobject if we are going to export this semaphore */
2630 if (handleTypes) {
2631 assert (device->physical_device->rad_info.has_syncobj);
2632 assert (handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
2633 int ret = device->ws->create_syncobj(device->ws, &sem->syncobj);
2634 if (ret) {
2635 vk_free2(&device->alloc, pAllocator, sem);
2636 return VK_ERROR_OUT_OF_HOST_MEMORY;
2637 }
2638 sem->sem = NULL;
2639 } else {
2640 sem->sem = device->ws->create_sem(device->ws);
2641 if (!sem->sem) {
2642 vk_free2(&device->alloc, pAllocator, sem);
2643 return VK_ERROR_OUT_OF_HOST_MEMORY;
2644 }
2645 sem->syncobj = 0;
2646 }
2647
2648 *pSemaphore = radv_semaphore_to_handle(sem);
2649 return VK_SUCCESS;
2650 }
2651
2652 void radv_DestroySemaphore(
2653 VkDevice _device,
2654 VkSemaphore _semaphore,
2655 const VkAllocationCallbacks* pAllocator)
2656 {
2657 RADV_FROM_HANDLE(radv_device, device, _device);
2658 RADV_FROM_HANDLE(radv_semaphore, sem, _semaphore);
2659 if (!_semaphore)
2660 return;
2661
2662 if (sem->syncobj)
2663 device->ws->destroy_syncobj(device->ws, sem->syncobj);
2664 else
2665 device->ws->destroy_sem(sem->sem);
2666 vk_free2(&device->alloc, pAllocator, sem);
2667 }
2668
2669 VkResult radv_CreateEvent(
2670 VkDevice _device,
2671 const VkEventCreateInfo* pCreateInfo,
2672 const VkAllocationCallbacks* pAllocator,
2673 VkEvent* pEvent)
2674 {
2675 RADV_FROM_HANDLE(radv_device, device, _device);
2676 struct radv_event *event = vk_alloc2(&device->alloc, pAllocator,
2677 sizeof(*event), 8,
2678 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2679
2680 if (!event)
2681 return VK_ERROR_OUT_OF_HOST_MEMORY;
2682
2683 event->bo = device->ws->buffer_create(device->ws, 8, 8,
2684 RADEON_DOMAIN_GTT,
2685 RADEON_FLAG_VA_UNCACHED | RADEON_FLAG_CPU_ACCESS);
2686 if (!event->bo) {
2687 vk_free2(&device->alloc, pAllocator, event);
2688 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2689 }
2690
2691 event->map = (uint64_t*)device->ws->buffer_map(event->bo);
2692
2693 *pEvent = radv_event_to_handle(event);
2694
2695 return VK_SUCCESS;
2696 }
2697
2698 void radv_DestroyEvent(
2699 VkDevice _device,
2700 VkEvent _event,
2701 const VkAllocationCallbacks* pAllocator)
2702 {
2703 RADV_FROM_HANDLE(radv_device, device, _device);
2704 RADV_FROM_HANDLE(radv_event, event, _event);
2705
2706 if (!event)
2707 return;
2708 device->ws->buffer_destroy(event->bo);
2709 vk_free2(&device->alloc, pAllocator, event);
2710 }
2711
2712 VkResult radv_GetEventStatus(
2713 VkDevice _device,
2714 VkEvent _event)
2715 {
2716 RADV_FROM_HANDLE(radv_event, event, _event);
2717
2718 if (*event->map == 1)
2719 return VK_EVENT_SET;
2720 return VK_EVENT_RESET;
2721 }
2722
2723 VkResult radv_SetEvent(
2724 VkDevice _device,
2725 VkEvent _event)
2726 {
2727 RADV_FROM_HANDLE(radv_event, event, _event);
2728 *event->map = 1;
2729
2730 return VK_SUCCESS;
2731 }
2732
2733 VkResult radv_ResetEvent(
2734 VkDevice _device,
2735 VkEvent _event)
2736 {
2737 RADV_FROM_HANDLE(radv_event, event, _event);
2738 *event->map = 0;
2739
2740 return VK_SUCCESS;
2741 }
2742
2743 VkResult radv_CreateBuffer(
2744 VkDevice _device,
2745 const VkBufferCreateInfo* pCreateInfo,
2746 const VkAllocationCallbacks* pAllocator,
2747 VkBuffer* pBuffer)
2748 {
2749 RADV_FROM_HANDLE(radv_device, device, _device);
2750 struct radv_buffer *buffer;
2751
2752 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
2753
2754 buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
2755 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2756 if (buffer == NULL)
2757 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2758
2759 buffer->size = pCreateInfo->size;
2760 buffer->usage = pCreateInfo->usage;
2761 buffer->bo = NULL;
2762 buffer->offset = 0;
2763 buffer->flags = pCreateInfo->flags;
2764
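	/* Sparse buffers get a virtual-only allocation up front; physical pages
	 * are bound later through vkQueueBindSparse. */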
2765 if (pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) {
2766 buffer->bo = device->ws->buffer_create(device->ws,
2767 align64(buffer->size, 4096),
2768 4096, 0, RADEON_FLAG_VIRTUAL);
2769 if (!buffer->bo) {
2770 vk_free2(&device->alloc, pAllocator, buffer);
2771 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
2772 }
2773 }
2774
2775 *pBuffer = radv_buffer_to_handle(buffer);
2776
2777 return VK_SUCCESS;
2778 }
2779
2780 void radv_DestroyBuffer(
2781 VkDevice _device,
2782 VkBuffer _buffer,
2783 const VkAllocationCallbacks* pAllocator)
2784 {
2785 RADV_FROM_HANDLE(radv_device, device, _device);
2786 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
2787
2788 if (!buffer)
2789 return;
2790
2791 if (buffer->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)
2792 device->ws->buffer_destroy(buffer->bo);
2793
2794 vk_free2(&device->alloc, pAllocator, buffer);
2795 }
2796
2797 static inline unsigned
2798 si_tile_mode_index(const struct radv_image *image, unsigned level, bool stencil)
2799 {
2800 if (stencil)
2801 return image->surface.u.legacy.stencil_tiling_index[level];
2802 else
2803 return image->surface.u.legacy.tiling_index[level];
2804 }
2805
2806 static uint32_t radv_surface_layer_count(struct radv_image_view *iview)
2807 {
2808 return iview->type == VK_IMAGE_VIEW_TYPE_3D ? iview->extent.depth : iview->layer_count;
2809 }
2810
2811 static void
2812 radv_initialise_color_surface(struct radv_device *device,
2813 struct radv_color_buffer_info *cb,
2814 struct radv_image_view *iview)
2815 {
2816 const struct vk_format_description *desc;
2817 unsigned ntype, format, swap, endian;
2818 unsigned blend_clamp = 0, blend_bypass = 0;
2819 uint64_t va;
2820 const struct radeon_surf *surf = &iview->image->surface;
2821
2822 desc = vk_format_description(iview->vk_format);
2823
2824 memset(cb, 0, sizeof(*cb));
2825
2826 /* Intensity is implemented as Red, so treat it that way. */
2827 cb->cb_color_attrib = S_028C74_FORCE_DST_ALPHA_1(desc->swizzle[3] == VK_SWIZZLE_1);
2828
2829 va = radv_buffer_get_va(iview->bo) + iview->image->offset;
2830
2831 cb->cb_color_base = va >> 8;
2832
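	/* GFX9 addresses the whole mip chain through a single base and swizzle
	 * mode; older generations program per-level offsets, tiling indices and
	 * pitch/slice in tiles. */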
2833 if (device->physical_device->rad_info.chip_class >= GFX9) {
2834 struct gfx9_surf_meta_flags meta;
2835 if (iview->image->dcc_offset)
2836 meta = iview->image->surface.u.gfx9.dcc;
2837 else
2838 meta = iview->image->surface.u.gfx9.cmask;
2839
2840 cb->cb_color_attrib |= S_028C74_COLOR_SW_MODE(iview->image->surface.u.gfx9.surf.swizzle_mode) |
2841 S_028C74_FMASK_SW_MODE(iview->image->surface.u.gfx9.fmask.swizzle_mode) |
2842 S_028C74_RB_ALIGNED(meta.rb_aligned) |
2843 S_028C74_PIPE_ALIGNED(meta.pipe_aligned);
2844
2845 cb->cb_color_base += iview->image->surface.u.gfx9.surf_offset >> 8;
2846 cb->cb_color_base |= iview->image->surface.tile_swizzle;
2847 } else {
2848 const struct legacy_surf_level *level_info = &surf->u.legacy.level[iview->base_mip];
2849 unsigned pitch_tile_max, slice_tile_max, tile_mode_index;
2850
2851 cb->cb_color_base += level_info->offset >> 8;
2852 if (level_info->mode == RADEON_SURF_MODE_2D)
2853 cb->cb_color_base |= iview->image->surface.tile_swizzle;
2854
2855 pitch_tile_max = level_info->nblk_x / 8 - 1;
2856 slice_tile_max = (level_info->nblk_x * level_info->nblk_y) / 64 - 1;
2857 tile_mode_index = si_tile_mode_index(iview->image, iview->base_mip, false);
2858
2859 cb->cb_color_pitch = S_028C64_TILE_MAX(pitch_tile_max);
2860 cb->cb_color_slice = S_028C68_TILE_MAX(slice_tile_max);
2861 cb->cb_color_cmask_slice = iview->image->cmask.slice_tile_max;
2862
2863 cb->cb_color_attrib |= S_028C74_TILE_MODE_INDEX(tile_mode_index);
2864 cb->micro_tile_mode = iview->image->surface.micro_tile_mode;
2865
2866 if (iview->image->fmask.size) {
2867 if (device->physical_device->rad_info.chip_class >= CIK)
2868 cb->cb_color_pitch |= S_028C64_FMASK_TILE_MAX(iview->image->fmask.pitch_in_pixels / 8 - 1);
2869 cb->cb_color_attrib |= S_028C74_FMASK_TILE_MODE_INDEX(iview->image->fmask.tile_mode_index);
2870 cb->cb_color_fmask_slice = S_028C88_TILE_MAX(iview->image->fmask.slice_tile_max);
2871 } else {
2872 /* This must be set for fast clear to work without FMASK. */
2873 if (device->physical_device->rad_info.chip_class >= CIK)
2874 cb->cb_color_pitch |= S_028C64_FMASK_TILE_MAX(pitch_tile_max);
2875 cb->cb_color_attrib |= S_028C74_FMASK_TILE_MODE_INDEX(tile_mode_index);
2876 cb->cb_color_fmask_slice = S_028C88_TILE_MAX(slice_tile_max);
2877 }
2878 }
2879
2880 /* CMASK variables */
2881 va = radv_buffer_get_va(iview->bo) + iview->image->offset;
2882 va += iview->image->cmask.offset;
2883 cb->cb_color_cmask = va >> 8;
2884
2885 va = radv_buffer_get_va(iview->bo) + iview->image->offset;
2886 va += iview->image->dcc_offset;
2887 cb->cb_dcc_base = va >> 8;
2888 cb->cb_dcc_base |= iview->image->surface.tile_swizzle;
2889
2890 uint32_t max_slice = radv_surface_layer_count(iview);
2891 cb->cb_color_view = S_028C6C_SLICE_START(iview->base_layer) |
2892 S_028C6C_SLICE_MAX(iview->base_layer + max_slice - 1);
2893
2894 if (iview->image->info.samples > 1) {
2895 unsigned log_samples = util_logbase2(iview->image->info.samples);
2896
2897 cb->cb_color_attrib |= S_028C74_NUM_SAMPLES(log_samples) |
2898 S_028C74_NUM_FRAGMENTS(log_samples);
2899 }
2900
2901 if (iview->image->fmask.size) {
2902 va = radv_buffer_get_va(iview->bo) + iview->image->offset + iview->image->fmask.offset;
2903 cb->cb_color_fmask = va >> 8;
2904 cb->cb_color_fmask |= iview->image->fmask.tile_swizzle;
2905 } else {
2906 cb->cb_color_fmask = cb->cb_color_base;
2907 }
2908
2909 ntype = radv_translate_color_numformat(iview->vk_format,
2910 desc,
2911 vk_format_get_first_non_void_channel(iview->vk_format));
2912 format = radv_translate_colorformat(iview->vk_format);
2913 if (format == V_028C70_COLOR_INVALID || ntype == ~0u)
2914 radv_finishme("Illegal color\n");
2915 swap = radv_translate_colorswap(iview->vk_format, FALSE);
2916 endian = radv_colorformat_endian_swap(format);
2917
2918 /* blend clamp should be set for all NORM/SRGB types */
2919 if (ntype == V_028C70_NUMBER_UNORM ||
2920 ntype == V_028C70_NUMBER_SNORM ||
2921 ntype == V_028C70_NUMBER_SRGB)
2922 blend_clamp = 1;
2923
2924 /* set blend bypass according to docs if SINT/UINT or
2925 8/24 COLOR variants */
2926 if (ntype == V_028C70_NUMBER_UINT || ntype == V_028C70_NUMBER_SINT ||
2927 format == V_028C70_COLOR_8_24 || format == V_028C70_COLOR_24_8 ||
2928 format == V_028C70_COLOR_X24_8_32_FLOAT) {
2929 blend_clamp = 0;
2930 blend_bypass = 1;
2931 }
2932 #if 0
2933 if ((ntype == V_028C70_NUMBER_UINT || ntype == V_028C70_NUMBER_SINT) &&
2934 (format == V_028C70_COLOR_8 ||
2935 format == V_028C70_COLOR_8_8 ||
2936 format == V_028C70_COLOR_8_8_8_8))
2937 ->color_is_int8 = true;
2938 #endif
2939 cb->cb_color_info = S_028C70_FORMAT(format) |
2940 S_028C70_COMP_SWAP(swap) |
2941 S_028C70_BLEND_CLAMP(blend_clamp) |
2942 S_028C70_BLEND_BYPASS(blend_bypass) |
2943 S_028C70_SIMPLE_FLOAT(1) |
2944 S_028C70_ROUND_MODE(ntype != V_028C70_NUMBER_UNORM &&
2945 ntype != V_028C70_NUMBER_SNORM &&
2946 ntype != V_028C70_NUMBER_SRGB &&
2947 format != V_028C70_COLOR_8_24 &&
2948 format != V_028C70_COLOR_24_8) |
2949 S_028C70_NUMBER_TYPE(ntype) |
2950 S_028C70_ENDIAN(endian);
2951 if ((iview->image->info.samples > 1) && iview->image->fmask.size) {
2952 cb->cb_color_info |= S_028C70_COMPRESSION(1);
2953 if (device->physical_device->rad_info.chip_class == SI) {
2954 unsigned fmask_bankh = util_logbase2(iview->image->fmask.bank_height);
2955 cb->cb_color_attrib |= S_028C74_FMASK_BANK_HEIGHT(fmask_bankh);
2956 }
2957 }
2958
2959 if (iview->image->cmask.size &&
2960 !(device->instance->debug_flags & RADV_DEBUG_NO_FAST_CLEARS))
2961 cb->cb_color_info |= S_028C70_FAST_CLEAR(1);
2962
2963 if (radv_vi_dcc_enabled(iview->image, iview->base_mip))
2964 cb->cb_color_info |= S_028C70_DCC_ENABLE(1);
2965
2966 if (device->physical_device->rad_info.chip_class >= VI) {
2967 unsigned max_uncompressed_block_size = 2;
2968 if (iview->image->info.samples > 1) {
2969 if (iview->image->surface.bpe == 1)
2970 max_uncompressed_block_size = 0;
2971 else if (iview->image->surface.bpe == 2)
2972 max_uncompressed_block_size = 1;
2973 }
2974
2975 cb->cb_dcc_control = S_028C78_MAX_UNCOMPRESSED_BLOCK_SIZE(max_uncompressed_block_size) |
2976 S_028C78_INDEPENDENT_64B_BLOCKS(1);
2977 }
2978
2979 /* This must be set for fast clear to work without FMASK. */
2980 if (!iview->image->fmask.size &&
2981 device->physical_device->rad_info.chip_class == SI) {
2982 unsigned bankh = util_logbase2(iview->image->surface.u.legacy.bankh);
2983 cb->cb_color_attrib |= S_028C74_FMASK_BANK_HEIGHT(bankh);
2984 }
2985
2986 if (device->physical_device->rad_info.chip_class >= GFX9) {
2987 unsigned mip0_depth = iview->image->type == VK_IMAGE_TYPE_3D ?
2988 (iview->extent.depth - 1) : (iview->image->info.array_size - 1);
2989
2990 cb->cb_color_view |= S_028C6C_MIP_LEVEL(iview->base_mip);
2991 cb->cb_color_attrib |= S_028C74_MIP0_DEPTH(mip0_depth) |
2992 S_028C74_RESOURCE_TYPE(iview->image->surface.u.gfx9.resource_type);
2993 cb->cb_color_attrib2 = S_028C68_MIP0_WIDTH(iview->extent.width - 1) |
2994 S_028C68_MIP0_HEIGHT(iview->extent.height - 1) |
2995 S_028C68_MAX_MIP(iview->image->info.levels - 1);
2996
2997 cb->gfx9_epitch = S_0287A0_EPITCH(iview->image->surface.u.gfx9.surf.epitch);
2998
2999 }
3000 }
3001
3002 static void
3003 radv_initialise_ds_surface(struct radv_device *device,
3004 struct radv_ds_buffer_info *ds,
3005 struct radv_image_view *iview)
3006 {
3007 unsigned level = iview->base_mip;
3008 unsigned format, stencil_format;
3009 uint64_t va, s_offs, z_offs;
3010 bool stencil_only = false;
3011 memset(ds, 0, sizeof(*ds));
3012 switch (iview->image->vk_format) {
3013 case VK_FORMAT_D24_UNORM_S8_UINT:
3014 case VK_FORMAT_X8_D24_UNORM_PACK32:
3015 ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-24);
3016 ds->offset_scale = 2.0f;
3017 break;
3018 case VK_FORMAT_D16_UNORM:
3019 case VK_FORMAT_D16_UNORM_S8_UINT:
3020 ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-16);
3021 ds->offset_scale = 4.0f;
3022 break;
3023 case VK_FORMAT_D32_SFLOAT:
3024 case VK_FORMAT_D32_SFLOAT_S8_UINT:
3025 ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-23) |
3026 S_028B78_POLY_OFFSET_DB_IS_FLOAT_FMT(1);
3027 ds->offset_scale = 1.0f;
3028 break;
3029 case VK_FORMAT_S8_UINT:
3030 stencil_only = true;
3031 break;
3032 default:
3033 break;
3034 }
3035
3036 format = radv_translate_dbformat(iview->image->vk_format);
3037 stencil_format = iview->image->surface.has_stencil ?
3038 V_028044_STENCIL_8 : V_028044_STENCIL_INVALID;
3039
3040 uint32_t max_slice = radv_surface_layer_count(iview);
3041 ds->db_depth_view = S_028008_SLICE_START(iview->base_layer) |
3042 S_028008_SLICE_MAX(iview->base_layer + max_slice - 1);
3043
3044 ds->db_htile_data_base = 0;
3045 ds->db_htile_surface = 0;
3046
3047 va = radv_buffer_get_va(iview->bo) + iview->image->offset;
3048 s_offs = z_offs = va;
3049
3050 if (device->physical_device->rad_info.chip_class >= GFX9) {
3051 assert(iview->image->surface.u.gfx9.surf_offset == 0);
3052 s_offs += iview->image->surface.u.gfx9.stencil_offset;
3053
3054 ds->db_z_info = S_028038_FORMAT(format) |
3055 S_028038_NUM_SAMPLES(util_logbase2(iview->image->info.samples)) |
3056 S_028038_SW_MODE(iview->image->surface.u.gfx9.surf.swizzle_mode) |
3057 S_028038_MAXMIP(iview->image->info.levels - 1);
3058 ds->db_stencil_info = S_02803C_FORMAT(stencil_format) |
3059 S_02803C_SW_MODE(iview->image->surface.u.gfx9.stencil.swizzle_mode);
3060
3061 ds->db_z_info2 = S_028068_EPITCH(iview->image->surface.u.gfx9.surf.epitch);
3062 ds->db_stencil_info2 = S_02806C_EPITCH(iview->image->surface.u.gfx9.stencil.epitch);
3063 ds->db_depth_view |= S_028008_MIPID(level);
3064
3065 ds->db_depth_size = S_02801C_X_MAX(iview->image->info.width - 1) |
3066 S_02801C_Y_MAX(iview->image->info.height - 1);
3067
3068 if (radv_htile_enabled(iview->image, level)) {
3069 ds->db_z_info |= S_028038_TILE_SURFACE_ENABLE(1);
3070
3071 if (iview->image->tc_compatible_htile) {
3072 unsigned max_zplanes = 4;
3073
3074 if (iview->vk_format == VK_FORMAT_D16_UNORM &&
3075 iview->image->info.samples > 1)
3076 max_zplanes = 2;
3077
3078 ds->db_z_info |= S_028038_DECOMPRESS_ON_N_ZPLANES(max_zplanes + 1) |
3079 S_028038_ITERATE_FLUSH(1);
3080 ds->db_stencil_info |= S_02803C_ITERATE_FLUSH(1);
3081 }
3082
3083 if (!iview->image->surface.has_stencil)
3084 /* Use all of the htile_buffer for depth if there's no stencil. */
3085 ds->db_stencil_info |= S_02803C_TILE_STENCIL_DISABLE(1);
3086 va = radv_buffer_get_va(iview->bo) + iview->image->offset +
3087 iview->image->htile_offset;
3088 ds->db_htile_data_base = va >> 8;
3089 ds->db_htile_surface = S_028ABC_FULL_CACHE(1) |
3090 S_028ABC_PIPE_ALIGNED(iview->image->surface.u.gfx9.htile.pipe_aligned) |
3091 S_028ABC_RB_ALIGNED(iview->image->surface.u.gfx9.htile.rb_aligned);
3092 }
3093 } else {
3094 const struct legacy_surf_level *level_info = &iview->image->surface.u.legacy.level[level];
3095
3096 if (stencil_only)
3097 level_info = &iview->image->surface.u.legacy.stencil_level[level];
3098
3099 z_offs += iview->image->surface.u.legacy.level[level].offset;
3100 s_offs += iview->image->surface.u.legacy.stencil_level[level].offset;
3101
3102 ds->db_depth_info = S_02803C_ADDR5_SWIZZLE_MASK(!iview->image->tc_compatible_htile);
3103 ds->db_z_info = S_028040_FORMAT(format) | S_028040_ZRANGE_PRECISION(1);
3104 ds->db_stencil_info = S_028044_FORMAT(stencil_format);
3105
3106 if (iview->image->info.samples > 1)
3107 ds->db_z_info |= S_028040_NUM_SAMPLES(util_logbase2(iview->image->info.samples));
3108
3109 if (device->physical_device->rad_info.chip_class >= CIK) {
3110 struct radeon_info *info = &device->physical_device->rad_info;
3111 unsigned tiling_index = iview->image->surface.u.legacy.tiling_index[level];
3112 unsigned stencil_index = iview->image->surface.u.legacy.stencil_tiling_index[level];
3113 unsigned macro_index = iview->image->surface.u.legacy.macro_tile_index;
3114 unsigned tile_mode = info->si_tile_mode_array[tiling_index];
3115 unsigned stencil_tile_mode = info->si_tile_mode_array[stencil_index];
3116 unsigned macro_mode = info->cik_macrotile_mode_array[macro_index];
3117
3118 if (stencil_only)
3119 tile_mode = stencil_tile_mode;
3120
3121 ds->db_depth_info |=
3122 S_02803C_ARRAY_MODE(G_009910_ARRAY_MODE(tile_mode)) |
3123 S_02803C_PIPE_CONFIG(G_009910_PIPE_CONFIG(tile_mode)) |
3124 S_02803C_BANK_WIDTH(G_009990_BANK_WIDTH(macro_mode)) |
3125 S_02803C_BANK_HEIGHT(G_009990_BANK_HEIGHT(macro_mode)) |
3126 S_02803C_MACRO_TILE_ASPECT(G_009990_MACRO_TILE_ASPECT(macro_mode)) |
3127 S_02803C_NUM_BANKS(G_009990_NUM_BANKS(macro_mode));
3128 ds->db_z_info |= S_028040_TILE_SPLIT(G_009910_TILE_SPLIT(tile_mode));
3129 ds->db_stencil_info |= S_028044_TILE_SPLIT(G_009910_TILE_SPLIT(stencil_tile_mode));
3130 } else {
3131 unsigned tile_mode_index = si_tile_mode_index(iview->image, level, false);
3132 ds->db_z_info |= S_028040_TILE_MODE_INDEX(tile_mode_index);
3133 tile_mode_index = si_tile_mode_index(iview->image, level, true);
3134 ds->db_stencil_info |= S_028044_TILE_MODE_INDEX(tile_mode_index);
3135 if (stencil_only)
3136 ds->db_z_info |= S_028040_TILE_MODE_INDEX(tile_mode_index);
3137 }
3138
3139 ds->db_depth_size = S_028058_PITCH_TILE_MAX((level_info->nblk_x / 8) - 1) |
3140 S_028058_HEIGHT_TILE_MAX((level_info->nblk_y / 8) - 1);
3141 ds->db_depth_slice = S_02805C_SLICE_TILE_MAX((level_info->nblk_x * level_info->nblk_y) / 64 - 1);
3142
3143 if (radv_htile_enabled(iview->image, level)) {
3144 ds->db_z_info |= S_028040_TILE_SURFACE_ENABLE(1);
3145
3146 if (!iview->image->surface.has_stencil &&
3147 !iview->image->tc_compatible_htile)
3148 /* Use all of the htile_buffer for depth if there's no stencil. */
3149 ds->db_stencil_info |= S_028044_TILE_STENCIL_DISABLE(1);
3150
3151 va = radv_buffer_get_va(iview->bo) + iview->image->offset +
3152 iview->image->htile_offset;
3153 ds->db_htile_data_base = va >> 8;
3154 ds->db_htile_surface = S_028ABC_FULL_CACHE(1);
3155
3156 if (iview->image->tc_compatible_htile) {
3157 ds->db_htile_surface |= S_028ABC_TC_COMPATIBLE(1);
3158
3159 if (iview->image->info.samples <= 1)
3160 ds->db_z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(5);
3161 else if (iview->image->info.samples <= 4)
3162 ds->db_z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(3);
3163 else
3164 ds->db_z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(2);
3165 }
3166 }
3167 }
3168
3169 ds->db_z_read_base = ds->db_z_write_base = z_offs >> 8;
3170 ds->db_stencil_read_base = ds->db_stencil_write_base = s_offs >> 8;
3171 }
3172
3173 VkResult radv_CreateFramebuffer(
3174 VkDevice _device,
3175 const VkFramebufferCreateInfo* pCreateInfo,
3176 const VkAllocationCallbacks* pAllocator,
3177 VkFramebuffer* pFramebuffer)
3178 {
3179 RADV_FROM_HANDLE(radv_device, device, _device);
3180 struct radv_framebuffer *framebuffer;
3181
3182 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
3183
3184 size_t size = sizeof(*framebuffer) +
3185 sizeof(struct radv_attachment_info) * pCreateInfo->attachmentCount;
3186 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
3187 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3188 if (framebuffer == NULL)
3189 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3190
3191 framebuffer->attachment_count = pCreateInfo->attachmentCount;
3192 framebuffer->width = pCreateInfo->width;
3193 framebuffer->height = pCreateInfo->height;
3194 framebuffer->layers = pCreateInfo->layers;
3195 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
3196 VkImageView _iview = pCreateInfo->pAttachments[i];
3197 struct radv_image_view *iview = radv_image_view_from_handle(_iview);
3198 framebuffer->attachments[i].attachment = iview;
3199 if (iview->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) {
3200 radv_initialise_color_surface(device, &framebuffer->attachments[i].cb, iview);
3201 } else if (iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
3202 radv_initialise_ds_surface(device, &framebuffer->attachments[i].ds, iview);
3203 }
3204 framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
3205 framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
3206 framebuffer->layers = MIN2(framebuffer->layers, radv_surface_layer_count(iview));
3207 }
3208
3209 *pFramebuffer = radv_framebuffer_to_handle(framebuffer);
3210 return VK_SUCCESS;
3211 }
3212
3213 void radv_DestroyFramebuffer(
3214 VkDevice _device,
3215 VkFramebuffer _fb,
3216 const VkAllocationCallbacks* pAllocator)
3217 {
3218 RADV_FROM_HANDLE(radv_device, device, _device);
3219 RADV_FROM_HANDLE(radv_framebuffer, fb, _fb);
3220
3221 if (!fb)
3222 return;
3223 vk_free2(&device->alloc, pAllocator, fb);
3224 }
3225
3226 static unsigned radv_tex_wrap(VkSamplerAddressMode address_mode)
3227 {
3228 switch (address_mode) {
3229 case VK_SAMPLER_ADDRESS_MODE_REPEAT:
3230 return V_008F30_SQ_TEX_WRAP;
3231 case VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT:
3232 return V_008F30_SQ_TEX_MIRROR;
3233 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE:
3234 return V_008F30_SQ_TEX_CLAMP_LAST_TEXEL;
3235 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER:
3236 return V_008F30_SQ_TEX_CLAMP_BORDER;
3237 case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE:
3238 return V_008F30_SQ_TEX_MIRROR_ONCE_LAST_TEXEL;
3239 default:
3240 unreachable("illegal tex wrap mode");
3241 break;
3242 }
3243 }
3244
3245 static unsigned
3246 radv_tex_compare(VkCompareOp op)
3247 {
3248 switch (op) {
3249 case VK_COMPARE_OP_NEVER:
3250 return V_008F30_SQ_TEX_DEPTH_COMPARE_NEVER;
3251 case VK_COMPARE_OP_LESS:
3252 return V_008F30_SQ_TEX_DEPTH_COMPARE_LESS;
3253 case VK_COMPARE_OP_EQUAL:
3254 return V_008F30_SQ_TEX_DEPTH_COMPARE_EQUAL;
3255 case VK_COMPARE_OP_LESS_OR_EQUAL:
3256 return V_008F30_SQ_TEX_DEPTH_COMPARE_LESSEQUAL;
3257 case VK_COMPARE_OP_GREATER:
3258 return V_008F30_SQ_TEX_DEPTH_COMPARE_GREATER;
3259 case VK_COMPARE_OP_NOT_EQUAL:
3260 return V_008F30_SQ_TEX_DEPTH_COMPARE_NOTEQUAL;
3261 case VK_COMPARE_OP_GREATER_OR_EQUAL:
3262 return V_008F30_SQ_TEX_DEPTH_COMPARE_GREATEREQUAL;
3263 case VK_COMPARE_OP_ALWAYS:
3264 return V_008F30_SQ_TEX_DEPTH_COMPARE_ALWAYS;
3265 default:
3266 unreachable("illegal compare mode");
3267 break;
3268 }
3269 }
3270
3271 static unsigned
3272 radv_tex_filter(VkFilter filter, unsigned max_aniso)
3273 {
3274 switch (filter) {
3275 case VK_FILTER_NEAREST:
3276 return (max_aniso > 1 ? V_008F38_SQ_TEX_XY_FILTER_ANISO_POINT :
3277 V_008F38_SQ_TEX_XY_FILTER_POINT);
3278 case VK_FILTER_LINEAR:
3279 return (max_aniso > 1 ? V_008F38_SQ_TEX_XY_FILTER_ANISO_BILINEAR :
3280 V_008F38_SQ_TEX_XY_FILTER_BILINEAR);
3281 case VK_FILTER_CUBIC_IMG:
3282 default:
3283 fprintf(stderr, "illegal texture filter");
3284 return 0;
3285 }
3286 }
3287
3288 static unsigned
3289 radv_tex_mipfilter(VkSamplerMipmapMode mode)
3290 {
3291 switch (mode) {
3292 case VK_SAMPLER_MIPMAP_MODE_NEAREST:
3293 return V_008F38_SQ_TEX_Z_FILTER_POINT;
3294 case VK_SAMPLER_MIPMAP_MODE_LINEAR:
3295 return V_008F38_SQ_TEX_Z_FILTER_LINEAR;
3296 default:
3297 return V_008F38_SQ_TEX_Z_FILTER_NONE;
3298 }
3299 }
3300
3301 static unsigned
3302 radv_tex_bordercolor(VkBorderColor bcolor)
3303 {
3304 switch (bcolor) {
3305 case VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK:
3306 case VK_BORDER_COLOR_INT_TRANSPARENT_BLACK:
3307 return V_008F3C_SQ_TEX_BORDER_COLOR_TRANS_BLACK;
3308 case VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK:
3309 case VK_BORDER_COLOR_INT_OPAQUE_BLACK:
3310 return V_008F3C_SQ_TEX_BORDER_COLOR_OPAQUE_BLACK;
3311 case VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE:
3312 case VK_BORDER_COLOR_INT_OPAQUE_WHITE:
3313 return V_008F3C_SQ_TEX_BORDER_COLOR_OPAQUE_WHITE;
3314 default:
3315 break;
3316 }
3317 return 0;
3318 }
3319
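/* Map the requested maximum anisotropy onto the hardware's log2-style
 * encoding: below 2x -> 0, 2x-3x -> 1, 4x-7x -> 2, 8x-15x -> 3, 16x -> 4. */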
3320 static unsigned
3321 radv_tex_aniso_filter(unsigned filter)
3322 {
3323 if (filter < 2)
3324 return 0;
3325 if (filter < 4)
3326 return 1;
3327 if (filter < 8)
3328 return 2;
3329 if (filter < 16)
3330 return 3;
3331 return 4;
3332 }
3333
3334 static void
3335 radv_init_sampler(struct radv_device *device,
3336 struct radv_sampler *sampler,
3337 const VkSamplerCreateInfo *pCreateInfo)
3338 {
3339 uint32_t max_aniso = pCreateInfo->anisotropyEnable && pCreateInfo->maxAnisotropy > 1.0 ?
3340 (uint32_t) pCreateInfo->maxAnisotropy : 0;
3341 uint32_t max_aniso_ratio = radv_tex_aniso_filter(max_aniso);
3342 bool is_vi = (device->physical_device->rad_info.chip_class >= VI);
3343
3344 sampler->state[0] = (S_008F30_CLAMP_X(radv_tex_wrap(pCreateInfo->addressModeU)) |
3345 S_008F30_CLAMP_Y(radv_tex_wrap(pCreateInfo->addressModeV)) |
3346 S_008F30_CLAMP_Z(radv_tex_wrap(pCreateInfo->addressModeW)) |
3347 S_008F30_MAX_ANISO_RATIO(max_aniso_ratio) |
3348 S_008F30_DEPTH_COMPARE_FUNC(radv_tex_compare(pCreateInfo->compareOp)) |
3349 S_008F30_FORCE_UNNORMALIZED(pCreateInfo->unnormalizedCoordinates ? 1 : 0) |
3350 S_008F30_ANISO_THRESHOLD(max_aniso_ratio >> 1) |
3351 S_008F30_ANISO_BIAS(max_aniso_ratio) |
3352 S_008F30_DISABLE_CUBE_WRAP(0) |
3353 S_008F30_COMPAT_MODE(is_vi));
3354 sampler->state[1] = (S_008F34_MIN_LOD(S_FIXED(CLAMP(pCreateInfo->minLod, 0, 15), 8)) |
3355 S_008F34_MAX_LOD(S_FIXED(CLAMP(pCreateInfo->maxLod, 0, 15), 8)) |
3356 S_008F34_PERF_MIP(max_aniso_ratio ? max_aniso_ratio + 6 : 0));
3357 sampler->state[2] = (S_008F38_LOD_BIAS(S_FIXED(CLAMP(pCreateInfo->mipLodBias, -16, 16), 8)) |
3358 S_008F38_XY_MAG_FILTER(radv_tex_filter(pCreateInfo->magFilter, max_aniso)) |
3359 S_008F38_XY_MIN_FILTER(radv_tex_filter(pCreateInfo->minFilter, max_aniso)) |
3360 S_008F38_MIP_FILTER(radv_tex_mipfilter(pCreateInfo->mipmapMode)) |
3361 S_008F38_MIP_POINT_PRECLAMP(0) |
3362 S_008F38_DISABLE_LSB_CEIL(1) |
3363 S_008F38_FILTER_PREC_FIX(1) |
3364 S_008F38_ANISO_OVERRIDE(is_vi));
3365 sampler->state[3] = (S_008F3C_BORDER_COLOR_PTR(0) |
3366 S_008F3C_BORDER_COLOR_TYPE(radv_tex_bordercolor(pCreateInfo->borderColor)));
3367 }
3368
3369 VkResult radv_CreateSampler(
3370 VkDevice _device,
3371 const VkSamplerCreateInfo* pCreateInfo,
3372 const VkAllocationCallbacks* pAllocator,
3373 VkSampler* pSampler)
3374 {
3375 RADV_FROM_HANDLE(radv_device, device, _device);
3376 struct radv_sampler *sampler;
3377
3378 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
3379
3380 sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
3381 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3382 if (!sampler)
3383 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3384
3385 radv_init_sampler(device, sampler, pCreateInfo);
3386 *pSampler = radv_sampler_to_handle(sampler);
3387
3388 return VK_SUCCESS;
3389 }
3390
3391 void radv_DestroySampler(
3392 VkDevice _device,
3393 VkSampler _sampler,
3394 const VkAllocationCallbacks* pAllocator)
3395 {
3396 RADV_FROM_HANDLE(radv_device, device, _device);
3397 RADV_FROM_HANDLE(radv_sampler, sampler, _sampler);
3398
3399 if (!sampler)
3400 return;
3401 vk_free2(&device->alloc, pAllocator, sampler);
3402 }
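
/* Illustrative only (not driver code): the application-side calls that
 * reach radv_CreateSampler/radv_DestroySampler through the loader.  The
 * values below are arbitrary example parameters.
 *
 *   VkSamplerCreateInfo info = {
 *      .sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
 *      .magFilter = VK_FILTER_LINEAR,
 *      .minFilter = VK_FILTER_LINEAR,
 *      .mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR,
 *      .addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT,
 *      .addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT,
 *      .addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT,
 *      .maxLod = VK_LOD_CLAMP_NONE,
 *   };
 *   VkSampler sampler;
 *   vkCreateSampler(device, &info, NULL, &sampler);
 *   ...
 *   vkDestroySampler(device, sampler, NULL);
 */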
3403
3404 /* vk_icd.h does not declare this function, so we declare it here to
3405 * suppress -Wmissing-prototypes.
3406 */
3407 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
3408 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
3409
3410 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
3411 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
3412 {
3413 /* For the full details on loader interface versioning, see
3414 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
3415 * What follows is a condensed summary, to help you navigate the large and
3416 * confusing official doc.
3417 *
3418 * - Loader interface v0 is incompatible with later versions. We don't
3419 * support it.
3420 *
3421 * - In loader interface v1:
3422 * - The first ICD entrypoint called by the loader is
3423 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
3424 * entrypoint.
3425 * - The ICD must statically expose no other Vulkan symbol unless it is
3426 * linked with -Bsymbolic.
3427 * - Each dispatchable Vulkan handle created by the ICD must be
3428 * a pointer to a struct whose first member is VK_LOADER_DATA. The
3429 * ICD must initialize VK_LOADER_DATA.loaderMagic to ICD_LOADER_MAGIC.
3430 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
3431 * vkDestroySurfaceKHR(). The ICD must be capable of working with
3432 * such loader-managed surfaces.
3433 *
3434 * - Loader interface v2 differs from v1 in:
3435 * - The first ICD entrypoint called by the loader is
3436 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
3437 * statically expose this entrypoint.
3438 *
3439 * - Loader interface v3 differs from v2 in:
3440 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
3441 * vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
3442 * because the loader no longer does so.
3443 */
3444 *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
3445 return VK_SUCCESS;
3446 }
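
/* A minimal sketch of the dispatchable-handle rule described above; the
 * struct and field names are hypothetical, not radv's actual layout.  Every
 * dispatchable handle the ICD hands back must start with VK_LOADER_DATA so
 * the loader can install its dispatch pointer there.
 *
 *   struct example_device {
 *      VK_LOADER_DATA loader_data;  // must be the first member;
 *                                   // loaderMagic = ICD_LOADER_MAGIC
 *      ...driver state...
 *   };
 */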
3447
3448 VkResult radv_GetMemoryFdKHR(VkDevice _device,
3449 const VkMemoryGetFdInfoKHR *pGetFdInfo,
3450 int *pFD)
3451 {
3452 RADV_FROM_HANDLE(radv_device, device, _device);
3453 RADV_FROM_HANDLE(radv_device_memory, memory, pGetFdInfo->memory);
3454
3455 assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
3456
3457 /* We support only one handle type. */
3458 assert(pGetFdInfo->handleType ==
3459 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
3460
3461 bool ret = radv_get_memory_fd(device, memory, pFD);
3462 if (!ret)
3463 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3464 return VK_SUCCESS;
3465 }
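
/* Illustrative only (not driver code): an application exporting the opaque
 * fd of an allocation that was created with export enabled (e.g. via
 * VkExportMemoryAllocateInfoKHR):
 *
 *   VkMemoryGetFdInfoKHR get_fd = {
 *      .sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR,
 *      .memory = mem,
 *      .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,
 *   };
 *   int fd = -1;
 *   vkGetMemoryFdKHR(device, &get_fd, &fd);
 */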
3466
3467 VkResult radv_GetMemoryFdPropertiesKHR(VkDevice _device,
3468 VkExternalMemoryHandleTypeFlagBitsKHR handleType,
3469 int fd,
3470 VkMemoryFdPropertiesKHR *pMemoryFdProperties)
3471 {
3472 /* The valid usage section for this function says:
3473 *
3474 * "handleType must not be one of the handle types defined as opaque."
3475 *
3476 * Since we only support opaque fds, every handleType allowed here is unsupported and there are no FD properties to return.
3477 */
3478 return VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR;
3479 }
3480
3481 VkResult radv_ImportSemaphoreFdKHR(VkDevice _device,
3482 const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
3483 {
3484 RADV_FROM_HANDLE(radv_device, device, _device);
3485 RADV_FROM_HANDLE(radv_semaphore, sem, pImportSemaphoreFdInfo->semaphore);
3486 uint32_t syncobj_handle = 0;
3487 assert(pImportSemaphoreFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
3488
3489 int ret = device->ws->import_syncobj(device->ws, pImportSemaphoreFdInfo->fd, &syncobj_handle);
3490 if (ret != 0)
3491 return VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR;
3492
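/* Per the Vulkan spec, a temporary import (VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR)
 * only overrides the payload until the next wait on the semaphore, so it is
 * stored separately from the permanent payload in sem->syncobj.
 */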
3493 if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR) {
3494 sem->temp_syncobj = syncobj_handle;
3495 } else {
3496 sem->syncobj = syncobj_handle;
3497 }
3498 close(pImportSemaphoreFdInfo->fd);
3499 return VK_SUCCESS;
3500 }
3501
3502 VkResult radv_GetSemaphoreFdKHR(VkDevice _device,
3503 const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
3504 int *pFd)
3505 {
3506 RADV_FROM_HANDLE(radv_device, device, _device);
3507 RADV_FROM_HANDLE(radv_semaphore, sem, pGetFdInfo->semaphore);
3508 int ret;
3509 uint32_t syncobj_handle;
3510
3511 assert(pGetFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
3512 if (sem->temp_syncobj)
3513 syncobj_handle = sem->temp_syncobj;
3514 else
3515 syncobj_handle = sem->syncobj;
3516 ret = device->ws->export_syncobj(device->ws, syncobj_handle, pFd);
3517 if (ret)
3518 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
3519 return VK_SUCCESS;
3520 }
3521
3522 void radv_GetPhysicalDeviceExternalSemaphorePropertiesKHR(
3523 VkPhysicalDevice physicalDevice,
3524 const VkPhysicalDeviceExternalSemaphoreInfoKHR* pExternalSemaphoreInfo,
3525 VkExternalSemaphorePropertiesKHR* pExternalSemaphoreProperties)
3526 {
3527 if (pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR) {
3528 pExternalSemaphoreProperties->exportFromImportedHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
3529 pExternalSemaphoreProperties->compatibleHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
3530 pExternalSemaphoreProperties->externalSemaphoreFeatures = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
3531 VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
3532 } else {
3533 pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
3534 pExternalSemaphoreProperties->compatibleHandleTypes = 0;
3535 pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
3536 }
3537 }