anv: Implement VK_EXT_subgroup_size_control
src/intel/vulkan/anv_device.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <fcntl.h>
#include <xf86drm.h>
#include "drm-uapi/drm_fourcc.h"

#include "anv_private.h"
#include "util/strtod.h"
#include "util/debug.h"
#include "util/build_id.h"
#include "util/disk_cache.h"
#include "util/mesa-sha1.h"
#include "util/os_file.h"
#include "util/u_atomic.h"
#include "util/u_string.h"
#include "git_sha1.h"
#include "vk_util.h"
#include "common/gen_defines.h"
#include "compiler/glsl_types.h"

#include "genxml/gen7_pack.h"

/* This is probably far too big, but it reflects the max size used for
 * messages in OpenGL's KHR_debug.
 */
#define MAX_DEBUG_MESSAGE_LENGTH 4096

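/* Callback used by the shader compiler to forward debug messages to any
 * VK_EXT_debug_report callbacks the application has registered.
 */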
static void
compiler_debug_log(void *data, const char *fmt, ...)
{
   char str[MAX_DEBUG_MESSAGE_LENGTH];
   struct anv_device *device = (struct anv_device *)data;

   if (list_empty(&device->instance->debug_report_callbacks.callbacks))
      return;

   va_list args;
   va_start(args, fmt);
   (void) vsnprintf(str, MAX_DEBUG_MESSAGE_LENGTH, fmt, args);
   va_end(args);

   vk_debug_report(&device->instance->debug_report_callbacks,
                   VK_DEBUG_REPORT_DEBUG_BIT_EXT,
                   VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                   0, 0, 0, "anv", str);
}

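/* Callback used by the shader compiler to report performance issues when
 * INTEL_DEBUG=perf is set.
 */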
static void
compiler_perf_log(void *data, const char *fmt, ...)
{
   va_list args;
   va_start(args, fmt);

   if (unlikely(INTEL_DEBUG & DEBUG_PERF))
      intel_logd_v(fmt, args);

   va_end(args);
}

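/* Pick how much memory we can advertise in the Vulkan heaps: a fraction of
 * system RAM, clamped to 3/4 of the GTT.
 */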
static uint64_t
anv_compute_heap_size(int fd, uint64_t gtt_size)
{
   /* Query the total ram from the system */
   struct sysinfo info;
   sysinfo(&info);

   uint64_t total_ram = (uint64_t)info.totalram * (uint64_t)info.mem_unit;

   /* We don't want to burn too much ram with the GPU. If the user has 4GiB
    * or less, we use at most half. If they have more than 4GiB, we use 3/4.
    */
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   /* We also want to leave some padding for things we allocate in the driver,
    * so don't go over 3/4 of the GTT either.
    */
   uint64_t available_gtt = gtt_size * 3 / 4;

   return MIN2(available_ram, available_gtt);
}

112
113 static VkResult
114 anv_physical_device_init_heaps(struct anv_physical_device *device, int fd)
115 {
116 uint64_t gtt_size;
117 if (anv_gem_get_context_param(fd, 0, I915_CONTEXT_PARAM_GTT_SIZE,
118 &gtt_size) == -1) {
119 /* If, for whatever reason, we can't actually get the GTT size from the
120 * kernel (too old?) fall back to the aperture size.
121 */
122 anv_perf_warn(NULL, NULL,
123 "Failed to get I915_CONTEXT_PARAM_GTT_SIZE: %m");
124
125 if (anv_gem_get_aperture(fd, &gtt_size) == -1) {
126 return vk_errorf(NULL, NULL, VK_ERROR_INITIALIZATION_FAILED,
127 "failed to get aperture size: %m");
128 }
129 }
130
131 device->supports_48bit_addresses = (device->info.gen >= 8) &&
132 gtt_size > (4ULL << 30 /* GiB */);
133
134 uint64_t heap_size = anv_compute_heap_size(fd, gtt_size);
135
136 if (heap_size > (2ull << 30) && !device->supports_48bit_addresses) {
137 /* When running with an overridden PCI ID, we may get a GTT size from
138 * the kernel that is greater than 2 GiB but the execbuf check for 48bit
139 * address support can still fail. Just clamp the address space size to
140 * 2 GiB if we don't have 48-bit support.
141 */
      intel_logw("%s:%d: The kernel reported a GTT size larger than 2 GiB but "
                 "no support for 48-bit addresses",
                 __FILE__, __LINE__);
      heap_size = 2ull << 30;
   }

   if (heap_size <= 3ull * (1ull << 30)) {
      /* In this case, everything fits nicely into the 32-bit address space,
       * so there's no need for supporting 48bit addresses on client-allocated
       * memory objects.
       */
      device->memory.heap_count = 1;
      device->memory.heaps[0] = (struct anv_memory_heap) {
         .vma_start = LOW_HEAP_MIN_ADDRESS,
         .vma_size = LOW_HEAP_SIZE,
         .size = heap_size,
         .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
         .supports_48bit_addresses = false,
      };
   } else {
      /* Not everything will fit nicely into a 32-bit address space. In this
       * case we need a 64-bit heap. Advertise a small 32-bit heap and a
       * larger 48-bit heap. If we're in this case, then we have a total heap
       * size larger than 3GiB which most likely means they have 8 GiB of
       * video memory and so carving off 1 GiB for the 32-bit heap should be
       * reasonable.
       */
      const uint64_t heap_size_32bit = 1ull << 30;
      const uint64_t heap_size_48bit = heap_size - heap_size_32bit;

      assert(device->supports_48bit_addresses);

      device->memory.heap_count = 2;
      device->memory.heaps[0] = (struct anv_memory_heap) {
         .vma_start = HIGH_HEAP_MIN_ADDRESS,
         /* Leave the last 4GiB out of the high vma range, so that no state
          * base address + size can overflow 48 bits. For more information see
          * the comment about Wa32bitGeneralStateOffset in anv_allocator.c
          */
         .vma_size = gtt_size - (1ull << 32) - HIGH_HEAP_MIN_ADDRESS,
         .size = heap_size_48bit,
         .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
         .supports_48bit_addresses = true,
      };
      device->memory.heaps[1] = (struct anv_memory_heap) {
         .vma_start = LOW_HEAP_MIN_ADDRESS,
         .vma_size = LOW_HEAP_SIZE,
         .size = heap_size_32bit,
         .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
         .supports_48bit_addresses = false,
      };
   }

   uint32_t type_count = 0;
   for (uint32_t heap = 0; heap < device->memory.heap_count; heap++) {
      uint32_t valid_buffer_usage = ~0;

      /* There appears to be a hardware issue in the VF cache where it only
       * considers the bottom 32 bits of memory addresses. If you happen to
       * have two vertex buffers which get placed exactly 4 GiB apart and use
       * them in back-to-back draw calls, you can get collisions. In order to
       * solve this problem, we require vertex and index buffers be bound to
       * memory allocated out of the 32-bit heap.
       */
      if (device->memory.heaps[heap].supports_48bit_addresses) {
         valid_buffer_usage &= ~(VK_BUFFER_USAGE_INDEX_BUFFER_BIT |
                                 VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
      }

      if (device->info.has_llc) {
         /* Big core GPUs share LLC with the CPU and thus one memory type can be
          * both cached and coherent at the same time.
          */
         device->memory.types[type_count++] = (struct anv_memory_type) {
            .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                             VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                             VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
            .heapIndex = heap,
            .valid_buffer_usage = valid_buffer_usage,
         };
      } else {
         /* The spec requires that we expose a host-visible, coherent memory
          * type, but Atom GPUs don't share LLC. Thus we offer two memory types
          * to give the application a choice between cached, but not coherent and
          * coherent but uncached (WC though).
          */
         device->memory.types[type_count++] = (struct anv_memory_type) {
            .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                             VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
            .heapIndex = heap,
            .valid_buffer_usage = valid_buffer_usage,
         };
         device->memory.types[type_count++] = (struct anv_memory_type) {
            .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                             VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
            .heapIndex = heap,
            .valid_buffer_usage = valid_buffer_usage,
         };
      }
   }
   device->memory.type_count = type_count;

   return VK_SUCCESS;
}

static VkResult
anv_physical_device_init_uuids(struct anv_physical_device *device)
{
   const struct build_id_note *note =
      build_id_find_nhdr_for_addr(anv_physical_device_init_uuids);
   if (!note) {
      return vk_errorf(device->instance, device,
                       VK_ERROR_INITIALIZATION_FAILED,
                       "Failed to find build-id");
   }

   unsigned build_id_len = build_id_length(note);
   if (build_id_len < 20) {
      return vk_errorf(device->instance, device,
                       VK_ERROR_INITIALIZATION_FAILED,
                       "build-id too short. It needs to be a SHA");
   }

   memcpy(device->driver_build_sha1, build_id_data(note), 20);

   struct mesa_sha1 sha1_ctx;
   uint8_t sha1[20];
   STATIC_ASSERT(VK_UUID_SIZE <= sizeof(sha1));

   /* The pipeline cache UUID is used for determining when a pipeline cache is
    * invalid. It needs both a driver build and the PCI ID of the device.
    */
   _mesa_sha1_init(&sha1_ctx);
   _mesa_sha1_update(&sha1_ctx, build_id_data(note), build_id_len);
   _mesa_sha1_update(&sha1_ctx, &device->chipset_id,
                     sizeof(device->chipset_id));
   _mesa_sha1_update(&sha1_ctx, &device->always_use_bindless,
                     sizeof(device->always_use_bindless));
   _mesa_sha1_update(&sha1_ctx, &device->has_a64_buffer_access,
                     sizeof(device->has_a64_buffer_access));
   _mesa_sha1_update(&sha1_ctx, &device->has_bindless_images,
                     sizeof(device->has_bindless_images));
   _mesa_sha1_update(&sha1_ctx, &device->has_bindless_samplers,
                     sizeof(device->has_bindless_samplers));
   _mesa_sha1_final(&sha1_ctx, sha1);
   memcpy(device->pipeline_cache_uuid, sha1, VK_UUID_SIZE);

   /* The driver UUID is used for determining sharability of images and memory
    * between two Vulkan instances in separate processes. People who want to
    * share memory need to also check the device UUID (below) so all this
    * needs to be is the build-id.
    */
   memcpy(device->driver_uuid, build_id_data(note), VK_UUID_SIZE);

   /* The device UUID uniquely identifies the given device within the machine.
    * Since we never have more than one device, this doesn't need to be a real
    * UUID. However, on the off-chance that someone tries to use this to
    * cache pre-tiled images or something of the like, we use the PCI ID and
    * some bits of ISL info to ensure that this is safe.
    */
   _mesa_sha1_init(&sha1_ctx);
   _mesa_sha1_update(&sha1_ctx, &device->chipset_id,
                     sizeof(device->chipset_id));
   _mesa_sha1_update(&sha1_ctx, &device->isl_dev.has_bit6_swizzling,
                     sizeof(device->isl_dev.has_bit6_swizzling));
   _mesa_sha1_final(&sha1_ctx, sha1);
   memcpy(device->device_uuid, sha1, VK_UUID_SIZE);

   return VK_SUCCESS;
}

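/* The disk cache is keyed on the PCI ID and the driver build sha1 so that a
 * driver update invalidates previously cached shaders.
 */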
static void
anv_physical_device_init_disk_cache(struct anv_physical_device *device)
{
#ifdef ENABLE_SHADER_CACHE
   char renderer[10];
   MAYBE_UNUSED int len = snprintf(renderer, sizeof(renderer), "anv_%04x",
                                   device->chipset_id);
   assert(len == sizeof(renderer) - 2);

   char timestamp[41];
   _mesa_sha1_format(timestamp, device->driver_build_sha1);

   const uint64_t driver_flags =
      brw_get_compiler_config_value(device->compiler);
   device->disk_cache = disk_cache_create(renderer, timestamp, driver_flags);
#else
   device->disk_cache = NULL;
#endif
}

static void
anv_physical_device_free_disk_cache(struct anv_physical_device *device)
{
#ifdef ENABLE_SHADER_CACHE
   if (device->disk_cache)
      disk_cache_destroy(device->disk_cache);
#else
   assert(device->disk_cache == NULL);
#endif
}

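/* Returns the kernel's "MemAvailable" estimate from /proc/meminfo in bytes,
 * or 0 if it cannot be determined.
 */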
static uint64_t
get_available_system_memory(void)
{
   char *meminfo = os_read_file("/proc/meminfo");
   if (!meminfo)
      return 0;

   char *str = strstr(meminfo, "MemAvailable:");
   if (!str) {
      free(meminfo);
      return 0;
   }

   uint64_t kb_mem_available;
   if (sscanf(str, "MemAvailable: %" PRIu64, &kb_mem_available) == 1) {
      free(meminfo);
      return kb_mem_available << 10;
   }

   free(meminfo);
   return 0;
}

static VkResult
anv_physical_device_init(struct anv_physical_device *device,
                         struct anv_instance *instance,
                         drmDevicePtr drm_device)
{
   const char *primary_path = drm_device->nodes[DRM_NODE_PRIMARY];
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   VkResult result;
   int fd;
   int master_fd = -1;

   brw_process_intel_debug_variable();

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0)
      return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;

   assert(strlen(path) < ARRAY_SIZE(device->path));
   snprintf(device->path, ARRAY_SIZE(device->path), "%s", path);

   device->no_hw = getenv("INTEL_NO_HW") != NULL;

   const int pci_id_override = gen_get_pci_device_id_override();
   if (pci_id_override < 0) {
      device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
      if (!device->chipset_id) {
         result = vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
         goto fail;
      }
   } else {
      device->chipset_id = pci_id_override;
      device->no_hw = true;
   }

   device->pci_info.domain = drm_device->businfo.pci->domain;
   device->pci_info.bus = drm_device->businfo.pci->bus;
   device->pci_info.device = drm_device->businfo.pci->dev;
   device->pci_info.function = drm_device->businfo.pci->func;

   device->name = gen_get_device_name(device->chipset_id);
   if (!gen_get_device_info(device->chipset_id, &device->info)) {
      result = vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
      goto fail;
   }

   if (device->info.is_haswell) {
      intel_logw("Haswell Vulkan support is incomplete");
   } else if (device->info.gen == 7 && !device->info.is_baytrail) {
      intel_logw("Ivy Bridge Vulkan support is incomplete");
   } else if (device->info.gen == 7 && device->info.is_baytrail) {
      intel_logw("Bay Trail Vulkan support is incomplete");
   } else if (device->info.gen >= 8 && device->info.gen <= 11) {
      /* Gen8-11 fully supported */
   } else {
      result = vk_errorf(device->instance, device,
                         VK_ERROR_INCOMPATIBLE_DRIVER,
                         "Vulkan not yet supported on %s", device->name);
      goto fail;
   }

   device->cmd_parser_version = -1;
   if (device->info.gen == 7) {
      device->cmd_parser_version =
         anv_gem_get_param(fd, I915_PARAM_CMD_PARSER_VERSION);
      if (device->cmd_parser_version == -1) {
         result = vk_errorf(device->instance, device,
                            VK_ERROR_INITIALIZATION_FAILED,
                            "failed to get command parser version");
         goto fail;
      }
   }

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT)) {
      result = vk_errorf(device->instance, device,
                         VK_ERROR_INITIALIZATION_FAILED,
                         "kernel missing gem wait");
      goto fail;
   }

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2)) {
      result = vk_errorf(device->instance, device,
                         VK_ERROR_INITIALIZATION_FAILED,
                         "kernel missing execbuf2");
      goto fail;
   }

   if (!device->info.has_llc &&
       anv_gem_get_param(fd, I915_PARAM_MMAP_VERSION) < 1) {
      result = vk_errorf(device->instance, device,
                         VK_ERROR_INITIALIZATION_FAILED,
                         "kernel missing wc mmap");
      goto fail;
   }

   result = anv_physical_device_init_heaps(device, fd);
   if (result != VK_SUCCESS)
      goto fail;

   device->has_exec_async = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_ASYNC);
   device->has_exec_capture = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_CAPTURE);
   device->has_exec_fence = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_FENCE);
   device->has_syncobj = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_FENCE_ARRAY);
   device->has_syncobj_wait = device->has_syncobj &&
                              anv_gem_supports_syncobj_wait(fd);
   device->has_context_priority = anv_gem_has_context_priority(fd);

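   /* Soft-pinning lets the driver assign GPU virtual addresses itself
    * instead of relying on kernel relocations; we only use it together with
    * full 48-bit addressing.
    */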
   device->use_softpin = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_SOFTPIN)
      && device->supports_48bit_addresses;

   device->has_context_isolation =
      anv_gem_get_param(fd, I915_PARAM_HAS_CONTEXT_ISOLATION);

   device->always_use_bindless =
      env_var_as_boolean("ANV_ALWAYS_BINDLESS", false);

   /* We first got the A64 messages on broadwell and we can only use them if
    * we can pass addresses directly into the shader which requires softpin.
    */
   device->has_a64_buffer_access = device->info.gen >= 8 &&
                                   device->use_softpin;

   /* We first get bindless image access on Skylake and we can only really do
    * it if we don't have any relocations so we need softpin.
    */
   device->has_bindless_images = device->info.gen >= 9 &&
                                 device->use_softpin;

   /* We've had bindless samplers since Ivy Bridge (forever in Vulkan terms)
    * because it's just a matter of setting the sampler address in the sample
    * message header. However, we've not bothered to wire it up for vec4 so
    * we leave it disabled on gen7.
    */
   device->has_bindless_samplers = device->info.gen >= 8;

   device->has_mem_available = get_available_system_memory() != 0;

   /* Starting with Gen10, the timestamp frequency of the command streamer may
    * vary from one part to another. We can query the value from the kernel.
    */
   if (device->info.gen >= 10) {
      int timestamp_frequency =
         anv_gem_get_param(fd, I915_PARAM_CS_TIMESTAMP_FREQUENCY);

      if (timestamp_frequency < 0)
         intel_logw("Kernel 4.16-rc1+ required to properly query CS timestamp frequency");
      else
         device->info.timestamp_frequency = timestamp_frequency;
   }

   /* GENs prior to 8 do not support EU/Subslice info */
   if (device->info.gen >= 8) {
      device->subslice_total = anv_gem_get_param(fd, I915_PARAM_SUBSLICE_TOTAL);
      device->eu_total = anv_gem_get_param(fd, I915_PARAM_EU_TOTAL);

      /* Without this information, we cannot get the right Braswell
       * brandstrings, and we have to use conservative numbers for GPGPU on
       * many platforms, but otherwise, things will just work.
       */
      if (device->subslice_total < 1 || device->eu_total < 1) {
         intel_logw("Kernel 4.1 required to properly query GPU properties");
      }
   } else if (device->info.gen == 7) {
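      /* Pre-gen8 kernels can't report subslice info (see above), so derive
       * the count from the GT level instead (GT1 = 1, GT2 = 2, GT3 = 4).
       */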
      device->subslice_total = 1 << (device->info.gt - 1);
   }

   if (device->info.is_cherryview &&
       device->subslice_total > 0 && device->eu_total > 0) {
      /* Logical CS threads = EUs per subslice * num threads per EU */
      uint32_t max_cs_threads =
         device->eu_total / device->subslice_total * device->info.num_thread_per_eu;

      /* Fuse configurations may give more threads than expected, never less. */
      if (max_cs_threads > device->info.max_cs_threads)
         device->info.max_cs_threads = max_cs_threads;
   }

   device->compiler = brw_compiler_create(NULL, &device->info);
   if (device->compiler == NULL) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail;
   }
   device->compiler->shader_debug_log = compiler_debug_log;
   device->compiler->shader_perf_log = compiler_perf_log;
   device->compiler->supports_pull_constants = false;
   device->compiler->constant_buffer_0_is_relative =
      device->info.gen < 8 || !device->has_context_isolation;
   device->compiler->supports_shader_constants = true;

   /* Broadwell PRM says:
    *
    *    "Before Gen8, there was a historical configuration control field to
    *     swizzle address bit[6] for in X/Y tiling modes. This was set in three
    *     different places: TILECTL[1:0], ARB_MODE[5:4], and
    *     DISP_ARB_CTL[14:13].
    *
    *     For Gen8 and subsequent generations, the swizzle fields are all
    *     reserved, and the CPU's memory controller performs all address
    *     swizzling modifications."
    */
   bool swizzled =
      device->info.gen < 8 && anv_gem_get_bit6_swizzle(fd, I915_TILING_X);

   isl_device_init(&device->isl_dev, &device->info, swizzled);

   result = anv_physical_device_init_uuids(device);
   if (result != VK_SUCCESS)
      goto fail;

   anv_physical_device_init_disk_cache(device);

   if (instance->enabled_extensions.KHR_display) {
      master_fd = open(primary_path, O_RDWR | O_CLOEXEC);
      if (master_fd >= 0) {
         /* prod the device with a GETPARAM call which will fail if
          * we don't have permission to even render on this device
          */
         if (anv_gem_get_param(master_fd, I915_PARAM_CHIPSET_ID) == 0) {
            close(master_fd);
            master_fd = -1;
         }
      }
   }
   device->master_fd = master_fd;

   result = anv_init_wsi(device);
   if (result != VK_SUCCESS) {
      ralloc_free(device->compiler);
      anv_physical_device_free_disk_cache(device);
      goto fail;
   }

   anv_physical_device_get_supported_extensions(device,
                                                &device->supported_extensions);

   device->local_fd = fd;

   return VK_SUCCESS;

fail:
   close(fd);
   if (master_fd != -1)
      close(master_fd);
   return result;
}

static void
anv_physical_device_finish(struct anv_physical_device *device)
{
   anv_finish_wsi(device);
   anv_physical_device_free_disk_cache(device);
   ralloc_free(device->compiler);
   close(device->local_fd);
   if (device->master_fd >= 0)
      close(device->master_fd);
}

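/* Fallback allocation callbacks used when the application does not provide
 * its own VkAllocationCallbacks.
 */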
static void *
default_alloc_func(void *pUserData, size_t size, size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static void *
default_realloc_func(void *pUserData, void *pOriginal, size_t size,
                     size_t align, VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};

VkResult anv_EnumerateInstanceExtensionProperties(
    const char*                                 pLayerName,
    uint32_t*                                   pPropertyCount,
    VkExtensionProperties*                      pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   for (int i = 0; i < ANV_INSTANCE_EXTENSION_COUNT; i++) {
      if (anv_instance_extensions_supported.extensions[i]) {
         vk_outarray_append(&out, prop) {
            *prop = anv_instance_extensions[i];
         }
      }
   }

   return vk_outarray_status(&out);
}

VkResult anv_CreateInstance(
    const VkInstanceCreateInfo*                 pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkInstance*                                 pInstance)
{
   struct anv_instance *instance;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   struct anv_instance_extension_table enabled_extensions = {};
   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      int idx;
      for (idx = 0; idx < ANV_INSTANCE_EXTENSION_COUNT; idx++) {
         if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
                    anv_instance_extensions[idx].extensionName) == 0)
            break;
      }

      if (idx >= ANV_INSTANCE_EXTENSION_COUNT)
         return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);

      if (!anv_instance_extensions_supported.extensions[idx])
         return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);

      enabled_extensions.extensions[idx] = true;
   }

   instance = vk_alloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
                        VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->app_info = (struct anv_app_info) { .api_version = 0 };
   if (pCreateInfo->pApplicationInfo) {
      const VkApplicationInfo *app = pCreateInfo->pApplicationInfo;

      instance->app_info.app_name =
         vk_strdup(&instance->alloc, app->pApplicationName,
                   VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
      instance->app_info.app_version = app->applicationVersion;

      instance->app_info.engine_name =
         vk_strdup(&instance->alloc, app->pEngineName,
                   VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
      instance->app_info.engine_version = app->engineVersion;

      instance->app_info.api_version = app->apiVersion;
   }

   if (instance->app_info.api_version == 0)
      instance->app_info.api_version = VK_API_VERSION_1_0;

   instance->enabled_extensions = enabled_extensions;

   for (unsigned i = 0; i < ARRAY_SIZE(instance->dispatch.entrypoints); i++) {
      /* Vulkan requires that entrypoints for extensions which have not been
       * enabled must not be advertised.
       */
      if (!anv_instance_entrypoint_is_enabled(i, instance->app_info.api_version,
                                              &instance->enabled_extensions)) {
         instance->dispatch.entrypoints[i] = NULL;
      } else {
         instance->dispatch.entrypoints[i] =
            anv_instance_dispatch_table.entrypoints[i];
      }
   }

   for (unsigned i = 0; i < ARRAY_SIZE(instance->device_dispatch.entrypoints); i++) {
      /* Vulkan requires that entrypoints for extensions which have not been
       * enabled must not be advertised.
       */
      if (!anv_device_entrypoint_is_enabled(i, instance->app_info.api_version,
                                            &instance->enabled_extensions, NULL)) {
         instance->device_dispatch.entrypoints[i] = NULL;
      } else {
         instance->device_dispatch.entrypoints[i] =
            anv_device_dispatch_table.entrypoints[i];
      }
   }

   instance->physicalDeviceCount = -1;

   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
   if (result != VK_SUCCESS) {
      vk_free2(&default_alloc, pAllocator, instance);
      return vk_error(result);
   }

   instance->pipeline_cache_enabled =
      env_var_as_boolean("ANV_ENABLE_PIPELINE_CACHE", true);

   _mesa_locale_init();
   glsl_type_singleton_init_or_ref();

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = anv_instance_to_handle(instance);

   return VK_SUCCESS;
}

void anv_DestroyInstance(
    VkInstance                                  _instance,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);

   if (!instance)
      return;

   if (instance->physicalDeviceCount > 0) {
      /* We support at most one physical device. */
      assert(instance->physicalDeviceCount == 1);
      anv_physical_device_finish(&instance->physicalDevice);
   }

   vk_free(&instance->alloc, (char *)instance->app_info.app_name);
   vk_free(&instance->alloc, (char *)instance->app_info.engine_name);

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   glsl_type_singleton_decref();
   _mesa_locale_fini();

   vk_free(&instance->alloc, instance);
}

static VkResult
anv_enumerate_devices(struct anv_instance *instance)
{
   /* TODO: Check for more devices ? */
   drmDevicePtr devices[8];
   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
   int max_devices;

   instance->physicalDeviceCount = 0;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
   if (max_devices < 1)
      return VK_ERROR_INCOMPATIBLE_DRIVER;

   for (unsigned i = 0; i < (unsigned)max_devices; i++) {
      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
          devices[i]->bustype == DRM_BUS_PCI &&
          devices[i]->deviceinfo.pci->vendor_id == 0x8086) {

         result = anv_physical_device_init(&instance->physicalDevice,
                                           instance, devices[i]);
         if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
            break;
      }
   }
   drmFreeDevices(devices, max_devices);

   if (result == VK_SUCCESS)
      instance->physicalDeviceCount = 1;

   return result;
}

static VkResult
anv_instance_ensure_physical_device(struct anv_instance *instance)
{
   if (instance->physicalDeviceCount < 0) {
      VkResult result = anv_enumerate_devices(instance);
      if (result != VK_SUCCESS &&
          result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   return VK_SUCCESS;
}

VkResult anv_EnumeratePhysicalDevices(
    VkInstance                                  _instance,
    uint32_t*                                   pPhysicalDeviceCount,
    VkPhysicalDevice*                           pPhysicalDevices)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);

   VkResult result = anv_instance_ensure_physical_device(instance);
   if (result != VK_SUCCESS)
      return result;

   if (instance->physicalDeviceCount == 0)
      return VK_SUCCESS;

   assert(instance->physicalDeviceCount == 1);
   vk_outarray_append(&out, i) {
      *i = anv_physical_device_to_handle(&instance->physicalDevice);
   }

   return vk_outarray_status(&out);
}

VkResult anv_EnumeratePhysicalDeviceGroups(
    VkInstance                                  _instance,
    uint32_t*                                   pPhysicalDeviceGroupCount,
    VkPhysicalDeviceGroupProperties*            pPhysicalDeviceGroupProperties)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
                    pPhysicalDeviceGroupCount);

   VkResult result = anv_instance_ensure_physical_device(instance);
   if (result != VK_SUCCESS)
      return result;

   if (instance->physicalDeviceCount == 0)
      return VK_SUCCESS;

   assert(instance->physicalDeviceCount == 1);

   vk_outarray_append(&out, p) {
      p->physicalDeviceCount = 1;
      memset(p->physicalDevices, 0, sizeof(p->physicalDevices));
      p->physicalDevices[0] =
         anv_physical_device_to_handle(&instance->physicalDevice);
      p->subsetAllocation = false;

      vk_foreach_struct(ext, p->pNext)
         anv_debug_ignored_stype(ext->sType);
   }

   return vk_outarray_status(&out);
}

void anv_GetPhysicalDeviceFeatures(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceFeatures*                   pFeatures)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);

   *pFeatures = (VkPhysicalDeviceFeatures) {
      .robustBufferAccess = true,
      .fullDrawIndexUint32 = true,
      .imageCubeArray = true,
      .independentBlend = true,
      .geometryShader = true,
      .tessellationShader = true,
      .sampleRateShading = true,
      .dualSrcBlend = true,
      .logicOp = true,
      .multiDrawIndirect = true,
      .drawIndirectFirstInstance = true,
      .depthClamp = true,
      .depthBiasClamp = true,
      .fillModeNonSolid = true,
      .depthBounds = false,
      .wideLines = true,
      .largePoints = true,
      .alphaToOne = true,
      .multiViewport = true,
      .samplerAnisotropy = true,
      .textureCompressionETC2 = pdevice->info.gen >= 8 ||
                                pdevice->info.is_baytrail,
      .textureCompressionASTC_LDR = pdevice->info.gen >= 9, /* FINISHME CHV */
      .textureCompressionBC = true,
      .occlusionQueryPrecise = true,
      .pipelineStatisticsQuery = true,
      .fragmentStoresAndAtomics = true,
      .shaderTessellationAndGeometryPointSize = true,
      .shaderImageGatherExtended = true,
      .shaderStorageImageExtendedFormats = true,
      .shaderStorageImageMultisample = false,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = true,
      .shaderUniformBufferArrayDynamicIndexing = true,
      .shaderSampledImageArrayDynamicIndexing = true,
      .shaderStorageBufferArrayDynamicIndexing = true,
      .shaderStorageImageArrayDynamicIndexing = true,
      .shaderClipDistance = true,
      .shaderCullDistance = true,
      .shaderFloat64 = pdevice->info.gen >= 8 &&
                       pdevice->info.has_64bit_types,
      .shaderInt64 = pdevice->info.gen >= 8 &&
                     pdevice->info.has_64bit_types,
      .shaderInt16 = pdevice->info.gen >= 8,
      .shaderResourceMinLod = pdevice->info.gen >= 9,
      .variableMultisampleRate = true,
      .inheritedQueries = true,
   };

   /* We can't do image stores in vec4 shaders */
   pFeatures->vertexPipelineStoresAndAtomics =
      pdevice->compiler->scalar_stage[MESA_SHADER_VERTEX] &&
      pdevice->compiler->scalar_stage[MESA_SHADER_GEOMETRY];

   struct anv_app_info *app_info = &pdevice->instance->app_info;

   /* The new DOOM and Wolfenstein games require depthBounds without
    * checking for it. They seem to run fine without it so just claim it's
    * there and accept the consequences.
    */
   if (app_info->engine_name && strcmp(app_info->engine_name, "idTech") == 0)
      pFeatures->depthBounds = true;
}

void anv_GetPhysicalDeviceFeatures2(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceFeatures2*                  pFeatures)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
   anv_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);

   vk_foreach_struct(ext, pFeatures->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR: {
         VkPhysicalDevice8BitStorageFeaturesKHR *features =
            (VkPhysicalDevice8BitStorageFeaturesKHR *)ext;
         features->storageBuffer8BitAccess = pdevice->info.gen >= 8;
         features->uniformAndStorageBuffer8BitAccess = pdevice->info.gen >= 8;
         features->storagePushConstant8 = pdevice->info.gen >= 8;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
         VkPhysicalDevice16BitStorageFeatures *features =
            (VkPhysicalDevice16BitStorageFeatures *)ext;
         features->storageBuffer16BitAccess = pdevice->info.gen >= 8;
         features->uniformAndStorageBuffer16BitAccess = pdevice->info.gen >= 8;
         features->storagePushConstant16 = pdevice->info.gen >= 8;
         features->storageInputOutput16 = false;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT: {
         VkPhysicalDeviceBufferDeviceAddressFeaturesEXT *features = (void *)ext;
         features->bufferDeviceAddress = pdevice->has_a64_buffer_access;
         features->bufferDeviceAddressCaptureReplay = false;
         features->bufferDeviceAddressMultiDevice = false;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV: {
         VkPhysicalDeviceComputeShaderDerivativesFeaturesNV *features =
            (VkPhysicalDeviceComputeShaderDerivativesFeaturesNV *)ext;
         features->computeDerivativeGroupQuads = true;
         features->computeDerivativeGroupLinear = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
         VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
            (VkPhysicalDeviceConditionalRenderingFeaturesEXT*)ext;
         features->conditionalRendering = pdevice->info.gen >= 8 ||
                                          pdevice->info.is_haswell;
         features->inheritedConditionalRendering = pdevice->info.gen >= 8 ||
                                                   pdevice->info.is_haswell;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT: {
         VkPhysicalDeviceDepthClipEnableFeaturesEXT *features =
            (VkPhysicalDeviceDepthClipEnableFeaturesEXT *)ext;
         features->depthClipEnable = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR: {
         VkPhysicalDeviceFloat16Int8FeaturesKHR *features = (void *)ext;
         features->shaderFloat16 = pdevice->info.gen >= 8;
         features->shaderInt8 = pdevice->info.gen >= 8;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT: {
         VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT *features =
            (VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT *)ext;
         features->fragmentShaderSampleInterlock = pdevice->info.gen >= 9;
         features->fragmentShaderPixelInterlock = pdevice->info.gen >= 9;
         features->fragmentShaderShadingRateInterlock = false;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT: {
         VkPhysicalDeviceHostQueryResetFeaturesEXT *features =
            (VkPhysicalDeviceHostQueryResetFeaturesEXT *)ext;
         features->hostQueryReset = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
         VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
            (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *)ext;
         features->shaderInputAttachmentArrayDynamicIndexing = false;
         features->shaderUniformTexelBufferArrayDynamicIndexing = true;
         features->shaderStorageTexelBufferArrayDynamicIndexing = true;
         features->shaderUniformBufferArrayNonUniformIndexing = false;
         features->shaderSampledImageArrayNonUniformIndexing = true;
         features->shaderStorageBufferArrayNonUniformIndexing = true;
         features->shaderStorageImageArrayNonUniformIndexing = true;
         features->shaderInputAttachmentArrayNonUniformIndexing = false;
         features->shaderUniformTexelBufferArrayNonUniformIndexing = true;
         features->shaderStorageTexelBufferArrayNonUniformIndexing = true;
         features->descriptorBindingUniformBufferUpdateAfterBind = false;
         features->descriptorBindingSampledImageUpdateAfterBind = true;
         features->descriptorBindingStorageImageUpdateAfterBind = true;
         features->descriptorBindingStorageBufferUpdateAfterBind = true;
         features->descriptorBindingUniformTexelBufferUpdateAfterBind = true;
         features->descriptorBindingStorageTexelBufferUpdateAfterBind = true;
         features->descriptorBindingUpdateUnusedWhilePending = true;
         features->descriptorBindingPartiallyBound = true;
         features->descriptorBindingVariableDescriptorCount = false;
         features->runtimeDescriptorArray = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT: {
         VkPhysicalDeviceInlineUniformBlockFeaturesEXT *features =
            (VkPhysicalDeviceInlineUniformBlockFeaturesEXT *)ext;
         features->inlineUniformBlock = true;
         features->descriptorBindingInlineUniformBlockUpdateAfterBind = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
         VkPhysicalDeviceMultiviewFeatures *features =
            (VkPhysicalDeviceMultiviewFeatures *)ext;
         features->multiview = true;
         features->multiviewGeometryShader = true;
         features->multiviewTessellationShader = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES_KHR: {
         VkPhysicalDeviceImagelessFramebufferFeaturesKHR *features =
            (VkPhysicalDeviceImagelessFramebufferFeaturesKHR *)ext;
         features->imagelessFramebuffer = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
         VkPhysicalDeviceProtectedMemoryFeatures *features = (void *)ext;
         features->protectedMemory = false;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
         VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
            (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
         features->samplerYcbcrConversion = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES_EXT: {
         VkPhysicalDeviceScalarBlockLayoutFeaturesEXT *features =
            (VkPhysicalDeviceScalarBlockLayoutFeaturesEXT *)ext;
         features->scalarBlockLayout = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR: {
         VkPhysicalDeviceShaderAtomicInt64FeaturesKHR *features = (void *)ext;
         features->shaderBufferInt64Atomics =
            pdevice->info.gen >= 9 && pdevice->use_softpin;
         features->shaderSharedInt64Atomics = VK_FALSE;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT: {
         VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT *features = (void *)ext;
         features->shaderDemoteToHelperInvocation = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: {
         VkPhysicalDeviceShaderDrawParametersFeatures *features = (void *)ext;
         features->shaderDrawParameters = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT: {
         VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT *features =
            (VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT *)ext;
         features->texelBufferAlignment = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
         VkPhysicalDeviceVariablePointersFeatures *features = (void *)ext;
         features->variablePointersStorageBuffer = true;
         features->variablePointers = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT: {
         VkPhysicalDeviceTransformFeedbackFeaturesEXT *features =
            (VkPhysicalDeviceTransformFeedbackFeaturesEXT *)ext;
         features->transformFeedback = true;
         features->geometryStreams = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES_KHR: {
         VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR *features =
            (VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR *)ext;
         features->uniformBufferStandardLayout = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT: {
         VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *features =
            (VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *)ext;
         features->vertexAttributeInstanceRateDivisor = true;
         features->vertexAttributeInstanceRateZeroDivisor = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT: {
         VkPhysicalDeviceYcbcrImageArraysFeaturesEXT *features =
            (VkPhysicalDeviceYcbcrImageArraysFeaturesEXT *)ext;
         features->ycbcrImageArrays = true;
         break;
      }

      default:
         anv_debug_ignored_stype(ext->sType);
         break;
      }
   }
}

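/* These limits are fixed because uniform buffers and input attachments
 * always go through the binding table rather than any bindless mechanism.
 */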
#define MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BUFFERS 64

#define MAX_PER_STAGE_DESCRIPTOR_INPUT_ATTACHMENTS 64
#define MAX_DESCRIPTOR_SET_INPUT_ATTACHMENTS 256

void anv_GetPhysicalDeviceProperties(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceProperties*                 pProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
   const struct gen_device_info *devinfo = &pdevice->info;

   /* See assertions made when programming the buffer surface state. */
   const uint32_t max_raw_buffer_sz = devinfo->gen >= 7 ?
                                      (1ul << 30) : (1ul << 27);

   const uint32_t max_ssbos = pdevice->has_a64_buffer_access ? UINT16_MAX : 64;
   const uint32_t max_textures =
      pdevice->has_bindless_images ? UINT16_MAX : 128;
   const uint32_t max_samplers =
      pdevice->has_bindless_samplers ? UINT16_MAX :
      (devinfo->gen >= 8 || devinfo->is_haswell) ? 128 : 16;
   const uint32_t max_images =
      pdevice->has_bindless_images ? UINT16_MAX : MAX_IMAGES;

   /* The moment we have anything bindless, claim a high per-stage limit */
   const uint32_t max_per_stage =
      pdevice->has_a64_buffer_access ? UINT32_MAX :
      MAX_BINDING_TABLE_SIZE - MAX_RTS;

   VkSampleCountFlags sample_counts =
      isl_device_get_sample_counts(&pdevice->isl_dev);

   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 11),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 11),
      .maxTexelBufferElements = 128 * 1024 * 1024,
      .maxUniformBufferRange = (1ul << 27),
      .maxStorageBufferRange = max_raw_buffer_sz,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .maxSamplerAllocationCount = 64 * 1024,
      .bufferImageGranularity = 64, /* A cache line */
      .sparseAddressSpaceSize = 0,
      .maxBoundDescriptorSets = MAX_SETS,
      .maxPerStageDescriptorSamplers = max_samplers,
      .maxPerStageDescriptorUniformBuffers = MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BUFFERS,
      .maxPerStageDescriptorStorageBuffers = max_ssbos,
      .maxPerStageDescriptorSampledImages = max_textures,
      .maxPerStageDescriptorStorageImages = max_images,
      .maxPerStageDescriptorInputAttachments = MAX_PER_STAGE_DESCRIPTOR_INPUT_ATTACHMENTS,
      .maxPerStageResources = max_per_stage,
      .maxDescriptorSetSamplers = 6 * max_samplers, /* number of stages * maxPerStageDescriptorSamplers */
      .maxDescriptorSetUniformBuffers = 6 * MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BUFFERS, /* number of stages * maxPerStageDescriptorUniformBuffers */
      .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2,
      .maxDescriptorSetStorageBuffers = 6 * max_ssbos, /* number of stages * maxPerStageDescriptorStorageBuffers */
      .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2,
      .maxDescriptorSetSampledImages = 6 * max_textures, /* number of stages * maxPerStageDescriptorSampledImages */
      .maxDescriptorSetStorageImages = 6 * max_images, /* number of stages * maxPerStageDescriptorStorageImages */
      .maxDescriptorSetInputAttachments = MAX_DESCRIPTOR_SET_INPUT_ATTACHMENTS,
      .maxVertexInputAttributes = MAX_VBS,
      .maxVertexInputBindings = MAX_VBS,
      .maxVertexInputAttributeOffset = 2047,
      .maxVertexInputBindingStride = 2048,
      .maxVertexOutputComponents = 128,
      .maxTessellationGenerationLevel = 64,
      .maxTessellationPatchSize = 32,
      .maxTessellationControlPerVertexInputComponents = 128,
      .maxTessellationControlPerVertexOutputComponents = 128,
      .maxTessellationControlPerPatchOutputComponents = 128,
      .maxTessellationControlTotalOutputComponents = 2048,
      .maxTessellationEvaluationInputComponents = 128,
      .maxTessellationEvaluationOutputComponents = 128,
      .maxGeometryShaderInvocations = 32,
      .maxGeometryInputComponents = 64,
      .maxGeometryOutputComponents = 128,
      .maxGeometryOutputVertices = 256,
      .maxGeometryTotalOutputComponents = 1024,
      .maxFragmentInputComponents = 116, /* 128 components - (PSIZ, CLIP_DIST0, CLIP_DIST1) */
      .maxFragmentOutputAttachments = 8,
      .maxFragmentDualSrcAttachments = 1,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 64 * 1024,
      .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations = 32 * devinfo->max_cs_threads,
      .maxComputeWorkGroupSize = {
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
      },
      .subPixelPrecisionBits = 8,
      .subTexelPrecisionBits = 8,
      .mipmapPrecisionBits = 8,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectCount = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { INT16_MIN, INT16_MAX },
      .viewportSubPixelBits = 13, /* We take a float? */
      .minMemoryMapAlignment = 4096, /* A page */
      /* The dataport requires texel alignment so we need to assume a worst
       * case of R32G32B32A32 which is 16 bytes.
       */
      .minTexelBufferOffsetAlignment = 16,
      /* We need 16 for UBO block reads to work and 32 for push UBOs */
      .minUniformBufferOffsetAlignment = 32,
      .minStorageBufferOffsetAlignment = 4,
      .minTexelOffset = -8,
      .maxTexelOffset = 7,
      .minTexelGatherOffset = -32,
      .maxTexelGatherOffset = 31,
      .minInterpolationOffset = -0.5,
      .maxInterpolationOffset = 0.4375,
      .subPixelInterpolationOffsetBits = 4,
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 11),
      .framebufferColorSampleCounts = sample_counts,
      .framebufferDepthSampleCounts = sample_counts,
      .framebufferStencilSampleCounts = sample_counts,
      .framebufferNoAttachmentsSampleCounts = sample_counts,
      .maxColorAttachments = MAX_RTS,
      .sampledImageColorSampleCounts = sample_counts,
      .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .sampledImageDepthSampleCounts = sample_counts,
      .sampledImageStencilSampleCounts = sample_counts,
      .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .maxSampleMaskWords = 1,
      .timestampComputeAndGraphics = true,
      .timestampPeriod = 1000000000.0 / devinfo->timestamp_frequency,
      .maxClipDistances = 8,
      .maxCullDistances = 8,
      .maxCombinedClipAndCullDistances = 8,
      .discreteQueuePriorities = 2,
      .pointSizeRange = { 0.125, 255.875 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
      .strictLines = false, /* FINISHME */
      .standardSampleLocations = true,
      .optimalBufferCopyOffsetAlignment = 128,
      .optimalBufferCopyRowPitchAlignment = 128,
      .nonCoherentAtomSize = 64,
   };

   *pProperties = (VkPhysicalDeviceProperties) {
      .apiVersion = anv_physical_device_api_version(pdevice),
      .driverVersion = vk_get_driver_version(),
      .vendorID = 0x8086,
      .deviceID = pdevice->chipset_id,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = {0}, /* Broadwell doesn't do sparse. */
   };

   snprintf(pProperties->deviceName, sizeof(pProperties->deviceName),
            "%s", pdevice->name);
   memcpy(pProperties->pipelineCacheUUID,
          pdevice->pipeline_cache_uuid, VK_UUID_SIZE);
}

void anv_GetPhysicalDeviceProperties2(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceProperties2*                pProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);

   anv_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);

   vk_foreach_struct(ext, pProperties->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES_KHR: {
         VkPhysicalDeviceDepthStencilResolvePropertiesKHR *props =
            (VkPhysicalDeviceDepthStencilResolvePropertiesKHR *)ext;

         /* We support all of the depth resolve modes */
         props->supportedDepthResolveModes =
            VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR |
            VK_RESOLVE_MODE_AVERAGE_BIT_KHR |
            VK_RESOLVE_MODE_MIN_BIT_KHR |
            VK_RESOLVE_MODE_MAX_BIT_KHR;

         /* Average doesn't make sense for stencil so we don't support that */
         props->supportedStencilResolveModes =
            VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR;
         if (pdevice->info.gen >= 8) {
            /* The advanced stencil resolve modes currently require stencil
             * sampling be supported by the hardware.
             */
            props->supportedStencilResolveModes |=
               VK_RESOLVE_MODE_MIN_BIT_KHR |
               VK_RESOLVE_MODE_MAX_BIT_KHR;
         }

         props->independentResolveNone = true;
         props->independentResolve = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT: {
         VkPhysicalDeviceDescriptorIndexingPropertiesEXT *props =
            (VkPhysicalDeviceDescriptorIndexingPropertiesEXT *)ext;

         /* It's a bit hard to exactly map our implementation to the limits
          * described here. The bindless surface handle in the extended
          * message descriptors is 20 bits and it's an index into the table of
          * RENDER_SURFACE_STATE structs that starts at bindless surface base
          * address. Given that most things consume two surface states per
          * view (general/sampled for textures and write-only/read-write for
          * images), we claim 2^19 things.
          *
          * For SSBOs, we just use A64 messages so there is no real limit
          * there beyond the limit on the total size of a descriptor set.
          */
         const unsigned max_bindless_views = 1 << 19;

         props->maxUpdateAfterBindDescriptorsInAllPools = max_bindless_views;
         props->shaderUniformBufferArrayNonUniformIndexingNative = false;
         props->shaderSampledImageArrayNonUniformIndexingNative = false;
         props->shaderStorageBufferArrayNonUniformIndexingNative = true;
         props->shaderStorageImageArrayNonUniformIndexingNative = false;
         props->shaderInputAttachmentArrayNonUniformIndexingNative = false;
         props->robustBufferAccessUpdateAfterBind = true;
         props->quadDivergentImplicitLod = false;
         props->maxPerStageDescriptorUpdateAfterBindSamplers = max_bindless_views;
         props->maxPerStageDescriptorUpdateAfterBindUniformBuffers = MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BUFFERS;
         props->maxPerStageDescriptorUpdateAfterBindStorageBuffers = UINT32_MAX;
         props->maxPerStageDescriptorUpdateAfterBindSampledImages = max_bindless_views;
         props->maxPerStageDescriptorUpdateAfterBindStorageImages = max_bindless_views;
         props->maxPerStageDescriptorUpdateAfterBindInputAttachments = MAX_PER_STAGE_DESCRIPTOR_INPUT_ATTACHMENTS;
         props->maxPerStageUpdateAfterBindResources = UINT32_MAX;
         props->maxDescriptorSetUpdateAfterBindSamplers = max_bindless_views;
         props->maxDescriptorSetUpdateAfterBindUniformBuffers = 6 * MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BUFFERS;
         props->maxDescriptorSetUpdateAfterBindUniformBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2;
         props->maxDescriptorSetUpdateAfterBindStorageBuffers = UINT32_MAX;
         props->maxDescriptorSetUpdateAfterBindStorageBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2;
         props->maxDescriptorSetUpdateAfterBindSampledImages = max_bindless_views;
         props->maxDescriptorSetUpdateAfterBindStorageImages = max_bindless_views;
         props->maxDescriptorSetUpdateAfterBindInputAttachments = MAX_DESCRIPTOR_SET_INPUT_ATTACHMENTS;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR: {
         VkPhysicalDeviceDriverPropertiesKHR *driver_props =
            (VkPhysicalDeviceDriverPropertiesKHR *) ext;

         driver_props->driverID = VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA_KHR;
         snprintf(driver_props->driverName, VK_MAX_DRIVER_NAME_SIZE_KHR,
                  "Intel open-source Mesa driver");

         snprintf(driver_props->driverInfo, VK_MAX_DRIVER_INFO_SIZE_KHR,
                  "Mesa " PACKAGE_VERSION MESA_GIT_SHA1);

         driver_props->conformanceVersion = (VkConformanceVersionKHR) {
            .major = 1,
            .minor = 1,
            .subminor = 2,
            .patch = 0,
         };
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT: {
         VkPhysicalDeviceExternalMemoryHostPropertiesEXT *props =
            (VkPhysicalDeviceExternalMemoryHostPropertiesEXT *) ext;
         /* Userptr needs page aligned memory. */
         props->minImportedHostPointerAlignment = 4096;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
         VkPhysicalDeviceIDProperties *id_props =
            (VkPhysicalDeviceIDProperties *)ext;
         memcpy(id_props->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
         memcpy(id_props->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
         /* The LUID is for Windows. */
         id_props->deviceLUIDValid = false;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT: {
         VkPhysicalDeviceInlineUniformBlockPropertiesEXT *props =
            (VkPhysicalDeviceInlineUniformBlockPropertiesEXT *)ext;
         props->maxInlineUniformBlockSize = MAX_INLINE_UNIFORM_BLOCK_SIZE;
         props->maxPerStageDescriptorInlineUniformBlocks =
            MAX_INLINE_UNIFORM_BLOCK_DESCRIPTORS;
         props->maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks =
            MAX_INLINE_UNIFORM_BLOCK_DESCRIPTORS;
         props->maxDescriptorSetInlineUniformBlocks =
            MAX_INLINE_UNIFORM_BLOCK_DESCRIPTORS;
         props->maxDescriptorSetUpdateAfterBindInlineUniformBlocks =
            MAX_INLINE_UNIFORM_BLOCK_DESCRIPTORS;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
         VkPhysicalDeviceMaintenance3Properties *props =
            (VkPhysicalDeviceMaintenance3Properties *)ext;
         /* This value doesn't matter for us today as our per-stage
          * descriptors are the real limit.
          */
         props->maxPerSetDescriptors = 1024;
         props->maxMemoryAllocationSize = MAX_MEMORY_ALLOCATION_SIZE;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
         VkPhysicalDeviceMultiviewProperties *properties =
            (VkPhysicalDeviceMultiviewProperties *)ext;
         properties->maxMultiviewViewCount = 16;
         properties->maxMultiviewInstanceIndex = UINT32_MAX / 16;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT: {
         VkPhysicalDevicePCIBusInfoPropertiesEXT *properties =
            (VkPhysicalDevicePCIBusInfoPropertiesEXT *)ext;
         properties->pciDomain = pdevice->pci_info.domain;
         properties->pciBus = pdevice->pci_info.bus;
         properties->pciDevice = pdevice->pci_info.device;
         properties->pciFunction = pdevice->pci_info.function;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
         VkPhysicalDevicePointClippingProperties *properties =
            (VkPhysicalDevicePointClippingProperties *) ext;
         properties->pointClippingBehavior = VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES: {
         VkPhysicalDeviceProtectedMemoryProperties *props =
            (VkPhysicalDeviceProtectedMemoryProperties *)ext;
         props->protectedNoFault = false;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
         VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
            (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;

         properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT: {
         VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT *properties =
            (VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT *)ext;
         properties->filterMinmaxImageComponentMapping = pdevice->info.gen >= 9;
         properties->filterMinmaxSingleComponentFormats = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES: {
         VkPhysicalDeviceSubgroupProperties *properties = (void *)ext;

         properties->subgroupSize = BRW_SUBGROUP_SIZE;

         VkShaderStageFlags scalar_stages = 0;
         for (unsigned stage = 0; stage < MESA_SHADER_STAGES; stage++) {
            if (pdevice->compiler->scalar_stage[stage])
               scalar_stages |= mesa_to_vk_shader_stage(stage);
         }
         properties->supportedStages = scalar_stages;

         properties->supportedOperations = VK_SUBGROUP_FEATURE_BASIC_BIT |
                                           VK_SUBGROUP_FEATURE_VOTE_BIT |
                                           VK_SUBGROUP_FEATURE_ARITHMETIC_BIT |
                                           VK_SUBGROUP_FEATURE_BALLOT_BIT |
                                           VK_SUBGROUP_FEATURE_SHUFFLE_BIT |
                                           VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT |
                                           VK_SUBGROUP_FEATURE_CLUSTERED_BIT |
                                           VK_SUBGROUP_FEATURE_QUAD_BIT;
         properties->quadOperationsInAllStages = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT: {
         VkPhysicalDeviceSubgroupSizeControlPropertiesEXT *props =
            (VkPhysicalDeviceSubgroupSizeControlPropertiesEXT *)ext;
1596 STATIC_ASSERT(8 <= BRW_SUBGROUP_SIZE && BRW_SUBGROUP_SIZE <= 32);
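           /* Our shaders compile to SIMD8, SIMD16, or SIMD32, so the possible
            * subgroup sizes follow directly from the hardware execution sizes.
            */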
1597 props->minSubgroupSize = 8;
1598 props->maxSubgroupSize = 32;
1599 props->maxComputeWorkgroupSubgroups = pdevice->info.max_cs_threads;
1600 props->requiredSubgroupSizeStages = VK_SHADER_STAGE_COMPUTE_BIT;
1601 break;
1602 }
1603
1604 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT: {
1605 VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT *props =
1606 (VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT *)ext;
1607
1608 /* From the SKL PRM Vol. 2d, docs for RENDER_SURFACE_STATE::Surface
1609 * Base Address:
1610 *
1611 * "For SURFTYPE_BUFFER non-rendertarget surfaces, this field
1612 * specifies the base address of the first element of the surface,
1613 * computed in software by adding the surface base address to the
1614 * byte offset of the element in the buffer. The base address must
1615 * be aligned to element size."
1616 *
1617 * The typed dataport messages require that things be texel aligned.
1618 * Otherwise, we may just load/store the wrong data or, in the worst
1619 * case, there may be hangs.
1620 */
1621 props->storageTexelBufferOffsetAlignmentBytes = 16;
1622 props->storageTexelBufferOffsetSingleTexelAlignment = true;
1623
1624       /* The sampler, however, is much more forgiving: it can handle
1625        * arbitrary byte alignment for linear and buffer surfaces.  It's
1626        * hard to find a good PRM citation for this, but years of empirical
1627        * experience demonstrate that it is true.
1628 */
1629 props->uniformTexelBufferOffsetAlignmentBytes = 1;
1630 props->uniformTexelBufferOffsetSingleTexelAlignment = false;
1631 break;
1632 }
1633
1634 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT: {
1635 VkPhysicalDeviceTransformFeedbackPropertiesEXT *props =
1636 (VkPhysicalDeviceTransformFeedbackPropertiesEXT *)ext;
1637
1638 props->maxTransformFeedbackStreams = MAX_XFB_STREAMS;
1639 props->maxTransformFeedbackBuffers = MAX_XFB_BUFFERS;
1640 props->maxTransformFeedbackBufferSize = (1ull << 32);
1641 props->maxTransformFeedbackStreamDataSize = 128 * 4;
1642 props->maxTransformFeedbackBufferDataSize = 128 * 4;
1643 props->maxTransformFeedbackBufferDataStride = 2048;
1644 props->transformFeedbackQueries = true;
1645 props->transformFeedbackStreamsLinesTriangles = false;
1646 props->transformFeedbackRasterizationStreamSelect = false;
1647 props->transformFeedbackDraw = true;
1648 break;
1649 }
1650
1651 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT: {
1652 VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *props =
1653 (VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *)ext;
1654 /* We have to restrict this a bit for multiview */
1655 props->maxVertexAttribDivisor = UINT32_MAX / 16;
1656 break;
1657 }
1658
1659 default:
1660 anv_debug_ignored_stype(ext->sType);
1661 break;
1662 }
1663 }
1664 }
1665
1666 /* We support exactly one queue family. */
1667 static const VkQueueFamilyProperties
1668 anv_queue_family_properties = {
1669 .queueFlags = VK_QUEUE_GRAPHICS_BIT |
1670 VK_QUEUE_COMPUTE_BIT |
1671 VK_QUEUE_TRANSFER_BIT,
1672 .queueCount = 1,
1673 .timestampValidBits = 36, /* XXX: Real value here */
1674 .minImageTransferGranularity = { 1, 1, 1 },
1675 };
1676
1677 void anv_GetPhysicalDeviceQueueFamilyProperties(
1678 VkPhysicalDevice physicalDevice,
1679 uint32_t* pCount,
1680 VkQueueFamilyProperties* pQueueFamilyProperties)
1681 {
1682 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pCount);
1683
1684 vk_outarray_append(&out, p) {
1685 *p = anv_queue_family_properties;
1686 }
1687 }
1688
1689 void anv_GetPhysicalDeviceQueueFamilyProperties2(
1690 VkPhysicalDevice physicalDevice,
1691 uint32_t* pQueueFamilyPropertyCount,
1692 VkQueueFamilyProperties2* pQueueFamilyProperties)
1693 {
1695 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
1696
1697 vk_outarray_append(&out, p) {
1698 p->queueFamilyProperties = anv_queue_family_properties;
1699
1700 vk_foreach_struct(s, p->pNext) {
1701 anv_debug_ignored_stype(s->sType);
1702 }
1703 }
1704 }
1705
1706 void anv_GetPhysicalDeviceMemoryProperties(
1707 VkPhysicalDevice physicalDevice,
1708 VkPhysicalDeviceMemoryProperties* pMemoryProperties)
1709 {
1710 ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
1711
1712 pMemoryProperties->memoryTypeCount = physical_device->memory.type_count;
1713 for (uint32_t i = 0; i < physical_device->memory.type_count; i++) {
1714 pMemoryProperties->memoryTypes[i] = (VkMemoryType) {
1715 .propertyFlags = physical_device->memory.types[i].propertyFlags,
1716 .heapIndex = physical_device->memory.types[i].heapIndex,
1717 };
1718 }
1719
1720 pMemoryProperties->memoryHeapCount = physical_device->memory.heap_count;
1721 for (uint32_t i = 0; i < physical_device->memory.heap_count; i++) {
1722 pMemoryProperties->memoryHeaps[i] = (VkMemoryHeap) {
1723 .size = physical_device->memory.heaps[i].size,
1724 .flags = physical_device->memory.heaps[i].flags,
1725 };
1726 }
1727 }
1728
1729 static void
1730 anv_get_memory_budget(VkPhysicalDevice physicalDevice,
1731 VkPhysicalDeviceMemoryBudgetPropertiesEXT *memoryBudget)
1732 {
1733 ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
1734 uint64_t sys_available = get_available_system_memory();
1735 assert(sys_available > 0);
1736
1737 VkDeviceSize total_heaps_size = 0;
1738 for (size_t i = 0; i < device->memory.heap_count; i++)
1739 total_heaps_size += device->memory.heaps[i].size;
1740
1741 for (size_t i = 0; i < device->memory.heap_count; i++) {
1742 VkDeviceSize heap_size = device->memory.heaps[i].size;
1743 VkDeviceSize heap_used = device->memory.heaps[i].used;
1744 VkDeviceSize heap_budget;
1745
1746 double heap_proportion = (double) heap_size / total_heaps_size;
1747 VkDeviceSize sys_available_prop = sys_available * heap_proportion;
1748
1749 /*
1750 * Let's not incite the app to starve the system: report at most 90% of
1751 * available system memory.
1752 */
1753 uint64_t heap_available = sys_available_prop * 9 / 10;
1754 heap_budget = MIN2(heap_size, heap_used + heap_available);
1755
1756 /*
1757 * Round down to the nearest MB
1758 */
1759 heap_budget &= ~((1ull << 20) - 1);
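       /* Illustrative numbers: with a 4 GiB heap out of 8 GiB of total heap
        * size, heap_proportion is 0.5; with 6 GiB of system memory available,
        * sys_available_prop is 3 GiB and heap_available is 2.7 GiB.  If 1 GiB
        * is already used, heap_budget = MIN2(4 GiB, 3.7 GiB) = 3.7 GiB,
        * rounded down above to a 1 MiB boundary.
        */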
1760
1761 /*
1762 * The heapBudget value must be non-zero for array elements less than
1763 * VkPhysicalDeviceMemoryProperties::memoryHeapCount. The heapBudget
1764 * value must be less than or equal to VkMemoryHeap::size for each heap.
1765 */
1766 assert(0 < heap_budget && heap_budget <= heap_size);
1767
1768 memoryBudget->heapUsage[i] = heap_used;
1769 memoryBudget->heapBudget[i] = heap_budget;
1770 }
1771
1772 /* The heapBudget and heapUsage values must be zero for array elements
1773 * greater than or equal to VkPhysicalDeviceMemoryProperties::memoryHeapCount
1774 */
1775 for (uint32_t i = device->memory.heap_count; i < VK_MAX_MEMORY_HEAPS; i++) {
1776 memoryBudget->heapBudget[i] = 0;
1777 memoryBudget->heapUsage[i] = 0;
1778 }
1779 }
1780
1781 void anv_GetPhysicalDeviceMemoryProperties2(
1782 VkPhysicalDevice physicalDevice,
1783 VkPhysicalDeviceMemoryProperties2* pMemoryProperties)
1784 {
1785 anv_GetPhysicalDeviceMemoryProperties(physicalDevice,
1786 &pMemoryProperties->memoryProperties);
1787
1788 vk_foreach_struct(ext, pMemoryProperties->pNext) {
1789 switch (ext->sType) {
1790 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT:
1791 anv_get_memory_budget(physicalDevice, (void*)ext);
1792 break;
1793 default:
1794 anv_debug_ignored_stype(ext->sType);
1795 break;
1796 }
1797 }
1798 }
1799
1800 void
1801 anv_GetDeviceGroupPeerMemoryFeatures(
1802 VkDevice device,
1803 uint32_t heapIndex,
1804 uint32_t localDeviceIndex,
1805 uint32_t remoteDeviceIndex,
1806 VkPeerMemoryFeatureFlags* pPeerMemoryFeatures)
1807 {
1808 assert(localDeviceIndex == 0 && remoteDeviceIndex == 0);
1809 *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
1810 VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
1811 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
1812 VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
1813 }
1814
1815 PFN_vkVoidFunction anv_GetInstanceProcAddr(
1816 VkInstance _instance,
1817 const char* pName)
1818 {
1819 ANV_FROM_HANDLE(anv_instance, instance, _instance);
1820
1821    /* The Vulkan 1.0 spec for vkGetInstanceProcAddr has a table specifying
1822     * exactly when we have to return a valid function pointer, when we have
1823     * to return NULL, and when the result is left undefined.
1824     */
1825 if (pName == NULL)
1826 return NULL;
1827
1828 #define LOOKUP_ANV_ENTRYPOINT(entrypoint) \
1829 if (strcmp(pName, "vk" #entrypoint) == 0) \
1830 return (PFN_vkVoidFunction)anv_##entrypoint
1831
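    /* These are the global commands, which the loader may query with a NULL
     * instance, so resolve them before we touch the dispatch tables.
     */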
1832 LOOKUP_ANV_ENTRYPOINT(EnumerateInstanceExtensionProperties);
1833 LOOKUP_ANV_ENTRYPOINT(EnumerateInstanceLayerProperties);
1834 LOOKUP_ANV_ENTRYPOINT(EnumerateInstanceVersion);
1835 LOOKUP_ANV_ENTRYPOINT(CreateInstance);
1836
1837 #undef LOOKUP_ANV_ENTRYPOINT
1838
1839 if (instance == NULL)
1840 return NULL;
1841
1842 int idx = anv_get_instance_entrypoint_index(pName);
1843 if (idx >= 0)
1844 return instance->dispatch.entrypoints[idx];
1845
1846 idx = anv_get_device_entrypoint_index(pName);
1847 if (idx >= 0)
1848 return instance->device_dispatch.entrypoints[idx];
1849
1850 return NULL;
1851 }
1852
1853 /* With version 1+ of the loader interface, the ICD should expose
1854 * vk_icdGetInstanceProcAddr to work around certain LD_PRELOAD issues seen in apps.
1855 */
1856 PUBLIC
1857 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
1858 VkInstance instance,
1859 const char* pName);
1860
1861 PUBLIC
1862 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
1863 VkInstance instance,
1864 const char* pName)
1865 {
1866 return anv_GetInstanceProcAddr(instance, pName);
1867 }
1868
1869 PFN_vkVoidFunction anv_GetDeviceProcAddr(
1870 VkDevice _device,
1871 const char* pName)
1872 {
1873 ANV_FROM_HANDLE(anv_device, device, _device);
1874
1875 if (!device || !pName)
1876 return NULL;
1877
1878 int idx = anv_get_device_entrypoint_index(pName);
1879 if (idx < 0)
1880 return NULL;
1881
1882 return device->dispatch.entrypoints[idx];
1883 }
1884
1885 VkResult
1886 anv_CreateDebugReportCallbackEXT(VkInstance _instance,
1887 const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
1888 const VkAllocationCallbacks* pAllocator,
1889 VkDebugReportCallbackEXT* pCallback)
1890 {
1891 ANV_FROM_HANDLE(anv_instance, instance, _instance);
1892 return vk_create_debug_report_callback(&instance->debug_report_callbacks,
1893 pCreateInfo, pAllocator, &instance->alloc,
1894 pCallback);
1895 }
1896
1897 void
1898 anv_DestroyDebugReportCallbackEXT(VkInstance _instance,
1899 VkDebugReportCallbackEXT _callback,
1900 const VkAllocationCallbacks* pAllocator)
1901 {
1902 ANV_FROM_HANDLE(anv_instance, instance, _instance);
1903 vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
1904 _callback, pAllocator, &instance->alloc);
1905 }
1906
1907 void
1908 anv_DebugReportMessageEXT(VkInstance _instance,
1909 VkDebugReportFlagsEXT flags,
1910 VkDebugReportObjectTypeEXT objectType,
1911 uint64_t object,
1912 size_t location,
1913 int32_t messageCode,
1914 const char* pLayerPrefix,
1915 const char* pMessage)
1916 {
1917 ANV_FROM_HANDLE(anv_instance, instance, _instance);
1918 vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
1919 object, location, messageCode, pLayerPrefix, pMessage);
1920 }
1921
1922 static void
1923 anv_queue_init(struct anv_device *device, struct anv_queue *queue)
1924 {
1925 queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1926 queue->device = device;
1927 queue->flags = 0;
1928 }
1929
1930 static void
1931 anv_queue_finish(struct anv_queue *queue)
1932 {
1933 }
1934
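 /* Allocate state from the pool and copy the given data into its mapping. */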
1935 static struct anv_state
1936 anv_state_pool_emit_data(struct anv_state_pool *pool, size_t size, size_t align, const void *p)
1937 {
1938 struct anv_state state;
1939
1940 state = anv_state_pool_alloc(pool, size, align);
1941 memcpy(state.map, p, size);
1942
1943 return state;
1944 }
1945
1946 /* Haswell border color is a bit of a disaster. Float and unorm formats use a
1947 * straightforward 32-bit float color in the first 64 bytes. Instead of using
1948 * a nice float/integer union like Gen8+, Haswell specifies the integer border
1949 * color as a separate entry /after/ the float color. The layout of this entry
1950  * also depends on the format's bpp (with extra hacks for RG32), and the per-bpp layouts overlap one another.
1951 *
1952 * Since we don't know the format/bpp, we can't make any of the border colors
1953 * containing '1' work for all formats, as it would be in the wrong place for
1954 * some of them. We opt to make 32-bit integers work as this seems like the
1955 * most common option. Fortunately, transparent black works regardless, as
1956 * all zeroes is the same in every bit-size.
1957 */
1958 struct hsw_border_color {
1959 float float32[4];
1960 uint32_t _pad0[12];
1961 uint32_t uint32[4];
1962 uint32_t _pad1[108];
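    /* 16B float + 48B pad + 16B integer + 432B pad = 512 bytes per entry. */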
1963 };
1964
1965 struct gen8_border_color {
1966 union {
1967 float float32[4];
1968 uint32_t uint32[4];
1969 };
1970 /* Pad out to 64 bytes */
1971 uint32_t _pad[12];
1972 };
1973
1974 static void
1975 anv_device_init_border_colors(struct anv_device *device)
1976 {
1977 if (device->info.is_haswell) {
1978 static const struct hsw_border_color border_colors[] = {
1979 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 0.0 } },
1980 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 1.0 } },
1981 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = { .float32 = { 1.0, 1.0, 1.0, 1.0 } },
1982 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = { .uint32 = { 0, 0, 0, 0 } },
1983 [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = { .uint32 = { 0, 0, 0, 1 } },
1984 [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = { .uint32 = { 1, 1, 1, 1 } },
1985 };
1986
1987 device->border_colors =
1988 anv_state_pool_emit_data(&device->dynamic_state_pool,
1989 sizeof(border_colors), 512, border_colors);
1990 } else {
1991 static const struct gen8_border_color border_colors[] = {
1992 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 0.0 } },
1993 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 1.0 } },
1994 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = { .float32 = { 1.0, 1.0, 1.0, 1.0 } },
1995 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = { .uint32 = { 0, 0, 0, 0 } },
1996 [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = { .uint32 = { 0, 0, 0, 1 } },
1997 [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = { .uint32 = { 1, 1, 1, 1 } },
1998 };
1999
2000 device->border_colors =
2001 anv_state_pool_emit_data(&device->dynamic_state_pool,
2002 sizeof(border_colors), 64, border_colors);
2003 }
2004 }
2005
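 /* Set up a small BO containing nothing but MI_BATCH_BUFFER_END, so we always
  * have a valid batch to execute when a submission carries no real work.
  */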
2006 static void
2007 anv_device_init_trivial_batch(struct anv_device *device)
2008 {
2009 anv_bo_init_new(&device->trivial_batch_bo, device, 4096);
2010
2011 if (device->instance->physicalDevice.has_exec_async)
2012 device->trivial_batch_bo.flags |= EXEC_OBJECT_ASYNC;
2013
2014 if (device->instance->physicalDevice.use_softpin)
2015 device->trivial_batch_bo.flags |= EXEC_OBJECT_PINNED;
2016
2017 anv_vma_alloc(device, &device->trivial_batch_bo);
2018
2019 void *map = anv_gem_mmap(device, device->trivial_batch_bo.gem_handle,
2020 0, 4096, 0);
2021
2022 struct anv_batch batch = {
2023 .start = map,
2024 .next = map,
2025 .end = map + 4096,
2026 };
2027
2028 anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END, bbe);
2029 anv_batch_emit(&batch, GEN7_MI_NOOP, noop);
2030
2031 if (!device->info.has_llc)
2032 gen_clflush_range(map, batch.next - map);
2033
2034 anv_gem_munmap(map, device->trivial_batch_bo.size);
2035 }
2036
2037 VkResult anv_EnumerateDeviceExtensionProperties(
2038 VkPhysicalDevice physicalDevice,
2039 const char* pLayerName,
2040 uint32_t* pPropertyCount,
2041 VkExtensionProperties* pProperties)
2042 {
2043 ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
2044 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
2045
2046 for (int i = 0; i < ANV_DEVICE_EXTENSION_COUNT; i++) {
2047 if (device->supported_extensions.extensions[i]) {
2048 vk_outarray_append(&out, prop) {
2049 *prop = anv_device_extensions[i];
2050 }
2051 }
2052 }
2053
2054 return vk_outarray_status(&out);
2055 }
2056
2057 static void
2058 anv_device_init_dispatch(struct anv_device *device)
2059 {
2060 const struct anv_device_dispatch_table *genX_table;
2061 switch (device->info.gen) {
2062 case 11:
2063 genX_table = &gen11_device_dispatch_table;
2064 break;
2065 case 10:
2066 genX_table = &gen10_device_dispatch_table;
2067 break;
2068 case 9:
2069 genX_table = &gen9_device_dispatch_table;
2070 break;
2071 case 8:
2072 genX_table = &gen8_device_dispatch_table;
2073 break;
2074 case 7:
2075 if (device->info.is_haswell)
2076 genX_table = &gen75_device_dispatch_table;
2077 else
2078 genX_table = &gen7_device_dispatch_table;
2079 break;
2080 default:
2081 unreachable("unsupported gen\n");
2082 }
2083
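    /* Resolve each entrypoint: disabled entrypoints become NULL, gen-specific
     * implementations take precedence, and anything else falls back to the
     * generic table.
     */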
2084 for (unsigned i = 0; i < ARRAY_SIZE(device->dispatch.entrypoints); i++) {
2085       /* Vulkan requires that entrypoints for extensions which have not
2086        * been enabled are not advertised.
2087        */
2088 if (!anv_device_entrypoint_is_enabled(i, device->instance->app_info.api_version,
2089 &device->instance->enabled_extensions,
2090 &device->enabled_extensions)) {
2091 device->dispatch.entrypoints[i] = NULL;
2092 } else if (genX_table->entrypoints[i]) {
2093 device->dispatch.entrypoints[i] = genX_table->entrypoints[i];
2094 } else {
2095 device->dispatch.entrypoints[i] =
2096 anv_device_dispatch_table.entrypoints[i];
2097 }
2098 }
2099 }
2100
2101 static int
2102 vk_priority_to_gen(int priority)
2103 {
2104 switch (priority) {
2105 case VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT:
2106 return GEN_CONTEXT_LOW_PRIORITY;
2107 case VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT:
2108 return GEN_CONTEXT_MEDIUM_PRIORITY;
2109 case VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT:
2110 return GEN_CONTEXT_HIGH_PRIORITY;
2111 case VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT:
2112 return GEN_CONTEXT_REALTIME_PRIORITY;
2113 default:
2114 unreachable("Invalid priority");
2115 }
2116 }
2117
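 /* Gen10+ can source the HiZ fast-clear value from memory, so we keep a small
  * BO holding the default clear value (ANV_HZ_FC_VAL) for it to point at.
  */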
2118 static void
2119 anv_device_init_hiz_clear_value_bo(struct anv_device *device)
2120 {
2121 anv_bo_init_new(&device->hiz_clear_bo, device, 4096);
2122
2123 if (device->instance->physicalDevice.has_exec_async)
2124 device->hiz_clear_bo.flags |= EXEC_OBJECT_ASYNC;
2125
2126 if (device->instance->physicalDevice.use_softpin)
2127 device->hiz_clear_bo.flags |= EXEC_OBJECT_PINNED;
2128
2129 anv_vma_alloc(device, &device->hiz_clear_bo);
2130
2131 uint32_t *map = anv_gem_mmap(device, device->hiz_clear_bo.gem_handle,
2132 0, 4096, 0);
2133
2134 union isl_color_value hiz_clear = { .u32 = { 0, } };
2135 hiz_clear.f32[0] = ANV_HZ_FC_VAL;
2136
2137 memcpy(map, hiz_clear.u32, sizeof(hiz_clear.u32));
2138 anv_gem_munmap(map, device->hiz_clear_bo.size);
2139 }
2140
2141 static bool
2142 get_bo_from_pool(struct gen_batch_decode_bo *ret,
2143 struct anv_block_pool *pool,
2144 uint64_t address)
2145 {
2146 for (uint32_t i = 0; i < pool->nbos; i++) {
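       /* BO offsets are canonical (sign-extended) 48-bit addresses, so drop
        * the top 16 bits to match the addresses the decoder hands us.
        */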
2147 uint64_t bo_address = pool->bos[i].offset & (~0ull >> 16);
2148 uint32_t bo_size = pool->bos[i].size;
2149 if (address >= bo_address && address < (bo_address + bo_size)) {
2150 *ret = (struct gen_batch_decode_bo) {
2151 .addr = bo_address,
2152 .size = bo_size,
2153 .map = pool->bos[i].map,
2154 };
2155 return true;
2156 }
2157 }
2158 return false;
2159 }
2160
2161 /* Find the BO containing the given address, for batch decoding. */
2162 static struct gen_batch_decode_bo
2163 decode_get_bo(void *v_batch, bool ppgtt, uint64_t address)
2164 {
2165 struct anv_device *device = v_batch;
2166 struct gen_batch_decode_bo ret_bo = {};
2167
2168 assert(ppgtt);
2169
2170 if (get_bo_from_pool(&ret_bo, &device->dynamic_state_pool.block_pool, address))
2171 return ret_bo;
2172 if (get_bo_from_pool(&ret_bo, &device->instruction_state_pool.block_pool, address))
2173 return ret_bo;
2174 if (get_bo_from_pool(&ret_bo, &device->binding_table_pool.block_pool, address))
2175 return ret_bo;
2176 if (get_bo_from_pool(&ret_bo, &device->surface_state_pool.block_pool, address))
2177 return ret_bo;
2178
2179 if (!device->cmd_buffer_being_decoded)
2180 return (struct gen_batch_decode_bo) { };
2181
2182 struct anv_batch_bo **bo;
2183
2184 u_vector_foreach(bo, &device->cmd_buffer_being_decoded->seen_bbos) {
2185 /* The decoder zeroes out the top 16 bits, so we need to as well */
2186 uint64_t bo_address = (*bo)->bo.offset & (~0ull >> 16);
2187
2188 if (address >= bo_address && address < bo_address + (*bo)->bo.size) {
2189 return (struct gen_batch_decode_bo) {
2190 .addr = bo_address,
2191 .size = (*bo)->bo.size,
2192 .map = (*bo)->bo.map,
2193 };
2194 }
2195 }
2196
2197 return (struct gen_batch_decode_bo) { };
2198 }
2199
2200 VkResult anv_CreateDevice(
2201 VkPhysicalDevice physicalDevice,
2202 const VkDeviceCreateInfo* pCreateInfo,
2203 const VkAllocationCallbacks* pAllocator,
2204 VkDevice* pDevice)
2205 {
2206 ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
2207 VkResult result;
2208 struct anv_device *device;
2209
2210 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);
2211
2212 struct anv_device_extension_table enabled_extensions = { };
2213 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
2214 int idx;
2215 for (idx = 0; idx < ANV_DEVICE_EXTENSION_COUNT; idx++) {
2216 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
2217 anv_device_extensions[idx].extensionName) == 0)
2218 break;
2219 }
2220
2221 if (idx >= ANV_DEVICE_EXTENSION_COUNT)
2222 return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
2223
2224 if (!physical_device->supported_extensions.extensions[idx])
2225 return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
2226
2227 enabled_extensions.extensions[idx] = true;
2228 }
2229
2230 /* Check enabled features */
2231 if (pCreateInfo->pEnabledFeatures) {
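       /* VkPhysicalDeviceFeatures is just a struct of VkBool32s, so we can
        * walk the requested and supported features in lockstep.
        */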
2232 VkPhysicalDeviceFeatures supported_features;
2233 anv_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
2234 VkBool32 *supported_feature = (VkBool32 *)&supported_features;
2235 VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
2236 unsigned num_features = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
2237 for (uint32_t i = 0; i < num_features; i++) {
2238 if (enabled_feature[i] && !supported_feature[i])
2239 return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
2240 }
2241 }
2242
2243 /* Check requested queues and fail if we are requested to create any
2244 * queues with flags we don't support.
2245 */
2246 assert(pCreateInfo->queueCreateInfoCount > 0);
2247 for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
2248 if (pCreateInfo->pQueueCreateInfos[i].flags != 0)
2249 return vk_error(VK_ERROR_INITIALIZATION_FAILED);
2250 }
2251
2252 /* Check if client specified queue priority. */
2253 const VkDeviceQueueGlobalPriorityCreateInfoEXT *queue_priority =
2254 vk_find_struct_const(pCreateInfo->pQueueCreateInfos[0].pNext,
2255 DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT);
2256
2257 VkQueueGlobalPriorityEXT priority =
2258 queue_priority ? queue_priority->globalPriority :
2259 VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT;
2260
2261 device = vk_alloc2(&physical_device->instance->alloc, pAllocator,
2262 sizeof(*device), 8,
2263 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
2264 if (!device)
2265 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2266
2267 if (INTEL_DEBUG & DEBUG_BATCH) {
2268 const unsigned decode_flags =
2269 GEN_BATCH_DECODE_FULL |
2270 ((INTEL_DEBUG & DEBUG_COLOR) ? GEN_BATCH_DECODE_IN_COLOR : 0) |
2271 GEN_BATCH_DECODE_OFFSETS |
2272 GEN_BATCH_DECODE_FLOATS;
2273
2274 gen_batch_decode_ctx_init(&device->decoder_ctx,
2275 &physical_device->info,
2276 stderr, decode_flags, NULL,
2277 decode_get_bo, NULL, device);
2278 }
2279
2280 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
2281 device->instance = physical_device->instance;
2282 device->chipset_id = physical_device->chipset_id;
2283 device->no_hw = physical_device->no_hw;
2284 device->_lost = false;
2285
2286 if (pAllocator)
2287 device->alloc = *pAllocator;
2288 else
2289 device->alloc = physical_device->instance->alloc;
2290
2291 /* XXX(chadv): Can we dup() physicalDevice->fd here? */
2292 device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
2293 if (device->fd == -1) {
2294 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
2295 goto fail_device;
2296 }
2297
2298 device->context_id = anv_gem_create_context(device);
2299 if (device->context_id == -1) {
2300 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
2301 goto fail_fd;
2302 }
2303
2304 if (physical_device->use_softpin) {
2305 if (pthread_mutex_init(&device->vma_mutex, NULL) != 0) {
2306 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
2307 goto fail_fd;
2308 }
2309
2310       /* Keep the page at address zero out of the allocator. */
2311 struct anv_memory_heap *low_heap =
2312 &physical_device->memory.heaps[physical_device->memory.heap_count - 1];
2313 util_vma_heap_init(&device->vma_lo, low_heap->vma_start, low_heap->vma_size);
2314 device->vma_lo_available = low_heap->size;
2315
2316 struct anv_memory_heap *high_heap =
2317 &physical_device->memory.heaps[0];
2318 util_vma_heap_init(&device->vma_hi, high_heap->vma_start, high_heap->vma_size);
2319 device->vma_hi_available = physical_device->memory.heap_count == 1 ? 0 :
2320 high_heap->size;
2321 }
2322
2323 list_inithead(&device->memory_objects);
2324
2325 /* As per spec, the driver implementation may deny requests to acquire
2326 * a priority above the default priority (MEDIUM) if the caller does not
2327 * have sufficient privileges. In this scenario VK_ERROR_NOT_PERMITTED_EXT
2328 * is returned.
2329 */
2330 if (physical_device->has_context_priority) {
2331 int err = anv_gem_set_context_param(device->fd, device->context_id,
2332 I915_CONTEXT_PARAM_PRIORITY,
2333 vk_priority_to_gen(priority));
2334 if (err != 0 && priority > VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT) {
2335 result = vk_error(VK_ERROR_NOT_PERMITTED_EXT);
2336 goto fail_fd;
2337 }
2338 }
2339
2340 device->info = physical_device->info;
2341 device->isl_dev = physical_device->isl_dev;
2342
2343 /* On Broadwell and later, we can use batch chaining to more efficiently
2344     * implement growing command buffers.  Prior to Broadwell, the kernel
2345 * command parser gets in the way and we have to fall back to growing
2346 * the batch.
2347 */
2348 device->can_chain_batches = device->info.gen >= 8;
2349
2350 device->robust_buffer_access = pCreateInfo->pEnabledFeatures &&
2351 pCreateInfo->pEnabledFeatures->robustBufferAccess;
2352 device->enabled_extensions = enabled_extensions;
2353
2354 anv_device_init_dispatch(device);
2355
2356 if (pthread_mutex_init(&device->mutex, NULL) != 0) {
2357 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
2358 goto fail_context_id;
2359 }
2360
2361 pthread_condattr_t condattr;
2362 if (pthread_condattr_init(&condattr) != 0) {
2363 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
2364 goto fail_mutex;
2365 }
2366 if (pthread_condattr_setclock(&condattr, CLOCK_MONOTONIC) != 0) {
2367 pthread_condattr_destroy(&condattr);
2368 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
2369 goto fail_mutex;
2370 }
2371 if (pthread_cond_init(&device->queue_submit, &condattr) != 0) {
2372 pthread_condattr_destroy(&condattr);
2373 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
2374 goto fail_mutex;
2375 }
2376 pthread_condattr_destroy(&condattr);
2377
2378 uint64_t bo_flags =
2379 (physical_device->supports_48bit_addresses ? EXEC_OBJECT_SUPPORTS_48B_ADDRESS : 0) |
2380 (physical_device->has_exec_async ? EXEC_OBJECT_ASYNC : 0) |
2381 (physical_device->has_exec_capture ? EXEC_OBJECT_CAPTURE : 0) |
2382 (physical_device->use_softpin ? EXEC_OBJECT_PINNED : 0);
2383
2384 anv_bo_pool_init(&device->batch_bo_pool, device, bo_flags);
2385
2386 result = anv_bo_cache_init(&device->bo_cache);
2387 if (result != VK_SUCCESS)
2388 goto fail_batch_bo_pool;
2389
2390 if (!physical_device->use_softpin)
2391 bo_flags &= ~EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
2392
2393 result = anv_state_pool_init(&device->dynamic_state_pool, device,
2394 DYNAMIC_STATE_POOL_MIN_ADDRESS,
2395 16384,
2396 bo_flags);
2397 if (result != VK_SUCCESS)
2398 goto fail_bo_cache;
2399
2400 result = anv_state_pool_init(&device->instruction_state_pool, device,
2401 INSTRUCTION_STATE_POOL_MIN_ADDRESS,
2402 16384,
2403 bo_flags);
2404 if (result != VK_SUCCESS)
2405 goto fail_dynamic_state_pool;
2406
2407 result = anv_state_pool_init(&device->surface_state_pool, device,
2408 SURFACE_STATE_POOL_MIN_ADDRESS,
2409 4096,
2410 bo_flags);
2411 if (result != VK_SUCCESS)
2412 goto fail_instruction_state_pool;
2413
2414 if (physical_device->use_softpin) {
2415 result = anv_state_pool_init(&device->binding_table_pool, device,
2416 BINDING_TABLE_POOL_MIN_ADDRESS,
2417 4096,
2418 bo_flags);
2419 if (result != VK_SUCCESS)
2420 goto fail_surface_state_pool;
2421 }
2422
2423 result = anv_bo_init_new(&device->workaround_bo, device, 4096);
2424 if (result != VK_SUCCESS)
2425 goto fail_binding_table_pool;
2426
2427 if (physical_device->use_softpin)
2428 device->workaround_bo.flags |= EXEC_OBJECT_PINNED;
2429
2430 if (!anv_vma_alloc(device, &device->workaround_bo))
2431 goto fail_workaround_bo;
2432
2433 anv_device_init_trivial_batch(device);
2434
2435 if (device->info.gen >= 10)
2436 anv_device_init_hiz_clear_value_bo(device);
2437
2438 anv_scratch_pool_init(device, &device->scratch_pool);
2439
2440 anv_queue_init(device, &device->queue);
2441
2442 switch (device->info.gen) {
2443 case 7:
2444 if (!device->info.is_haswell)
2445 result = gen7_init_device_state(device);
2446 else
2447 result = gen75_init_device_state(device);
2448 break;
2449 case 8:
2450 result = gen8_init_device_state(device);
2451 break;
2452 case 9:
2453 result = gen9_init_device_state(device);
2454 break;
2455 case 10:
2456 result = gen10_init_device_state(device);
2457 break;
2458 case 11:
2459 result = gen11_init_device_state(device);
2460 break;
2461 default:
2462 /* Shouldn't get here as we don't create physical devices for any other
2463 * gens. */
2464 unreachable("unhandled gen");
2465 }
2466 if (result != VK_SUCCESS)
2467 goto fail_workaround_bo;
2468
2469 anv_pipeline_cache_init(&device->default_pipeline_cache, device, true);
2470
2471 anv_device_init_blorp(device);
2472
2473 anv_device_init_border_colors(device);
2474
2475 *pDevice = anv_device_to_handle(device);
2476
2477 return VK_SUCCESS;
2478
2479 fail_workaround_bo:
2480 anv_queue_finish(&device->queue);
2481 anv_scratch_pool_finish(device, &device->scratch_pool);
2482 anv_gem_munmap(device->workaround_bo.map, device->workaround_bo.size);
2483 anv_gem_close(device, device->workaround_bo.gem_handle);
2484 fail_binding_table_pool:
2485 if (physical_device->use_softpin)
2486 anv_state_pool_finish(&device->binding_table_pool);
2487 fail_surface_state_pool:
2488 anv_state_pool_finish(&device->surface_state_pool);
2489 fail_instruction_state_pool:
2490 anv_state_pool_finish(&device->instruction_state_pool);
2491 fail_dynamic_state_pool:
2492 anv_state_pool_finish(&device->dynamic_state_pool);
2493 fail_bo_cache:
2494 anv_bo_cache_finish(&device->bo_cache);
2495 fail_batch_bo_pool:
2496 anv_bo_pool_finish(&device->batch_bo_pool);
2497 pthread_cond_destroy(&device->queue_submit);
2498 fail_mutex:
2499 pthread_mutex_destroy(&device->mutex);
2500 fail_context_id:
2501 anv_gem_destroy_context(device, device->context_id);
2502 fail_fd:
2503 close(device->fd);
2504 fail_device:
2505 vk_free(&device->alloc, device);
2506
2507 return result;
2508 }
2509
2510 void anv_DestroyDevice(
2511 VkDevice _device,
2512 const VkAllocationCallbacks* pAllocator)
2513 {
2514 ANV_FROM_HANDLE(anv_device, device, _device);
2515 struct anv_physical_device *physical_device;
2516
2517 if (!device)
2518 return;
2519
2520 physical_device = &device->instance->physicalDevice;
2521
2522 anv_device_finish_blorp(device);
2523
2524 anv_pipeline_cache_finish(&device->default_pipeline_cache);
2525
2526 anv_queue_finish(&device->queue);
2527
2528 #ifdef HAVE_VALGRIND
2529 /* We only need to free these to prevent valgrind errors. The backing
2530 * BO will go away in a couple of lines so we don't actually leak.
2531 */
2532 anv_state_pool_free(&device->dynamic_state_pool, device->border_colors);
2533 #endif
2534
2535 anv_scratch_pool_finish(device, &device->scratch_pool);
2536
2537 anv_gem_munmap(device->workaround_bo.map, device->workaround_bo.size);
2538 anv_vma_free(device, &device->workaround_bo);
2539 anv_gem_close(device, device->workaround_bo.gem_handle);
2540
2541 anv_vma_free(device, &device->trivial_batch_bo);
2542 anv_gem_close(device, device->trivial_batch_bo.gem_handle);
2543 if (device->info.gen >= 10)
2544 anv_gem_close(device, device->hiz_clear_bo.gem_handle);
2545
2546 if (physical_device->use_softpin)
2547 anv_state_pool_finish(&device->binding_table_pool);
2548 anv_state_pool_finish(&device->surface_state_pool);
2549 anv_state_pool_finish(&device->instruction_state_pool);
2550 anv_state_pool_finish(&device->dynamic_state_pool);
2551
2552 anv_bo_cache_finish(&device->bo_cache);
2553
2554 anv_bo_pool_finish(&device->batch_bo_pool);
2555
2556 pthread_cond_destroy(&device->queue_submit);
2557 pthread_mutex_destroy(&device->mutex);
2558
2559 anv_gem_destroy_context(device, device->context_id);
2560
2561 if (INTEL_DEBUG & DEBUG_BATCH)
2562 gen_batch_decode_ctx_finish(&device->decoder_ctx);
2563
2564 close(device->fd);
2565
2566 vk_free(&device->alloc, device);
2567 }
2568
2569 VkResult anv_EnumerateInstanceLayerProperties(
2570 uint32_t* pPropertyCount,
2571 VkLayerProperties* pProperties)
2572 {
2573 if (pProperties == NULL) {
2574 *pPropertyCount = 0;
2575 return VK_SUCCESS;
2576 }
2577
2578 /* None supported at this time */
2579 return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
2580 }
2581
2582 VkResult anv_EnumerateDeviceLayerProperties(
2583 VkPhysicalDevice physicalDevice,
2584 uint32_t* pPropertyCount,
2585 VkLayerProperties* pProperties)
2586 {
2587 if (pProperties == NULL) {
2588 *pPropertyCount = 0;
2589 return VK_SUCCESS;
2590 }
2591
2592 /* None supported at this time */
2593 return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
2594 }
2595
2596 void anv_GetDeviceQueue(
2597 VkDevice _device,
2598 uint32_t queueNodeIndex,
2599 uint32_t queueIndex,
2600 VkQueue* pQueue)
2601 {
2602 ANV_FROM_HANDLE(anv_device, device, _device);
2603
2604 assert(queueIndex == 0);
2605
2606 *pQueue = anv_queue_to_handle(&device->queue);
2607 }
2608
2609 void anv_GetDeviceQueue2(
2610 VkDevice _device,
2611 const VkDeviceQueueInfo2* pQueueInfo,
2612 VkQueue* pQueue)
2613 {
2614 ANV_FROM_HANDLE(anv_device, device, _device);
2615
2616 assert(pQueueInfo->queueIndex == 0);
2617
2618 if (pQueueInfo->flags == device->queue.flags)
2619 *pQueue = anv_queue_to_handle(&device->queue);
2620 else
2621 *pQueue = NULL;
2622 }
2623
2624 VkResult
2625 _anv_device_set_lost(struct anv_device *device,
2626 const char *file, int line,
2627 const char *msg, ...)
2628 {
2629 VkResult err;
2630 va_list ap;
2631
2632 device->_lost = true;
2633
2634 va_start(ap, msg);
2635 err = __vk_errorv(device->instance, device,
2636 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2637 VK_ERROR_DEVICE_LOST, file, line, msg, ap);
2638 va_end(ap);
2639
2640 if (env_var_as_boolean("ANV_ABORT_ON_DEVICE_LOSS", false))
2641 abort();
2642
2643 return err;
2644 }
2645
2646 VkResult
2647 anv_device_query_status(struct anv_device *device)
2648 {
2649 /* This isn't likely as most of the callers of this function already check
2650 * for it. However, it doesn't hurt to check and it potentially lets us
2651 * avoid an ioctl.
2652 */
2653 if (anv_device_is_lost(device))
2654 return VK_ERROR_DEVICE_LOST;
2655
2656 uint32_t active, pending;
2657 int ret = anv_gem_gpu_get_reset_stats(device, &active, &pending);
2658 if (ret == -1) {
2659 /* We don't know the real error. */
2660 return anv_device_set_lost(device, "get_reset_stats failed: %m");
2661 }
2662
2663 if (active) {
2664 return anv_device_set_lost(device, "GPU hung on one of our command buffers");
2665 } else if (pending) {
2666 return anv_device_set_lost(device, "GPU hung with commands in-flight");
2667 }
2668
2669 return VK_SUCCESS;
2670 }
2671
2672 VkResult
2673 anv_device_bo_busy(struct anv_device *device, struct anv_bo *bo)
2674 {
2675 /* Note: This only returns whether or not the BO is in use by an i915 GPU.
2676 * Other usages of the BO (such as on different hardware) will not be
2677 * flagged as "busy" by this ioctl. Use with care.
2678 */
2679 int ret = anv_gem_busy(device, bo->gem_handle);
2680 if (ret == 1) {
2681 return VK_NOT_READY;
2682 } else if (ret == -1) {
2683 /* We don't know the real error. */
2684 return anv_device_set_lost(device, "gem wait failed: %m");
2685 }
2686
2687 /* Query for device status after the busy call. If the BO we're checking
2688 * got caught in a GPU hang we don't want to return VK_SUCCESS to the
2689 * client because it clearly doesn't have valid data. Yes, this most
2690 * likely means an ioctl, but we just did an ioctl to query the busy status
2691 * so it's no great loss.
2692 */
2693 return anv_device_query_status(device);
2694 }
2695
2696 VkResult
2697 anv_device_wait(struct anv_device *device, struct anv_bo *bo,
2698 int64_t timeout)
2699 {
2700 int ret = anv_gem_wait(device, bo->gem_handle, &timeout);
2701 if (ret == -1 && errno == ETIME) {
2702 return VK_TIMEOUT;
2703 } else if (ret == -1) {
2704 /* We don't know the real error. */
2705 return anv_device_set_lost(device, "gem wait failed: %m");
2706 }
2707
2708 /* Query for device status after the wait. If the BO we're waiting on got
2709 * caught in a GPU hang we don't want to return VK_SUCCESS to the client
2710 * because it clearly doesn't have valid data. Yes, this most likely means
2711 * an ioctl, but we just did an ioctl to wait so it's no great loss.
2712 */
2713 return anv_device_query_status(device);
2714 }
2715
2716 VkResult anv_DeviceWaitIdle(
2717 VkDevice _device)
2718 {
2719 ANV_FROM_HANDLE(anv_device, device, _device);
2720 if (anv_device_is_lost(device))
2721 return VK_ERROR_DEVICE_LOST;
2722
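    /* There is no dedicated wait-idle ioctl, so build a batch containing only
     * MI_BATCH_BUFFER_END and let the simple-batch path submit it and wait on
     * it, which orders us behind all previously submitted work.
     */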
2723 struct anv_batch batch;
2724
2725 uint32_t cmds[8];
2726 batch.start = batch.next = cmds;
2727 batch.end = (void *) cmds + sizeof(cmds);
2728
2729 anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END, bbe);
2730 anv_batch_emit(&batch, GEN7_MI_NOOP, noop);
2731
2732 return anv_device_submit_simple_batch(device, &batch);
2733 }
2734
2735 bool
2736 anv_vma_alloc(struct anv_device *device, struct anv_bo *bo)
2737 {
2738 if (!(bo->flags & EXEC_OBJECT_PINNED))
2739 return true;
2740
2741 pthread_mutex_lock(&device->vma_mutex);
2742
2743 bo->offset = 0;
2744
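    /* Prefer the high heap for BOs that support 48-bit addresses, falling
     * back to the low heap.  The heaps hand back 48-bit addresses, which we
     * sign-extend into canonical form before the kernel sees them.
     */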
2745 if (bo->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS &&
2746 device->vma_hi_available >= bo->size) {
2747 uint64_t addr = util_vma_heap_alloc(&device->vma_hi, bo->size, 4096);
2748 if (addr) {
2749 bo->offset = gen_canonical_address(addr);
2750 assert(addr == gen_48b_address(bo->offset));
2751 device->vma_hi_available -= bo->size;
2752 }
2753 }
2754
2755 if (bo->offset == 0 && device->vma_lo_available >= bo->size) {
2756 uint64_t addr = util_vma_heap_alloc(&device->vma_lo, bo->size, 4096);
2757 if (addr) {
2758 bo->offset = gen_canonical_address(addr);
2759 assert(addr == gen_48b_address(bo->offset));
2760 device->vma_lo_available -= bo->size;
2761 }
2762 }
2763
2764 pthread_mutex_unlock(&device->vma_mutex);
2765
2766 return bo->offset != 0;
2767 }
2768
2769 void
2770 anv_vma_free(struct anv_device *device, struct anv_bo *bo)
2771 {
2772 if (!(bo->flags & EXEC_OBJECT_PINNED))
2773 return;
2774
2775 const uint64_t addr_48b = gen_48b_address(bo->offset);
2776
2777 pthread_mutex_lock(&device->vma_mutex);
2778
2779 if (addr_48b >= LOW_HEAP_MIN_ADDRESS &&
2780 addr_48b <= LOW_HEAP_MAX_ADDRESS) {
2781 util_vma_heap_free(&device->vma_lo, addr_48b, bo->size);
2782 device->vma_lo_available += bo->size;
2783 } else {
2784 MAYBE_UNUSED const struct anv_physical_device *physical_device =
2785 &device->instance->physicalDevice;
2786 assert(addr_48b >= physical_device->memory.heaps[0].vma_start &&
2787 addr_48b < (physical_device->memory.heaps[0].vma_start +
2788 physical_device->memory.heaps[0].vma_size));
2789 util_vma_heap_free(&device->vma_hi, addr_48b, bo->size);
2790 device->vma_hi_available += bo->size;
2791 }
2792
2793 pthread_mutex_unlock(&device->vma_mutex);
2794
2795 bo->offset = 0;
2796 }
2797
2798 VkResult
2799 anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size)
2800 {
2801 uint32_t gem_handle = anv_gem_create(device, size);
2802 if (!gem_handle)
2803 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
2804
2805 anv_bo_init(bo, gem_handle, size);
2806
2807 return VK_SUCCESS;
2808 }
2809
2810 VkResult anv_AllocateMemory(
2811 VkDevice _device,
2812 const VkMemoryAllocateInfo* pAllocateInfo,
2813 const VkAllocationCallbacks* pAllocator,
2814 VkDeviceMemory* pMem)
2815 {
2816 ANV_FROM_HANDLE(anv_device, device, _device);
2817 struct anv_physical_device *pdevice = &device->instance->physicalDevice;
2818 struct anv_device_memory *mem;
2819 VkResult result = VK_SUCCESS;
2820
2821 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
2822
2823 /* The Vulkan 1.0.33 spec says "allocationSize must be greater than 0". */
2824 assert(pAllocateInfo->allocationSize > 0);
2825
2826 if (pAllocateInfo->allocationSize > MAX_MEMORY_ALLOCATION_SIZE)
2827 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2828
2829 /* FINISHME: Fail if allocation request exceeds heap size. */
2830
2831 mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
2832 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2833 if (mem == NULL)
2834 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2835
2836 assert(pAllocateInfo->memoryTypeIndex < pdevice->memory.type_count);
2837 mem->type = &pdevice->memory.types[pAllocateInfo->memoryTypeIndex];
2838 mem->map = NULL;
2839 mem->map_size = 0;
2840 mem->ahw = NULL;
2841 mem->host_ptr = NULL;
2842
2843 uint64_t bo_flags = 0;
2844
2845 assert(mem->type->heapIndex < pdevice->memory.heap_count);
2846 if (pdevice->memory.heaps[mem->type->heapIndex].supports_48bit_addresses)
2847 bo_flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
2848
2849 const struct wsi_memory_allocate_info *wsi_info =
2850 vk_find_struct_const(pAllocateInfo->pNext, WSI_MEMORY_ALLOCATE_INFO_MESA);
2851 if (wsi_info && wsi_info->implicit_sync) {
2852 /* We need to set the WRITE flag on window system buffers so that GEM
2853 * will know we're writing to them and synchronize uses on other rings
2854     * (e.g. if the display server uses the blitter ring).
2855 */
2856 bo_flags |= EXEC_OBJECT_WRITE;
2857 } else if (pdevice->has_exec_async) {
2858 bo_flags |= EXEC_OBJECT_ASYNC;
2859 }
2860
2861 if (pdevice->use_softpin)
2862 bo_flags |= EXEC_OBJECT_PINNED;
2863
2864 const VkExportMemoryAllocateInfo *export_info =
2865 vk_find_struct_const(pAllocateInfo->pNext, EXPORT_MEMORY_ALLOCATE_INFO);
2866
2867 /* Check if we need to support Android HW buffer export. If so,
2868 * create AHardwareBuffer and import memory from it.
2869 */
2870 bool android_export = false;
2871 if (export_info && export_info->handleTypes &
2872 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID)
2873 android_export = true;
2874
2875 /* Android memory import. */
2876 const struct VkImportAndroidHardwareBufferInfoANDROID *ahw_import_info =
2877 vk_find_struct_const(pAllocateInfo->pNext,
2878 IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID);
2879
2880 if (ahw_import_info) {
2881 result = anv_import_ahw_memory(_device, mem, ahw_import_info);
2882 if (result != VK_SUCCESS)
2883 goto fail;
2884
2885 goto success;
2886 } else if (android_export) {
2887 result = anv_create_ahw_memory(_device, mem, pAllocateInfo);
2888 if (result != VK_SUCCESS)
2889 goto fail;
2890
2891 const struct VkImportAndroidHardwareBufferInfoANDROID import_info = {
2892 .buffer = mem->ahw,
2893 };
2894 result = anv_import_ahw_memory(_device, mem, &import_info);
2895 if (result != VK_SUCCESS)
2896 goto fail;
2897
2898 goto success;
2899 }
2900
2901 const VkImportMemoryFdInfoKHR *fd_info =
2902 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
2903
2904 /* The Vulkan spec permits handleType to be 0, in which case the struct is
2905 * ignored.
2906 */
2907 if (fd_info && fd_info->handleType) {
2908 /* At the moment, we support only the below handle types. */
2909 assert(fd_info->handleType ==
2910 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
2911 fd_info->handleType ==
2912 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2913
2914 result = anv_bo_cache_import(device, &device->bo_cache, fd_info->fd,
2915 bo_flags | ANV_BO_EXTERNAL, &mem->bo);
2916 if (result != VK_SUCCESS)
2917 goto fail;
2918
2919 VkDeviceSize aligned_alloc_size =
2920 align_u64(pAllocateInfo->allocationSize, 4096);
2921
2922 /* For security purposes, we reject importing the bo if it's smaller
2923 * than the requested allocation size. This prevents a malicious client
2924 * from passing a buffer to a trusted client, lying about the size, and
2925     * telling the trusted client to try to texture from an image that goes
2926 * out-of-bounds. This sort of thing could lead to GPU hangs or worse
2927 * in the trusted client. The trusted client can protect itself against
2928 * this sort of attack but only if it can trust the buffer size.
2929 */
2930 if (mem->bo->size < aligned_alloc_size) {
2931 result = vk_errorf(device->instance, device,
2932 VK_ERROR_INVALID_EXTERNAL_HANDLE,
2933 "aligned allocationSize too large for "
2934 "VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT: "
2935 "%"PRIu64"B > %"PRIu64"B",
2936 aligned_alloc_size, mem->bo->size);
2937 anv_bo_cache_release(device, &device->bo_cache, mem->bo);
2938 goto fail;
2939 }
2940
2941 /* From the Vulkan spec:
2942 *
2943 * "Importing memory from a file descriptor transfers ownership of
2944 * the file descriptor from the application to the Vulkan
2945 * implementation. The application must not perform any operations on
2946 * the file descriptor after a successful import."
2947 *
2948 * If the import fails, we leave the file descriptor open.
2949 */
2950 close(fd_info->fd);
2951 goto success;
2952 }
2953
2954 const VkImportMemoryHostPointerInfoEXT *host_ptr_info =
2955 vk_find_struct_const(pAllocateInfo->pNext,
2956 IMPORT_MEMORY_HOST_POINTER_INFO_EXT);
2957 if (host_ptr_info && host_ptr_info->handleType) {
2958 if (host_ptr_info->handleType ==
2959 VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT) {
2960 result = vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
2961 goto fail;
2962 }
2963
2964 assert(host_ptr_info->handleType ==
2965 VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT);
2966
2967 result = anv_bo_cache_import_host_ptr(
2968 device, &device->bo_cache, host_ptr_info->pHostPointer,
2969 pAllocateInfo->allocationSize, bo_flags, &mem->bo);
2970
2971 if (result != VK_SUCCESS)
2972 goto fail;
2973
2974 mem->host_ptr = host_ptr_info->pHostPointer;
2975 goto success;
2976 }
2977
2978 /* Regular allocate (not importing memory). */
2979
2980 if (export_info && export_info->handleTypes)
2981 bo_flags |= ANV_BO_EXTERNAL;
2982
2983 result = anv_bo_cache_alloc(device, &device->bo_cache,
2984 pAllocateInfo->allocationSize, bo_flags,
2985 &mem->bo);
2986 if (result != VK_SUCCESS)
2987 goto fail;
2988
2989 const VkMemoryDedicatedAllocateInfo *dedicated_info =
2990 vk_find_struct_const(pAllocateInfo->pNext, MEMORY_DEDICATED_ALLOCATE_INFO);
2991 if (dedicated_info && dedicated_info->image != VK_NULL_HANDLE) {
2992 ANV_FROM_HANDLE(anv_image, image, dedicated_info->image);
2993
2994 /* Some legacy (non-modifiers) consumers need the tiling to be set on
2995 * the BO. In this case, we have a dedicated allocation.
2996 */
2997 if (image->needs_set_tiling) {
2998 const uint32_t i915_tiling =
2999 isl_tiling_to_i915_tiling(image->planes[0].surface.isl.tiling);
3000 int ret = anv_gem_set_tiling(device, mem->bo->gem_handle,
3001 image->planes[0].surface.isl.row_pitch_B,
3002 i915_tiling);
3003 if (ret) {
3004 anv_bo_cache_release(device, &device->bo_cache, mem->bo);
3005 return vk_errorf(device->instance, NULL,
3006 VK_ERROR_OUT_OF_DEVICE_MEMORY,
3007 "failed to set BO tiling: %m");
3008 }
3009 }
3010 }
3011
3012 success:
3013 pthread_mutex_lock(&device->mutex);
3014 list_addtail(&mem->link, &device->memory_objects);
3015 pthread_mutex_unlock(&device->mutex);
3016
3017 *pMem = anv_device_memory_to_handle(mem);
3018
3019 p_atomic_add(&pdevice->memory.heaps[mem->type->heapIndex].used,
3020 mem->bo->size);
3021
3022 return VK_SUCCESS;
3023
3024 fail:
3025 vk_free2(&device->alloc, pAllocator, mem);
3026
3027 return result;
3028 }
3029
3030 VkResult anv_GetMemoryFdKHR(
3031 VkDevice device_h,
3032 const VkMemoryGetFdInfoKHR* pGetFdInfo,
3033 int* pFd)
3034 {
3035 ANV_FROM_HANDLE(anv_device, dev, device_h);
3036 ANV_FROM_HANDLE(anv_device_memory, mem, pGetFdInfo->memory);
3037
3038 assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
3039
3040 assert(pGetFdInfo->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
3041 pGetFdInfo->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
3042
3043 return anv_bo_cache_export(dev, &dev->bo_cache, mem->bo, pFd);
3044 }
3045
3046 VkResult anv_GetMemoryFdPropertiesKHR(
3047 VkDevice _device,
3048 VkExternalMemoryHandleTypeFlagBits handleType,
3049 int fd,
3050 VkMemoryFdPropertiesKHR* pMemoryFdProperties)
3051 {
3052 ANV_FROM_HANDLE(anv_device, device, _device);
3053 struct anv_physical_device *pdevice = &device->instance->physicalDevice;
3054
3055 switch (handleType) {
3056 case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
3057       /* dma-buf can be imported as any memory type. */
3058 pMemoryFdProperties->memoryTypeBits =
3059 (1 << pdevice->memory.type_count) - 1;
3060 return VK_SUCCESS;
3061
3062 default:
3063 /* The valid usage section for this function says:
3064 *
3065 * "handleType must not be one of the handle types defined as
3066 * opaque."
3067 *
3068 * So opaque handle types fall into the default "unsupported" case.
3069 */
3070 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
3071 }
3072 }
3073
3074 VkResult anv_GetMemoryHostPointerPropertiesEXT(
3075 VkDevice _device,
3076 VkExternalMemoryHandleTypeFlagBits handleType,
3077 const void* pHostPointer,
3078 VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties)
3079 {
3080 ANV_FROM_HANDLE(anv_device, device, _device);
3081
3082 assert(pMemoryHostPointerProperties->sType ==
3083 VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT);
3084
3085 switch (handleType) {
3086 case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT: {
3087 struct anv_physical_device *pdevice = &device->instance->physicalDevice;
3088
3089 /* Host memory can be imported as any memory type. */
3090 pMemoryHostPointerProperties->memoryTypeBits =
3091 (1ull << pdevice->memory.type_count) - 1;
3092
3093 return VK_SUCCESS;
3094 }
3095 default:
3096 return VK_ERROR_INVALID_EXTERNAL_HANDLE;
3097 }
3098 }
3099
3100 void anv_FreeMemory(
3101 VkDevice _device,
3102 VkDeviceMemory _mem,
3103 const VkAllocationCallbacks* pAllocator)
3104 {
3105 ANV_FROM_HANDLE(anv_device, device, _device);
3106 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
3107 struct anv_physical_device *pdevice = &device->instance->physicalDevice;
3108
3109 if (mem == NULL)
3110 return;
3111
3112 pthread_mutex_lock(&device->mutex);
3113 list_del(&mem->link);
3114 pthread_mutex_unlock(&device->mutex);
3115
3116 if (mem->map)
3117 anv_UnmapMemory(_device, _mem);
3118
3119 p_atomic_add(&pdevice->memory.heaps[mem->type->heapIndex].used,
3120 -mem->bo->size);
3121
3122 anv_bo_cache_release(device, &device->bo_cache, mem->bo);
3123
3124 #if defined(ANDROID) && ANDROID_API_LEVEL >= 26
3125 if (mem->ahw)
3126 AHardwareBuffer_release(mem->ahw);
3127 #endif
3128
3129 vk_free2(&device->alloc, pAllocator, mem);
3130 }
3131
3132 VkResult anv_MapMemory(
3133 VkDevice _device,
3134 VkDeviceMemory _memory,
3135 VkDeviceSize offset,
3136 VkDeviceSize size,
3137 VkMemoryMapFlags flags,
3138 void** ppData)
3139 {
3140 ANV_FROM_HANDLE(anv_device, device, _device);
3141 ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
3142
3143 if (mem == NULL) {
3144 *ppData = NULL;
3145 return VK_SUCCESS;
3146 }
3147
3148 if (mem->host_ptr) {
3149 *ppData = mem->host_ptr + offset;
3150 return VK_SUCCESS;
3151 }
3152
3153 if (size == VK_WHOLE_SIZE)
3154 size = mem->bo->size - offset;
3155
3156 /* From the Vulkan spec version 1.0.32 docs for MapMemory:
3157 *
3158 * * If size is not equal to VK_WHOLE_SIZE, size must be greater than 0
3160 * * If size is not equal to VK_WHOLE_SIZE, size must be less than or
3161 * equal to the size of the memory minus offset
3162 */
3163 assert(size > 0);
3164 assert(offset + size <= mem->bo->size);
3165
3166 /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
3167 * takes a VkDeviceMemory pointer, it seems like only one map of the memory
3168 * at a time is valid. We could just mmap up front and return an offset
3169     * pointer here, but that may exhaust virtual memory on 32-bit
3170 * userspace. */
3171
3172 uint32_t gem_flags = 0;
3173
3174 if (!device->info.has_llc &&
3175 (mem->type->propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT))
3176 gem_flags |= I915_MMAP_WC;
3177
3178 /* GEM will fail to map if the offset isn't 4k-aligned. Round down. */
3179 uint64_t map_offset = offset & ~4095ull;
3180 assert(offset >= map_offset);
3181 uint64_t map_size = (offset + size) - map_offset;
3182
3183 /* Let's map whole pages */
3184 map_size = align_u64(map_size, 4096);
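    /* e.g. offset = 5000 and size = 100 give map_offset = 4096 and
     * map_size = align_u64(1004, 4096) = 4096: one page covering the range.
     */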
3185
3186 void *map = anv_gem_mmap(device, mem->bo->gem_handle,
3187 map_offset, map_size, gem_flags);
3188 if (map == MAP_FAILED)
3189 return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
3190
3191 mem->map = map;
3192 mem->map_size = map_size;
3193
3194 *ppData = mem->map + (offset - map_offset);
3195
3196 return VK_SUCCESS;
3197 }
3198
3199 void anv_UnmapMemory(
3200 VkDevice _device,
3201 VkDeviceMemory _memory)
3202 {
3203 ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
3204
3205 if (mem == NULL || mem->host_ptr)
3206 return;
3207
3208 anv_gem_munmap(mem->map, mem->map_size);
3209
3210 mem->map = NULL;
3211 mem->map_size = 0;
3212 }
3213
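/* Flush each mapped range out of the CPU caches, clamping each range to
 * the portion of the memory object that is actually mapped.
 */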
3214 static void
3215 clflush_mapped_ranges(struct anv_device *device,
3216 uint32_t count,
3217 const VkMappedMemoryRange *ranges)
3218 {
3219 for (uint32_t i = 0; i < count; i++) {
3220 ANV_FROM_HANDLE(anv_device_memory, mem, ranges[i].memory);
3221 if (ranges[i].offset >= mem->map_size)
3222 continue;
3223
3224 gen_clflush_range(mem->map + ranges[i].offset,
3225 MIN2(ranges[i].size, mem->map_size - ranges[i].offset));
3226 }
3227 }
3228
3229 VkResult anv_FlushMappedMemoryRanges(
3230 VkDevice _device,
3231 uint32_t memoryRangeCount,
3232 const VkMappedMemoryRange* pMemoryRanges)
3233 {
3234 ANV_FROM_HANDLE(anv_device, device, _device);
3235
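/* On LLC platforms the CPU caches are coherent with GPU access, so an
 * explicit flush is unnecessary.
 */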
3236 if (device->info.has_llc)
3237 return VK_SUCCESS;
3238
3239 /* Make sure the writes we're flushing have landed. */
3240 __builtin_ia32_mfence();
3241
3242 clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);
3243
3244 return VK_SUCCESS;
3245 }
3246
3247 VkResult anv_InvalidateMappedMemoryRanges(
3248 VkDevice _device,
3249 uint32_t memoryRangeCount,
3250 const VkMappedMemoryRange* pMemoryRanges)
3251 {
3252 ANV_FROM_HANDLE(anv_device, device, _device);
3253
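/* As with flushing above, LLC platforms are already coherent. */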
3254 if (device->info.has_llc)
3255 return VK_SUCCESS;
3256
3257 clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);
3258
3259 /* Make sure no reads get moved up above the invalidate. */
3260 __builtin_ia32_mfence();
3261
3262 return VK_SUCCESS;
3263 }
3264
3265 void anv_GetBufferMemoryRequirements(
3266 VkDevice _device,
3267 VkBuffer _buffer,
3268 VkMemoryRequirements* pMemoryRequirements)
3269 {
3270 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
3271 ANV_FROM_HANDLE(anv_device, device, _device);
3272 struct anv_physical_device *pdevice = &device->instance->physicalDevice;
3273
3274 /* The Vulkan spec (git aaed022) says:
3275 *
3276 * memoryTypeBits is a bitfield and contains one bit set for every
3277 * supported memory type for the resource. The bit `1<<i` is set if and
3278 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
3279 * structure for the physical device is supported.
3280 */
3281 uint32_t memory_types = 0;
3282 for (uint32_t i = 0; i < pdevice->memory.type_count; i++) {
3283 uint32_t valid_usage = pdevice->memory.types[i].valid_buffer_usage;
3284 if ((valid_usage & buffer->usage) == buffer->usage)
3285 memory_types |= (1u << i);
3286 }
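/* For example (hypothetical): if only types 0 and 2 support every bit in
 * buffer->usage, memory_types ends up as 0b101.
 */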
3287
3288 /* Base alignment requirement of a cache line */
3289 uint32_t alignment = 16;
3290
3291 /* We need an alignment of 32 for pushing UBOs */
3292 if (buffer->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT)
3293 alignment = MAX2(alignment, 32);
3294
3295 pMemoryRequirements->size = buffer->size;
3296 pMemoryRequirements->alignment = alignment;
3297
3298 /* Storage and uniform buffers should have their size aligned to
3299 * 32 bits to avoid bounds checks when the last DWord is incomplete.
3300 * This ensures that no internal padding is needed for
3301 * 16-bit types.
3302 */
3303 if (device->robust_buffer_access &&
3304 (buffer->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT ||
3305 buffer->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT))
3306 pMemoryRequirements->size = align_u64(buffer->size, 4);
3307
3308 pMemoryRequirements->memoryTypeBits = memory_types;
3309 }
3310
3311 void anv_GetBufferMemoryRequirements2(
3312 VkDevice _device,
3313 const VkBufferMemoryRequirementsInfo2* pInfo,
3314 VkMemoryRequirements2* pMemoryRequirements)
3315 {
3316 anv_GetBufferMemoryRequirements(_device, pInfo->buffer,
3317 &pMemoryRequirements->memoryRequirements);
3318
3319 vk_foreach_struct(ext, pMemoryRequirements->pNext) {
3320 switch (ext->sType) {
3321 case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS: {
3322 VkMemoryDedicatedRequirements *requirements = (void *)ext;
3323 requirements->prefersDedicatedAllocation = false;
3324 requirements->requiresDedicatedAllocation = false;
3325 break;
3326 }
3327
3328 default:
3329 anv_debug_ignored_stype(ext->sType);
3330 break;
3331 }
3332 }
3333 }
3334
3335 void anv_GetImageMemoryRequirements(
3336 VkDevice _device,
3337 VkImage _image,
3338 VkMemoryRequirements* pMemoryRequirements)
3339 {
3340 ANV_FROM_HANDLE(anv_image, image, _image);
3341 ANV_FROM_HANDLE(anv_device, device, _device);
3342 struct anv_physical_device *pdevice = &device->instance->physicalDevice;
3343
3344 /* The Vulkan spec (git aaed022) says:
3345 *
3346 * memoryTypeBits is a bitfield and contains one bit set for every
3347 * supported memory type for the resource. The bit `1<<i` is set if and
3348 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
3349 * structure for the physical device is supported.
3350 *
3351 * All types are currently supported for images.
3352 */
3353 uint32_t memory_types = (1ull << pdevice->memory.type_count) - 1;
3354
3355 /* We must have the image allocated or imported at this point. According to the
3356 * specification, external images must have been bound to memory before
3357 * calling GetImageMemoryRequirements.
3358 */
3359 assert(image->size > 0);
3360
3361 pMemoryRequirements->size = image->size;
3362 pMemoryRequirements->alignment = image->alignment;
3363 pMemoryRequirements->memoryTypeBits = memory_types;
3364 }
3365
3366 void anv_GetImageMemoryRequirements2(
3367 VkDevice _device,
3368 const VkImageMemoryRequirementsInfo2* pInfo,
3369 VkMemoryRequirements2* pMemoryRequirements)
3370 {
3371 ANV_FROM_HANDLE(anv_device, device, _device);
3372 ANV_FROM_HANDLE(anv_image, image, pInfo->image);
3373
3374 anv_GetImageMemoryRequirements(_device, pInfo->image,
3375 &pMemoryRequirements->memoryRequirements);
3376
3377 vk_foreach_struct_const(ext, pInfo->pNext) {
3378 switch (ext->sType) {
3379 case VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO: {
3380 struct anv_physical_device *pdevice = &device->instance->physicalDevice;
3381 const VkImagePlaneMemoryRequirementsInfo *plane_reqs =
3382 (const VkImagePlaneMemoryRequirementsInfo *) ext;
3383 uint32_t plane = anv_image_aspect_to_plane(image->aspects,
3384 plane_reqs->planeAspect);
3385
3386 assert(image->planes[plane].offset == 0);
3387
3388 /* The Vulkan spec (git aaed022) says:
3389 *
3390 * memoryTypeBits is a bitfield and contains one bit set for every
3391 * supported memory type for the resource. The bit `1<<i` is set
3392 * if and only if the memory type `i` in the
3393 * VkPhysicalDeviceMemoryProperties structure for the physical
3394 * device is supported.
3395 *
3396 * All types are currently supported for images.
3397 */
3398 pMemoryRequirements->memoryRequirements.memoryTypeBits =
3399 (1ull << pdevice->memory.type_count) - 1;
3400
3401 /* We must have the image allocated or imported at this point. According to the
3402 * specification, external images must have been bound to memory before
3403 * calling GetImageMemoryRequirements.
3404 */
3405 assert(image->planes[plane].size > 0);
3406
3407 pMemoryRequirements->memoryRequirements.size = image->planes[plane].size;
3408 pMemoryRequirements->memoryRequirements.alignment =
3409 image->planes[plane].alignment;
3410 break;
3411 }
3412
3413 default:
3414 anv_debug_ignored_stype(ext->sType);
3415 break;
3416 }
3417 }
3418
3419 vk_foreach_struct(ext, pMemoryRequirements->pNext) {
3420 switch (ext->sType) {
3421 case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS: {
3422 VkMemoryDedicatedRequirements *requirements = (void *)ext;
3423 if (image->needs_set_tiling || image->external_format) {
3424 /* If we need to set the tiling for external consumers, we need a
3425 * dedicated allocation.
3426 *
3427 * See also anv_AllocateMemory.
3428 */
3429 requirements->prefersDedicatedAllocation = true;
3430 requirements->requiresDedicatedAllocation = true;
3431 } else {
3432 requirements->prefersDedicatedAllocation = false;
3433 requirements->requiresDedicatedAllocation = false;
3434 }
3435 break;
3436 }
3437
3438 default:
3439 anv_debug_ignored_stype(ext->sType);
3440 break;
3441 }
3442 }
3443 }
3444
3445 void anv_GetImageSparseMemoryRequirements(
3446 VkDevice device,
3447 VkImage image,
3448 uint32_t* pSparseMemoryRequirementCount,
3449 VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
3450 {
3451 *pSparseMemoryRequirementCount = 0;
3452 }
3453
3454 void anv_GetImageSparseMemoryRequirements2(
3455 VkDevice device,
3456 const VkImageSparseMemoryRequirementsInfo2* pInfo,
3457 uint32_t* pSparseMemoryRequirementCount,
3458 VkSparseImageMemoryRequirements2* pSparseMemoryRequirements)
3459 {
3460 *pSparseMemoryRequirementCount = 0;
3461 }
3462
3463 void anv_GetDeviceMemoryCommitment(
3464 VkDevice device,
3465 VkDeviceMemory memory,
3466 VkDeviceSize* pCommittedMemoryInBytes)
3467 {
3468 *pCommittedMemoryInBytes = 0;
3469 }
3470
3471 static void
3472 anv_bind_buffer_memory(const VkBindBufferMemoryInfo *pBindInfo)
3473 {
3474 ANV_FROM_HANDLE(anv_device_memory, mem, pBindInfo->memory);
3475 ANV_FROM_HANDLE(anv_buffer, buffer, pBindInfo->buffer);
3476
3477 assert(pBindInfo->sType == VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO);
3478
3479 if (mem) {
3480 assert((buffer->usage & mem->type->valid_buffer_usage) == buffer->usage);
3481 buffer->address = (struct anv_address) {
3482 .bo = mem->bo,
3483 .offset = pBindInfo->memoryOffset,
3484 };
3485 } else {
3486 buffer->address = ANV_NULL_ADDRESS;
3487 }
3488 }
3489
3490 VkResult anv_BindBufferMemory(
3491 VkDevice device,
3492 VkBuffer buffer,
3493 VkDeviceMemory memory,
3494 VkDeviceSize memoryOffset)
3495 {
3496 anv_bind_buffer_memory(
3497 &(VkBindBufferMemoryInfo) {
3498 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
3499 .buffer = buffer,
3500 .memory = memory,
3501 .memoryOffset = memoryOffset,
3502 });
3503
3504 return VK_SUCCESS;
3505 }
3506
3507 VkResult anv_BindBufferMemory2(
3508 VkDevice device,
3509 uint32_t bindInfoCount,
3510 const VkBindBufferMemoryInfo* pBindInfos)
3511 {
3512 for (uint32_t i = 0; i < bindInfoCount; i++)
3513 anv_bind_buffer_memory(&pBindInfos[i]);
3514
3515 return VK_SUCCESS;
3516 }
3517
3518 VkResult anv_QueueBindSparse(
3519 VkQueue _queue,
3520 uint32_t bindInfoCount,
3521 const VkBindSparseInfo* pBindInfo,
3522 VkFence fence)
3523 {
3524 ANV_FROM_HANDLE(anv_queue, queue, _queue);
3525 if (anv_device_is_lost(queue->device))
3526 return VK_ERROR_DEVICE_LOST;
3527
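/* anv never advertises the sparseBinding feature, so a well-behaved
 * application cannot reach this path.
 */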
3528 return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
3529 }
3530
3531 // Event functions
3532
3533 VkResult anv_CreateEvent(
3534 VkDevice _device,
3535 const VkEventCreateInfo* pCreateInfo,
3536 const VkAllocationCallbacks* pAllocator,
3537 VkEvent* pEvent)
3538 {
3539 ANV_FROM_HANDLE(anv_device, device, _device);
3540 struct anv_state state;
3541 struct anv_event *event;
3542
3543 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_EVENT_CREATE_INFO);
3544
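/* The event is a small allocation in the dynamic state pool so that the
 * GPU can write it from command buffers and the CPU can poll it directly.
 */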
3545 state = anv_state_pool_alloc(&device->dynamic_state_pool,
3546 sizeof(*event), 8);
3547 event = state.map;
3548 event->state = state;
3549 event->semaphore = VK_EVENT_RESET;
3550
3551 if (!device->info.has_llc) {
3552 /* Make sure the writes we're flushing have landed. */
3553 __builtin_ia32_mfence();
3554 __builtin_ia32_clflush(event);
3555 }
3556
3557 *pEvent = anv_event_to_handle(event);
3558
3559 return VK_SUCCESS;
3560 }
3561
3562 void anv_DestroyEvent(
3563 VkDevice _device,
3564 VkEvent _event,
3565 const VkAllocationCallbacks* pAllocator)
3566 {
3567 ANV_FROM_HANDLE(anv_device, device, _device);
3568 ANV_FROM_HANDLE(anv_event, event, _event);
3569
3570 if (!event)
3571 return;
3572
3573 anv_state_pool_free(&device->dynamic_state_pool, event->state);
3574 }
3575
3576 VkResult anv_GetEventStatus(
3577 VkDevice _device,
3578 VkEvent _event)
3579 {
3580 ANV_FROM_HANDLE(anv_device, device, _device);
3581 ANV_FROM_HANDLE(anv_event, event, _event);
3582
3583 if (anv_device_is_lost(device))
3584 return VK_ERROR_DEVICE_LOST;
3585
3586 if (!device->info.has_llc) {
3587 /* Invalidate read cache before reading event written by GPU. */
3588 __builtin_ia32_clflush(event);
3589 __builtin_ia32_mfence();
3591 }
3592
3593 return event->semaphore;
3594 }
3595
3596 VkResult anv_SetEvent(
3597 VkDevice _device,
3598 VkEvent _event)
3599 {
3600 ANV_FROM_HANDLE(anv_device, device, _device);
3601 ANV_FROM_HANDLE(anv_event, event, _event);
3602
3603 event->semaphore = VK_EVENT_SET;
3604
3605 if (!device->info.has_llc) {
3606 /* Make sure the writes we're flushing have landed. */
3607 __builtin_ia32_mfence();
3608 __builtin_ia32_clflush(event);
3609 }
3610
3611 return VK_SUCCESS;
3612 }
3613
3614 VkResult anv_ResetEvent(
3615 VkDevice _device,
3616 VkEvent _event)
3617 {
3618 ANV_FROM_HANDLE(anv_device, device, _device);
3619 ANV_FROM_HANDLE(anv_event, event, _event);
3620
3621 event->semaphore = VK_EVENT_RESET;
3622
3623 if (!device->info.has_llc) {
3624 /* Make sure the writes we're flushing have landed. */
3625 __builtin_ia32_mfence();
3626 __builtin_ia32_clflush(event);
3627 }
3628
3629 return VK_SUCCESS;
3630 }
3631
3632 // Buffer functions
3633
3634 VkResult anv_CreateBuffer(
3635 VkDevice _device,
3636 const VkBufferCreateInfo* pCreateInfo,
3637 const VkAllocationCallbacks* pAllocator,
3638 VkBuffer* pBuffer)
3639 {
3640 ANV_FROM_HANDLE(anv_device, device, _device);
3641 struct anv_buffer *buffer;
3642
3643 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
3644
3645 buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
3646 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3647 if (buffer == NULL)
3648 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3649
3650 buffer->size = pCreateInfo->size;
3651 buffer->usage = pCreateInfo->usage;
3652 buffer->address = ANV_NULL_ADDRESS;
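/* The buffer only receives a real GPU address when it is later bound
 * with vkBindBufferMemory().
 */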
3653
3654 *pBuffer = anv_buffer_to_handle(buffer);
3655
3656 return VK_SUCCESS;
3657 }
3658
3659 void anv_DestroyBuffer(
3660 VkDevice _device,
3661 VkBuffer _buffer,
3662 const VkAllocationCallbacks* pAllocator)
3663 {
3664 ANV_FROM_HANDLE(anv_device, device, _device);
3665 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
3666
3667 if (!buffer)
3668 return;
3669
3670 vk_free2(&device->alloc, pAllocator, buffer);
3671 }
3672
3673 VkDeviceAddress anv_GetBufferDeviceAddressEXT(
3674 VkDevice device,
3675 const VkBufferDeviceAddressInfoEXT* pInfo)
3676 {
3677 ANV_FROM_HANDLE(anv_buffer, buffer, pInfo->buffer);
3678
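/* Returning a stable device address requires softpin: the BO must have
 * been pinned at a fixed GPU virtual address.
 */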
3679 assert(buffer->address.bo->flags & EXEC_OBJECT_PINNED);
3680
3681 return anv_address_physical(buffer->address);
3682 }
3683
3684 void
3685 anv_fill_buffer_surface_state(struct anv_device *device, struct anv_state state,
3686 enum isl_format format,
3687 struct anv_address address,
3688 uint32_t range, uint32_t stride)
3689 {
3690 isl_buffer_fill_state(&device->isl_dev, state.map,
3691 .address = anv_address_physical(address),
3692 .mocs = device->default_mocs,
3693 .size_B = range,
3694 .format = format,
3695 .swizzle = ISL_SWIZZLE_IDENTITY,
3696 .stride_B = stride);
3697 }
3698
3699 void anv_DestroySampler(
3700 VkDevice _device,
3701 VkSampler _sampler,
3702 const VkAllocationCallbacks* pAllocator)
3703 {
3704 ANV_FROM_HANDLE(anv_device, device, _device);
3705 ANV_FROM_HANDLE(anv_sampler, sampler, _sampler);
3706
3707 if (!sampler)
3708 return;
3709
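/* Samplers that can be used bindlessly keep a copy of their state in
 * the dynamic state pool; release it if present.
 */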
3710 if (sampler->bindless_state.map) {
3711 anv_state_pool_free(&device->dynamic_state_pool,
3712 sampler->bindless_state);
3713 }
3714
3715 vk_free2(&device->alloc, pAllocator, sampler);
3716 }
3717
3718 VkResult anv_CreateFramebuffer(
3719 VkDevice _device,
3720 const VkFramebufferCreateInfo* pCreateInfo,
3721 const VkAllocationCallbacks* pAllocator,
3722 VkFramebuffer* pFramebuffer)
3723 {
3724 ANV_FROM_HANDLE(anv_device, device, _device);
3725 struct anv_framebuffer *framebuffer;
3726
3727 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
3728
3729 size_t size = sizeof(*framebuffer);
3730
3731 /* VK_KHR_imageless_framebuffer extension says:
3732 *
3733 * If flags includes VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR,
3734 * parameter pAttachments is ignored.
3735 */
3736 if (!(pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR)) {
3737 size += sizeof(struct anv_image_view *) * pCreateInfo->attachmentCount;
3738 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
3739 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3740 if (framebuffer == NULL)
3741 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3742
3743 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
3744 ANV_FROM_HANDLE(anv_image_view, iview, pCreateInfo->pAttachments[i]);
3745 framebuffer->attachments[i] = iview;
3746 }
3747 framebuffer->attachment_count = pCreateInfo->attachmentCount;
3748 } else {
3749 assert(device->enabled_extensions.KHR_imageless_framebuffer);
3750 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
3751 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3752 if (framebuffer == NULL)
3753 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3754
3755 framebuffer->attachment_count = 0;
3756 }
3757
3758 framebuffer->width = pCreateInfo->width;
3759 framebuffer->height = pCreateInfo->height;
3760 framebuffer->layers = pCreateInfo->layers;
3761
3762 *pFramebuffer = anv_framebuffer_to_handle(framebuffer);
3763
3764 return VK_SUCCESS;
3765 }
3766
3767 void anv_DestroyFramebuffer(
3768 VkDevice _device,
3769 VkFramebuffer _fb,
3770 const VkAllocationCallbacks* pAllocator)
3771 {
3772 ANV_FROM_HANDLE(anv_device, device, _device);
3773 ANV_FROM_HANDLE(anv_framebuffer, fb, _fb);
3774
3775 if (!fb)
3776 return;
3777
3778 vk_free2(&device->alloc, pAllocator, fb);
3779 }
3780
3781 static const VkTimeDomainEXT anv_time_domains[] = {
3782 VK_TIME_DOMAIN_DEVICE_EXT,
3783 VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT,
3784 VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT,
3785 };
3786
3787 VkResult anv_GetPhysicalDeviceCalibrateableTimeDomainsEXT(
3788 VkPhysicalDevice physicalDevice,
3789 uint32_t *pTimeDomainCount,
3790 VkTimeDomainEXT *pTimeDomains)
3791 {
3792 int d;
3793 VK_OUTARRAY_MAKE(out, pTimeDomains, pTimeDomainCount);
3794
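/* vk_outarray implements the usual Vulkan two-call idiom: it counts when
 * pTimeDomains is NULL and reports VK_INCOMPLETE when the array is too
 * small.
 */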
3795 for (d = 0; d < ARRAY_SIZE(anv_time_domains); d++) {
3796 vk_outarray_append(&out, i) {
3797 *i = anv_time_domains[d];
3798 }
3799 }
3800
3801 return vk_outarray_status(&out);
3802 }
3803
3804 static uint64_t
3805 anv_clock_gettime(clockid_t clock_id)
3806 {
3807 struct timespec current;
3808 int ret;
3809
3810 ret = clock_gettime(clock_id, &current);
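/* CLOCK_MONOTONIC_RAW may be unavailable (e.g. on older kernels); fall
 * back to CLOCK_MONOTONIC rather than failing outright.
 */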
3811 if (ret < 0 && clock_id == CLOCK_MONOTONIC_RAW)
3812 ret = clock_gettime(CLOCK_MONOTONIC, &current);
3813 if (ret < 0)
3814 return 0;
3815
3816 return (uint64_t) current.tv_sec * 1000000000ULL + current.tv_nsec;
3817 }
3818
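/* 0x2358 is the MMIO offset of the render command streamer's TIMESTAMP
 * register.
 */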
3819 #define TIMESTAMP 0x2358
3820
3821 VkResult anv_GetCalibratedTimestampsEXT(
3822 VkDevice _device,
3823 uint32_t timestampCount,
3824 const VkCalibratedTimestampInfoEXT *pTimestampInfos,
3825 uint64_t *pTimestamps,
3826 uint64_t *pMaxDeviation)
3827 {
3828 ANV_FROM_HANDLE(anv_device, device, _device);
3829 uint64_t timestamp_frequency = device->info.timestamp_frequency;
3830 int ret;
3831 int d;
3832 uint64_t begin, end;
3833 uint64_t max_clock_period = 0;
3834
3835 begin = anv_clock_gettime(CLOCK_MONOTONIC_RAW);
3836
3837 for (d = 0; d < timestampCount; d++) {
3838 switch (pTimestampInfos[d].timeDomain) {
3839 case VK_TIME_DOMAIN_DEVICE_EXT:
3840 ret = anv_gem_reg_read(device, TIMESTAMP | 1,
3841 &pTimestamps[d]);
3842
3843 if (ret != 0) {
3844 return anv_device_set_lost(device, "Failed to read the TIMESTAMP "
3845 "register: %m");
3846 }
3847 uint64_t device_period = DIV_ROUND_UP(1000000000, timestamp_frequency);
3848 max_clock_period = MAX2(max_clock_period, device_period);
3849 break;
3850 case VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT:
3851 pTimestamps[d] = anv_clock_gettime(CLOCK_MONOTONIC);
3852 max_clock_period = MAX2(max_clock_period, 1);
3853 break;
3854
3855 case VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT:
3856 pTimestamps[d] = begin;
3857 break;
3858 default:
3859 pTimestamps[d] = 0;
3860 break;
3861 }
3862 }
3863
3864 end = anv_clock_gettime(CLOCK_MONOTONIC_RAW);
3865
3866 /*
3867 * The maximum deviation is the sum of the interval over which we
3868 * perform the sampling and the maximum period of any sampled
3869 * clock. That's because the maximum skew between any two sampled
3870 * clock edges is when the sampled clock with the largest period is
3871 * sampled at the end of that period but right at the beginning of the
3872 * sampling interval and some other clock is sampled right at the
3873 * beginning of its sampling period and right at the end of the
3874 * sampling interval. Let's assume the GPU has the longest clock
3875 * period and that the application is sampling GPU and monotonic:
3876 *
3877 * s e
3878 * w x y z 0 1 2 3 4 5 6 7 8 9 a b c d e f
3879 * Raw -_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-
3880 *
3881 * g
3882 * 0 1 2 3
3883 * GPU -----_____-----_____-----_____-----_____
3884 *
3885 * m
3886 * x y z 0 1 2 3 4 5 6 7 8 9 a b c
3887 * Monotonic -_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-
3888 *
3889 * Interval <----------------->
3890 * Deviation <-------------------------->
3891 *
3892 * s = read(raw) 2
3893 * g = read(GPU) 1
3894 * m = read(monotonic) 2
3895 * e = read(raw) b
3896 *
3897 * We round the sample interval up by one tick to cover sampling error
3898 * in the interval clock.
3899 */
3900
3901 uint64_t sample_interval = end - begin + 1;
3902
3903 *pMaxDeviation = sample_interval + max_clock_period;
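/* Worked example (hypothetical numbers): with end - begin = 500 ns and a
 * 12 MHz GPU timestamp, device_period = DIV_ROUND_UP(1000000000, 12000000)
 * = 84 ns, giving *pMaxDeviation = 501 + 84 = 585 ns.
 */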
3904
3905 return VK_SUCCESS;
3906 }
3907
3908 /* vk_icd.h does not declare this function, so we declare it here to
3909 * suppress -Wmissing-prototypes.
3910 */
3911 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
3912 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t* pSupportedVersion);
3913
3914 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
3915 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t* pSupportedVersion)
3916 {
3917 /* For the full details on loader interface versioning, see
3918 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
3919 * What follows is a condensed summary, to help you navigate the large and
3920 * confusing official doc.
3921 *
3922 * - Loader interface v0 is incompatible with later versions. We don't
3923 * support it.
3924 *
3925 * - In loader interface v1:
3926 * - The first ICD entrypoint called by the loader is
3927 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
3928 * entrypoint.
3929 * - The ICD must statically expose no other Vulkan symbol unless it is
3930 * linked with -Bsymbolic.
3931 * - Each dispatchable Vulkan handle created by the ICD must be
3932 * a pointer to a struct whose first member is VK_LOADER_DATA. The
3933 * ICD must initialize VK_LOADER_DATA.loadMagic to ICD_LOADER_MAGIC.
3934 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
3935 * vkDestroySurfaceKHR(). The ICD must be capable of working with
3936 * such loader-managed surfaces.
3937 *
3938 * - Loader interface v2 differs from v1 in:
3939 * - The first ICD entrypoint called by the loader is
3940 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
3941 * statically expose this entrypoint.
3942 *
3943 * - Loader interface v3 differs from v2 in:
3944 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
3945 * vkDestroySurfaceKHR(), and other APIs that use VkSurfaceKHR,
3946 * because the loader no longer does so.
3947 */
3948 *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
3949 return VK_SUCCESS;
3950 }