/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <fcntl.h>
#include <xf86drm.h>
#include "drm-uapi/drm_fourcc.h"

#include "anv_private.h"
#include "util/debug.h"
#include "util/build_id.h"
#include "util/disk_cache.h"
#include "util/mesa-sha1.h"
#include "util/os_file.h"
#include "util/u_atomic.h"
#include "util/u_string.h"
#include "util/xmlpool.h"
#include "git_sha1.h"
#include "vk_util.h"
#include "common/gen_aux_map.h"
#include "common/gen_defines.h"
#include "compiler/glsl_types.h"

#include "genxml/gen7_pack.h"

static const char anv_dri_options_xml[] =
DRI_CONF_BEGIN
   DRI_CONF_SECTION_PERFORMANCE
      DRI_CONF_VK_X11_OVERRIDE_MIN_IMAGE_COUNT(0)
      DRI_CONF_VK_X11_STRICT_IMAGE_COUNT("false")
   DRI_CONF_SECTION_END

   DRI_CONF_SECTION_DEBUG
      DRI_CONF_ALWAYS_FLUSH_CACHE("false")
      DRI_CONF_VK_WSI_FORCE_BGRA8_UNORM_FIRST("false")
   DRI_CONF_SECTION_END
DRI_CONF_END;
/* This is probably far too big, but it reflects the max size used for
 * messages in OpenGL's KHR_debug.
 */
#define MAX_DEBUG_MESSAGE_LENGTH 4096

static void
compiler_debug_log(void *data, const char *fmt, ...)
{
   char str[MAX_DEBUG_MESSAGE_LENGTH];
   struct anv_device *device = (struct anv_device *)data;
   struct anv_instance *instance = device->physical->instance;

   if (list_is_empty(&instance->debug_report_callbacks.callbacks))
      return;

   va_list args;
   va_start(args, fmt);
   (void) vsnprintf(str, MAX_DEBUG_MESSAGE_LENGTH, fmt, args);
   va_end(args);

   vk_debug_report(&instance->debug_report_callbacks,
                   VK_DEBUG_REPORT_DEBUG_BIT_EXT,
                   VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                   0, 0, 0, "anv", str);
}

static void
compiler_perf_log(void *data, const char *fmt, ...)
{
   va_list args;
   va_start(args, fmt);

   if (unlikely(INTEL_DEBUG & DEBUG_PERF))
      intel_logd_v(fmt, args);

   va_end(args);
}

static uint64_t
anv_compute_heap_size(int fd, uint64_t gtt_size)
{
   /* Query the total ram from the system */
   struct sysinfo info;
   sysinfo(&info);

   uint64_t total_ram = (uint64_t)info.totalram * (uint64_t)info.mem_unit;

   /* We don't want to burn too much ram with the GPU. If the user has 4GiB
    * or less, we use at most half. If they have more than 4GiB, we use 3/4.
    */
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   /* We also want to leave some padding for things we allocate in the driver,
    * so don't go over 3/4 of the GTT either.
    */
   uint64_t available_gtt = gtt_size * 3 / 4;

   return MIN2(available_ram, available_gtt);
}
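
/* Illustrative example (editor's addition, not from the original source):
 * with 16 GiB of system RAM and a 256 GiB GTT, available_ram = 12 GiB and
 * available_gtt = 192 GiB, so the heap is sized to 12 GiB; a machine with
 * 4 GiB of RAM would instead get a 2 GiB heap.
 */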

static VkResult
anv_physical_device_init_heaps(struct anv_physical_device *device, int fd)
{
   if (anv_gem_get_context_param(fd, 0, I915_CONTEXT_PARAM_GTT_SIZE,
                                 &device->gtt_size) == -1) {
      /* If, for whatever reason, we can't actually get the GTT size from the
       * kernel (too old?), fall back to the aperture size.
       */
      anv_perf_warn(NULL, NULL,
                    "Failed to get I915_CONTEXT_PARAM_GTT_SIZE: %m");

      if (anv_gem_get_aperture(fd, &device->gtt_size) == -1) {
         return vk_errorfi(device->instance, NULL,
                           VK_ERROR_INITIALIZATION_FAILED,
                           "failed to get aperture size: %m");
      }
   }

   /* We only allow 48-bit addresses with softpin because knowing the actual
    * address is required for the vertex cache flush workaround.
    */
   device->supports_48bit_addresses = (device->info.gen >= 8) &&
                                      device->has_softpin &&
                                      device->gtt_size > (4ULL << 30 /* GiB */);

   uint64_t heap_size = anv_compute_heap_size(fd, device->gtt_size);

   if (heap_size > (2ull << 30) && !device->supports_48bit_addresses) {
      /* When running with an overridden PCI ID, we may get a GTT size from
       * the kernel that is greater than 2 GiB but the execbuf check for 48bit
       * address support can still fail. Just clamp the address space size to
       * 2 GiB if we don't have 48-bit support.
       */
      intel_logw("%s:%d: The kernel reported a GTT size larger than 2 GiB but "
                 "no support for 48-bit addresses",
                 __FILE__, __LINE__);
      heap_size = 2ull << 30;
   }

   device->memory.heap_count = 1;
   device->memory.heaps[0] = (struct anv_memory_heap) {
      .size = heap_size,
      .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
   };

   uint32_t type_count = 0;
   for (uint32_t heap = 0; heap < device->memory.heap_count; heap++) {
      if (device->info.has_llc) {
         /* Big core GPUs share LLC with the CPU and thus one memory type can be
          * both cached and coherent at the same time.
          */
         device->memory.types[type_count++] = (struct anv_memory_type) {
            .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                             VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                             VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
            .heapIndex = heap,
         };
      } else {
         /* The spec requires that we expose a host-visible, coherent memory
          * type, but Atom GPUs don't share LLC. We therefore offer two memory
          * types: one that is cached but not coherent, and one that is
          * coherent but uncached (write-combined).
          */
         device->memory.types[type_count++] = (struct anv_memory_type) {
            .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                             VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
            .heapIndex = heap,
         };
         device->memory.types[type_count++] = (struct anv_memory_type) {
            .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                             VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
            .heapIndex = heap,
         };
      }
   }
   device->memory.type_count = type_count;

   return VK_SUCCESS;
}
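
/* Summary (editor's note, not in the original source): LLC platforms end up
 * advertising a single DEVICE_LOCAL | HOST_VISIBLE | HOST_COHERENT |
 * HOST_CACHED memory type, while non-LLC platforms advertise the two types
 * above, both backed by the same heap.
 */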

static VkResult
anv_physical_device_init_uuids(struct anv_physical_device *device)
{
   const struct build_id_note *note =
      build_id_find_nhdr_for_addr(anv_physical_device_init_uuids);
   if (!note) {
      return vk_errorfi(device->instance, NULL,
                        VK_ERROR_INITIALIZATION_FAILED,
                        "Failed to find build-id");
   }

   unsigned build_id_len = build_id_length(note);
   if (build_id_len < 20) {
      return vk_errorfi(device->instance, NULL,
                        VK_ERROR_INITIALIZATION_FAILED,
                        "build-id too short. It needs to be a SHA");
   }

   memcpy(device->driver_build_sha1, build_id_data(note), 20);

   struct mesa_sha1 sha1_ctx;
   uint8_t sha1[20];
   STATIC_ASSERT(VK_UUID_SIZE <= sizeof(sha1));

   /* The pipeline cache UUID is used for determining when a pipeline cache is
    * invalid. It needs both a driver build and the PCI ID of the device.
    */
   _mesa_sha1_init(&sha1_ctx);
   _mesa_sha1_update(&sha1_ctx, build_id_data(note), build_id_len);
   _mesa_sha1_update(&sha1_ctx, &device->info.chipset_id,
                     sizeof(device->info.chipset_id));
   _mesa_sha1_update(&sha1_ctx, &device->always_use_bindless,
                     sizeof(device->always_use_bindless));
   _mesa_sha1_update(&sha1_ctx, &device->has_a64_buffer_access,
                     sizeof(device->has_a64_buffer_access));
   _mesa_sha1_update(&sha1_ctx, &device->has_bindless_images,
                     sizeof(device->has_bindless_images));
   _mesa_sha1_update(&sha1_ctx, &device->has_bindless_samplers,
                     sizeof(device->has_bindless_samplers));
   _mesa_sha1_final(&sha1_ctx, sha1);
   memcpy(device->pipeline_cache_uuid, sha1, VK_UUID_SIZE);

   /* The driver UUID is used for determining sharability of images and memory
    * between two Vulkan instances in separate processes. People who want to
    * share memory need to also check the device UUID (below), so this only
    * needs to be the build-id.
    */
   memcpy(device->driver_uuid, build_id_data(note), VK_UUID_SIZE);

   /* The device UUID uniquely identifies the given device within the machine.
    * Since we never have more than one device, this doesn't need to be a real
    * UUID. However, on the off-chance that someone tries to use this to
    * cache pre-tiled images or something of the like, we use the PCI ID and
    * some bits of ISL info to ensure that this is safe.
    */
   _mesa_sha1_init(&sha1_ctx);
   _mesa_sha1_update(&sha1_ctx, &device->info.chipset_id,
                     sizeof(device->info.chipset_id));
   _mesa_sha1_update(&sha1_ctx, &device->isl_dev.has_bit6_swizzling,
                     sizeof(device->isl_dev.has_bit6_swizzling));
   _mesa_sha1_final(&sha1_ctx, sha1);
   memcpy(device->device_uuid, sha1, VK_UUID_SIZE);

   return VK_SUCCESS;
}

static void
anv_physical_device_init_disk_cache(struct anv_physical_device *device)
{
#ifdef ENABLE_SHADER_CACHE
   char renderer[10];
   ASSERTED int len = snprintf(renderer, sizeof(renderer), "anv_%04x",
                               device->info.chipset_id);
   assert(len == sizeof(renderer) - 2);

   char timestamp[41];
   _mesa_sha1_format(timestamp, device->driver_build_sha1);

   const uint64_t driver_flags =
      brw_get_compiler_config_value(device->compiler);
   device->disk_cache = disk_cache_create(renderer, timestamp, driver_flags);
#else
   device->disk_cache = NULL;
#endif
}
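
/* Illustrative example (editor's addition): for a chipset_id of 0x1912
 * (assumed here purely for illustration), the cache key prefix would be
 * "anv_1912", and the timestamp is the 40-character hex form of the driver
 * build-id, so cache entries are invalidated whenever the driver binary
 * changes.
 */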

static void
anv_physical_device_free_disk_cache(struct anv_physical_device *device)
{
#ifdef ENABLE_SHADER_CACHE
   if (device->disk_cache)
      disk_cache_destroy(device->disk_cache);
#else
   assert(device->disk_cache == NULL);
#endif
}

static uint64_t
get_available_system_memory()
{
   char *meminfo = os_read_file("/proc/meminfo", NULL);
   if (!meminfo)
      return 0;

   char *str = strstr(meminfo, "MemAvailable:");
   if (!str) {
      free(meminfo);
      return 0;
   }

   uint64_t kb_mem_available;
   /* MemAvailable is reported in decimal kB, so parse it as decimal. */
   if (sscanf(str, "MemAvailable: %" PRIu64, &kb_mem_available) == 1) {
      free(meminfo);
      return kb_mem_available << 10;
   }

   free(meminfo);
   return 0;
}
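
/* Illustrative /proc/meminfo excerpt this parser expects (values are decimal
 * kibibytes):
 *
 *    MemAvailable:    8024396 kB
 *
 * for which this function would return 8024396 << 10 bytes.
 */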

static VkResult
anv_physical_device_try_create(struct anv_instance *instance,
                               drmDevicePtr drm_device,
                               struct anv_physical_device **device_out)
{
   const char *primary_path = drm_device->nodes[DRM_NODE_PRIMARY];
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   VkResult result;
   int fd;
   int master_fd = -1;

   brw_process_intel_debug_variable();

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0)
      return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);

   struct gen_device_info devinfo;
   if (!gen_get_device_info_from_fd(fd, &devinfo)) {
      result = vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
      goto fail_fd;
   }

   const char *device_name = gen_get_device_name(devinfo.chipset_id);

   if (devinfo.is_haswell) {
      intel_logw("Haswell Vulkan support is incomplete");
   } else if (devinfo.gen == 7 && !devinfo.is_baytrail) {
      intel_logw("Ivy Bridge Vulkan support is incomplete");
   } else if (devinfo.gen == 7 && devinfo.is_baytrail) {
      intel_logw("Bay Trail Vulkan support is incomplete");
   } else if (devinfo.gen >= 8 && devinfo.gen <= 11) {
      /* Gen8-11 fully supported */
   } else if (devinfo.gen == 12) {
      intel_logw("Vulkan is not yet fully supported on gen12");
   } else {
      result = vk_errorfi(instance, NULL, VK_ERROR_INCOMPATIBLE_DRIVER,
                          "Vulkan not yet supported on %s", device_name);
      goto fail_fd;
   }

   struct anv_physical_device *device =
      vk_alloc(&instance->alloc, sizeof(*device), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (device == NULL) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_fd;
   }

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;

   assert(strlen(path) < ARRAY_SIZE(device->path));
   snprintf(device->path, ARRAY_SIZE(device->path), "%s", path);

   device->info = devinfo;
   device->name = device_name;

   device->no_hw = device->info.no_hw;
   if (getenv("INTEL_NO_HW") != NULL)
      device->no_hw = true;

   device->pci_info.domain = drm_device->businfo.pci->domain;
   device->pci_info.bus = drm_device->businfo.pci->bus;
   device->pci_info.device = drm_device->businfo.pci->dev;
   device->pci_info.function = drm_device->businfo.pci->func;

   device->cmd_parser_version = -1;
   if (device->info.gen == 7) {
      device->cmd_parser_version =
         anv_gem_get_param(fd, I915_PARAM_CMD_PARSER_VERSION);
      if (device->cmd_parser_version == -1) {
         result = vk_errorfi(device->instance, NULL,
                             VK_ERROR_INITIALIZATION_FAILED,
                             "failed to get command parser version");
         goto fail_alloc;
      }
   }

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT)) {
      result = vk_errorfi(device->instance, NULL,
                          VK_ERROR_INITIALIZATION_FAILED,
                          "kernel missing gem wait");
      goto fail_alloc;
   }

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2)) {
      result = vk_errorfi(device->instance, NULL,
                          VK_ERROR_INITIALIZATION_FAILED,
                          "kernel missing execbuf2");
      goto fail_alloc;
   }

   if (!device->info.has_llc &&
       anv_gem_get_param(fd, I915_PARAM_MMAP_VERSION) < 1) {
      result = vk_errorfi(device->instance, NULL,
                          VK_ERROR_INITIALIZATION_FAILED,
                          "kernel missing wc mmap");
      goto fail_alloc;
   }

   device->has_softpin = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_SOFTPIN);
   device->has_exec_async = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_ASYNC);
   device->has_exec_capture = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_CAPTURE);
   device->has_exec_fence = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_FENCE);
   device->has_syncobj = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_FENCE_ARRAY);
   device->has_syncobj_wait = device->has_syncobj &&
                              anv_gem_supports_syncobj_wait(fd);
   device->has_context_priority = anv_gem_has_context_priority(fd);

   result = anv_physical_device_init_heaps(device, fd);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   device->use_softpin = device->has_softpin &&
                         device->supports_48bit_addresses;

   device->has_context_isolation =
      anv_gem_get_param(fd, I915_PARAM_HAS_CONTEXT_ISOLATION);

   device->always_use_bindless =
      env_var_as_boolean("ANV_ALWAYS_BINDLESS", false);

   /* We first got the A64 messages on Broadwell, and we can only use them if
    * we can pass addresses directly into the shader, which requires softpin.
    */
   device->has_a64_buffer_access = device->info.gen >= 8 &&
                                   device->use_softpin;

   /* We first got bindless image access on Skylake, and we can only really
    * do it if we don't have any relocations, so we need softpin.
    */
   device->has_bindless_images = device->info.gen >= 9 &&
                                 device->use_softpin;

   /* We've had bindless samplers since Ivy Bridge (forever in Vulkan terms)
    * because it's just a matter of setting the sampler address in the sample
    * message header. However, we've not bothered to wire it up for vec4 so
    * we leave it disabled on gen7.
    */
   device->has_bindless_samplers = device->info.gen >= 8;

   device->has_implicit_ccs = device->info.has_aux_map;

   device->has_mem_available = get_available_system_memory() != 0;

   device->always_flush_cache =
      driQueryOptionb(&instance->dri_options, "always_flush_cache");

   device->has_mmap_offset =
      anv_gem_get_param(fd, I915_PARAM_MMAP_GTT_VERSION) >= 4;

   /* GENs prior to 8 do not support EU/Subslice info */
   if (device->info.gen >= 8) {
      device->subslice_total = anv_gem_get_param(fd, I915_PARAM_SUBSLICE_TOTAL);
      device->eu_total = anv_gem_get_param(fd, I915_PARAM_EU_TOTAL);

      /* Without this information, we cannot get the right Braswell brand
       * strings, and we have to use conservative numbers for GPGPU on many
       * platforms, but otherwise, things will just work.
       */
      if (device->subslice_total < 1 || device->eu_total < 1) {
         intel_logw("Kernel 4.1 required to properly query GPU properties");
      }
   } else if (device->info.gen == 7) {
      device->subslice_total = 1 << (device->info.gt - 1);
   }

   if (device->info.is_cherryview &&
       device->subslice_total > 0 && device->eu_total > 0) {
      /* Logical CS threads = EUs per subslice * num threads per EU */
      uint32_t max_cs_threads =
         device->eu_total / device->subslice_total * device->info.num_thread_per_eu;

      /* Fuse configurations may give more threads than expected, never less. */
      if (max_cs_threads > device->info.max_cs_threads)
         device->info.max_cs_threads = max_cs_threads;
   }

   device->compiler = brw_compiler_create(NULL, &device->info);
   if (device->compiler == NULL) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_alloc;
   }
   device->compiler->shader_debug_log = compiler_debug_log;
   device->compiler->shader_perf_log = compiler_perf_log;
   device->compiler->supports_pull_constants = false;
   device->compiler->constant_buffer_0_is_relative =
      device->info.gen < 8 || !device->has_context_isolation;
   device->compiler->supports_shader_constants = true;
   device->compiler->compact_params = false;

   /* Broadwell PRM says:
    *
    *   "Before Gen8, there was a historical configuration control field to
    *    swizzle address bit[6] for in X/Y tiling modes. This was set in three
    *    different places: TILECTL[1:0], ARB_MODE[5:4], and
    *    DISP_ARB_CTL[14:13].
    *
    *    For Gen8 and subsequent generations, the swizzle fields are all
    *    reserved, and the CPU's memory controller performs all address
    *    swizzling modifications."
    */
   bool swizzled =
      device->info.gen < 8 && anv_gem_get_bit6_swizzle(fd, I915_TILING_X);

   isl_device_init(&device->isl_dev, &device->info, swizzled);

   result = anv_physical_device_init_uuids(device);
   if (result != VK_SUCCESS)
      goto fail_compiler;

   anv_physical_device_init_disk_cache(device);

   if (instance->enabled_extensions.KHR_display) {
      master_fd = open(primary_path, O_RDWR | O_CLOEXEC);
      if (master_fd >= 0) {
         /* prod the device with a GETPARAM call which will fail if
          * we don't have permission to even render on this device
          */
         if (anv_gem_get_param(master_fd, I915_PARAM_CHIPSET_ID) == 0) {
            close(master_fd);
            master_fd = -1;
         }
      }
   }
   device->master_fd = master_fd;

   result = anv_init_wsi(device);
   if (result != VK_SUCCESS)
      goto fail_disk_cache;

   device->perf = anv_get_perf(&device->info, fd);

   anv_physical_device_get_supported_extensions(device,
                                                &device->supported_extensions);

   device->local_fd = fd;

   *device_out = device;

   return VK_SUCCESS;

 fail_disk_cache:
   anv_physical_device_free_disk_cache(device);
 fail_compiler:
   ralloc_free(device->compiler);
 fail_alloc:
   vk_free(&instance->alloc, device);
 fail_fd:
   close(fd);
   if (master_fd != -1)
      close(master_fd);
   return result;
}

static void
anv_physical_device_destroy(struct anv_physical_device *device)
{
   anv_finish_wsi(device);
   anv_physical_device_free_disk_cache(device);
   ralloc_free(device->compiler);
   ralloc_free(device->perf);
   close(device->local_fd);
   if (device->master_fd >= 0)
      close(device->master_fd);
   vk_free(&device->instance->alloc, device);
}

static void *
default_alloc_func(void *pUserData, size_t size, size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static void *
default_realloc_func(void *pUserData, void *pOriginal, size_t size,
                     size_t align, VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};

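/* Editor's note: VK_OUTARRAY_MAKE and vk_outarray_append below implement
 * Vulkan's standard two-call enumeration idiom: with a NULL output array only
 * the element count is written back, and with a too-small array the helpers
 * fill what fits and vk_outarray_status() returns VK_INCOMPLETE.
 */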
VkResult anv_EnumerateInstanceExtensionProperties(
    const char*                                 pLayerName,
    uint32_t*                                   pPropertyCount,
    VkExtensionProperties*                      pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   for (int i = 0; i < ANV_INSTANCE_EXTENSION_COUNT; i++) {
      if (anv_instance_extensions_supported.extensions[i]) {
         vk_outarray_append(&out, prop) {
            *prop = anv_instance_extensions[i];
         }
      }
   }

   return vk_outarray_status(&out);
}

VkResult anv_CreateInstance(
    const VkInstanceCreateInfo*                 pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkInstance*                                 pInstance)
{
   struct anv_instance *instance;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   struct anv_instance_extension_table enabled_extensions = {};
   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      int idx;
      for (idx = 0; idx < ANV_INSTANCE_EXTENSION_COUNT; idx++) {
         if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
                    anv_instance_extensions[idx].extensionName) == 0)
            break;
      }

      if (idx >= ANV_INSTANCE_EXTENSION_COUNT)
         return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);

      if (!anv_instance_extensions_supported.extensions[idx])
         return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);

      enabled_extensions.extensions[idx] = true;
   }

   instance = vk_alloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
                        VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->app_info = (struct anv_app_info) { .api_version = 0 };
   if (pCreateInfo->pApplicationInfo) {
      const VkApplicationInfo *app = pCreateInfo->pApplicationInfo;

      instance->app_info.app_name =
         vk_strdup(&instance->alloc, app->pApplicationName,
                   VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
      instance->app_info.app_version = app->applicationVersion;

      instance->app_info.engine_name =
         vk_strdup(&instance->alloc, app->pEngineName,
                   VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
      instance->app_info.engine_version = app->engineVersion;

      instance->app_info.api_version = app->apiVersion;
   }

   if (instance->app_info.api_version == 0)
      instance->app_info.api_version = VK_API_VERSION_1_0;

   instance->enabled_extensions = enabled_extensions;

   for (unsigned i = 0; i < ARRAY_SIZE(instance->dispatch.entrypoints); i++) {
      /* Vulkan requires that entrypoints for extensions which have not been
       * enabled must not be advertised.
       */
      if (!anv_instance_entrypoint_is_enabled(i, instance->app_info.api_version,
                                              &instance->enabled_extensions)) {
         instance->dispatch.entrypoints[i] = NULL;
      } else {
         instance->dispatch.entrypoints[i] =
            anv_instance_dispatch_table.entrypoints[i];
      }
   }

   for (unsigned i = 0; i < ARRAY_SIZE(instance->physical_device_dispatch.entrypoints); i++) {
      /* Vulkan requires that entrypoints for extensions which have not been
       * enabled must not be advertised.
       */
      if (!anv_physical_device_entrypoint_is_enabled(i, instance->app_info.api_version,
                                                     &instance->enabled_extensions)) {
         instance->physical_device_dispatch.entrypoints[i] = NULL;
      } else {
         instance->physical_device_dispatch.entrypoints[i] =
            anv_physical_device_dispatch_table.entrypoints[i];
      }
   }

   for (unsigned i = 0; i < ARRAY_SIZE(instance->device_dispatch.entrypoints); i++) {
      /* Vulkan requires that entrypoints for extensions which have not been
       * enabled must not be advertised.
       */
      if (!anv_device_entrypoint_is_enabled(i, instance->app_info.api_version,
                                            &instance->enabled_extensions, NULL)) {
         instance->device_dispatch.entrypoints[i] = NULL;
      } else {
         instance->device_dispatch.entrypoints[i] =
            anv_device_dispatch_table.entrypoints[i];
      }
   }

   instance->physical_devices_enumerated = false;
   list_inithead(&instance->physical_devices);

   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
   if (result != VK_SUCCESS) {
      vk_free2(&default_alloc, pAllocator, instance);
      return vk_error(result);
   }

   instance->pipeline_cache_enabled =
      env_var_as_boolean("ANV_ENABLE_PIPELINE_CACHE", true);

   glsl_type_singleton_init_or_ref();

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   driParseOptionInfo(&instance->available_dri_options, anv_dri_options_xml);
   driParseConfigFiles(&instance->dri_options, &instance->available_dri_options,
                       0, "anv", NULL,
                       instance->app_info.engine_name,
                       instance->app_info.engine_version);

   *pInstance = anv_instance_to_handle(instance);

   return VK_SUCCESS;
}

void anv_DestroyInstance(
    VkInstance                                  _instance,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);

   if (!instance)
      return;

   list_for_each_entry_safe(struct anv_physical_device, pdevice,
                            &instance->physical_devices, link)
      anv_physical_device_destroy(pdevice);

   vk_free(&instance->alloc, (char *)instance->app_info.app_name);
   vk_free(&instance->alloc, (char *)instance->app_info.engine_name);

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   glsl_type_singleton_decref();

   driDestroyOptionCache(&instance->dri_options);
   driDestroyOptionInfo(&instance->available_dri_options);

   vk_free(&instance->alloc, instance);
}

static VkResult
anv_enumerate_physical_devices(struct anv_instance *instance)
{
   if (instance->physical_devices_enumerated)
      return VK_SUCCESS;

   instance->physical_devices_enumerated = true;

   /* TODO: Check for more devices? */
   drmDevicePtr devices[8];
   int max_devices;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
   if (max_devices < 1)
      return VK_SUCCESS;

   VkResult result = VK_SUCCESS;
   for (unsigned i = 0; i < (unsigned)max_devices; i++) {
      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
          devices[i]->bustype == DRM_BUS_PCI &&
          devices[i]->deviceinfo.pci->vendor_id == 0x8086) {

         struct anv_physical_device *pdevice;
         result = anv_physical_device_try_create(instance, devices[i],
                                                 &pdevice);
         /* Incompatible DRM device, skip. */
         if (result == VK_ERROR_INCOMPATIBLE_DRIVER) {
            result = VK_SUCCESS;
            continue;
         }

         /* Error creating the physical device, report the error. */
         if (result != VK_SUCCESS)
            break;

         list_addtail(&pdevice->link, &instance->physical_devices);
      }
   }
   drmFreeDevices(devices, max_devices);

   /* If we successfully enumerated any devices, call it success */
   return result;
}

VkResult anv_EnumeratePhysicalDevices(
    VkInstance                                  _instance,
    uint32_t*                                   pPhysicalDeviceCount,
    VkPhysicalDevice*                           pPhysicalDevices)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);

   VkResult result = anv_enumerate_physical_devices(instance);
   if (result != VK_SUCCESS)
      return result;

   list_for_each_entry(struct anv_physical_device, pdevice,
                       &instance->physical_devices, link) {
      vk_outarray_append(&out, i) {
         *i = anv_physical_device_to_handle(pdevice);
      }
   }

   return vk_outarray_status(&out);
}

VkResult anv_EnumeratePhysicalDeviceGroups(
    VkInstance                                  _instance,
    uint32_t*                                   pPhysicalDeviceGroupCount,
    VkPhysicalDeviceGroupProperties*            pPhysicalDeviceGroupProperties)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
                    pPhysicalDeviceGroupCount);

   VkResult result = anv_enumerate_physical_devices(instance);
   if (result != VK_SUCCESS)
      return result;

   list_for_each_entry(struct anv_physical_device, pdevice,
                       &instance->physical_devices, link) {
      vk_outarray_append(&out, p) {
         p->physicalDeviceCount = 1;
         memset(p->physicalDevices, 0, sizeof(p->physicalDevices));
         p->physicalDevices[0] = anv_physical_device_to_handle(pdevice);
         p->subsetAllocation = false;

         vk_foreach_struct(ext, p->pNext)
            anv_debug_ignored_stype(ext->sType);
      }
   }

   return vk_outarray_status(&out);
}

void anv_GetPhysicalDeviceFeatures(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceFeatures*                   pFeatures)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);

   *pFeatures = (VkPhysicalDeviceFeatures) {
      .robustBufferAccess = true,
      .fullDrawIndexUint32 = true,
      .imageCubeArray = true,
      .independentBlend = true,
      .geometryShader = true,
      .tessellationShader = true,
      .sampleRateShading = true,
      .dualSrcBlend = true,
      .logicOp = true,
      .multiDrawIndirect = true,
      .drawIndirectFirstInstance = true,
      .depthClamp = true,
      .depthBiasClamp = true,
      .fillModeNonSolid = true,
      .depthBounds = pdevice->info.gen >= 12,
      .wideLines = true,
      .largePoints = true,
      .alphaToOne = true,
      .multiViewport = true,
      .samplerAnisotropy = true,
      .textureCompressionETC2 = pdevice->info.gen >= 8 ||
                                pdevice->info.is_baytrail,
      .textureCompressionASTC_LDR = pdevice->info.gen >= 9, /* FINISHME CHV */
      .textureCompressionBC = true,
      .occlusionQueryPrecise = true,
      .pipelineStatisticsQuery = true,
      .fragmentStoresAndAtomics = true,
      .shaderTessellationAndGeometryPointSize = true,
      .shaderImageGatherExtended = true,
      .shaderStorageImageExtendedFormats = true,
      .shaderStorageImageMultisample = false,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = true,
      .shaderUniformBufferArrayDynamicIndexing = true,
      .shaderSampledImageArrayDynamicIndexing = true,
      .shaderStorageBufferArrayDynamicIndexing = true,
      .shaderStorageImageArrayDynamicIndexing = true,
      .shaderClipDistance = true,
      .shaderCullDistance = true,
      .shaderFloat64 = pdevice->info.gen >= 8 &&
                       pdevice->info.has_64bit_float,
      .shaderInt64 = pdevice->info.gen >= 8 &&
                     pdevice->info.has_64bit_int,
      .shaderInt16 = pdevice->info.gen >= 8,
      .shaderResourceMinLod = pdevice->info.gen >= 9,
      .variableMultisampleRate = true,
      .inheritedQueries = true,
   };

   /* We can't do image stores in vec4 shaders */
   pFeatures->vertexPipelineStoresAndAtomics =
      pdevice->compiler->scalar_stage[MESA_SHADER_VERTEX] &&
      pdevice->compiler->scalar_stage[MESA_SHADER_GEOMETRY];

   struct anv_app_info *app_info = &pdevice->instance->app_info;

   /* The new DOOM and Wolfenstein games require depthBounds without
    * checking for it. They seem to run fine without it so just claim it's
    * there and accept the consequences.
    */
   if (app_info->engine_name && strcmp(app_info->engine_name, "idTech") == 0)
      pFeatures->depthBounds = true;
}

static void
anv_get_physical_device_features_1_1(struct anv_physical_device *pdevice,
                                     VkPhysicalDeviceVulkan11Features *f)
{
   assert(f->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES);

   f->storageBuffer16BitAccess = pdevice->info.gen >= 8;
   f->uniformAndStorageBuffer16BitAccess = pdevice->info.gen >= 8;
   f->storagePushConstant16 = pdevice->info.gen >= 8;
   f->storageInputOutput16 = false;
   f->multiview = true;
   f->multiviewGeometryShader = true;
   f->multiviewTessellationShader = true;
   f->variablePointersStorageBuffer = true;
   f->variablePointers = true;
   f->protectedMemory = false;
   f->samplerYcbcrConversion = true;
   f->shaderDrawParameters = true;
}

static void
anv_get_physical_device_features_1_2(struct anv_physical_device *pdevice,
                                     VkPhysicalDeviceVulkan12Features *f)
{
   assert(f->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES);

   f->samplerMirrorClampToEdge = true;
   f->drawIndirectCount = true;
   f->storageBuffer8BitAccess = pdevice->info.gen >= 8;
   f->uniformAndStorageBuffer8BitAccess = pdevice->info.gen >= 8;
   f->storagePushConstant8 = pdevice->info.gen >= 8;
   f->shaderBufferInt64Atomics = pdevice->info.gen >= 9 &&
                                 pdevice->use_softpin;
   f->shaderSharedInt64Atomics = false;
   f->shaderFloat16 = pdevice->info.gen >= 8;
   f->shaderInt8 = pdevice->info.gen >= 8;

   bool descIndexing = pdevice->has_a64_buffer_access &&
                       pdevice->has_bindless_images;
   f->descriptorIndexing = descIndexing;
   f->shaderInputAttachmentArrayDynamicIndexing = false;
   f->shaderUniformTexelBufferArrayDynamicIndexing = descIndexing;
   f->shaderStorageTexelBufferArrayDynamicIndexing = descIndexing;
   f->shaderUniformBufferArrayNonUniformIndexing = false;
   f->shaderSampledImageArrayNonUniformIndexing = descIndexing;
   f->shaderStorageBufferArrayNonUniformIndexing = descIndexing;
   f->shaderStorageImageArrayNonUniformIndexing = descIndexing;
   f->shaderInputAttachmentArrayNonUniformIndexing = false;
   f->shaderUniformTexelBufferArrayNonUniformIndexing = descIndexing;
   f->shaderStorageTexelBufferArrayNonUniformIndexing = descIndexing;
   f->descriptorBindingUniformBufferUpdateAfterBind = false;
   f->descriptorBindingSampledImageUpdateAfterBind = descIndexing;
   f->descriptorBindingStorageImageUpdateAfterBind = descIndexing;
   f->descriptorBindingStorageBufferUpdateAfterBind = descIndexing;
   f->descriptorBindingUniformTexelBufferUpdateAfterBind = descIndexing;
   f->descriptorBindingStorageTexelBufferUpdateAfterBind = descIndexing;
   f->descriptorBindingUpdateUnusedWhilePending = descIndexing;
   f->descriptorBindingPartiallyBound = descIndexing;
   f->descriptorBindingVariableDescriptorCount = false;
   f->runtimeDescriptorArray = descIndexing;

   f->samplerFilterMinmax = pdevice->info.gen >= 9;
   f->scalarBlockLayout = true;
   f->imagelessFramebuffer = true;
   f->uniformBufferStandardLayout = true;
   f->shaderSubgroupExtendedTypes = true;
   f->separateDepthStencilLayouts = true;
   f->hostQueryReset = true;
   f->timelineSemaphore = true;
   f->bufferDeviceAddress = pdevice->has_a64_buffer_access;
   f->bufferDeviceAddressCaptureReplay = pdevice->has_a64_buffer_access;
   f->bufferDeviceAddressMultiDevice = false;
   f->vulkanMemoryModel = true;
   f->vulkanMemoryModelDeviceScope = true;
   f->vulkanMemoryModelAvailabilityVisibilityChains = true;
   f->shaderOutputViewportIndex = true;
   f->shaderOutputLayer = true;
   f->subgroupBroadcastDynamicId = true;
}

void anv_GetPhysicalDeviceFeatures2(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceFeatures2*                  pFeatures)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
   anv_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);

   VkPhysicalDeviceVulkan11Features core_1_1 = {
      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES,
   };
   anv_get_physical_device_features_1_1(pdevice, &core_1_1);

   VkPhysicalDeviceVulkan12Features core_1_2 = {
      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES,
   };
   anv_get_physical_device_features_1_2(pdevice, &core_1_2);

#define CORE_FEATURE(major, minor, feature) \
   features->feature = core_##major##_##minor.feature

   vk_foreach_struct(ext, pFeatures->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR: {
         VkPhysicalDevice8BitStorageFeaturesKHR *features =
            (VkPhysicalDevice8BitStorageFeaturesKHR *)ext;
         CORE_FEATURE(1, 2, storageBuffer8BitAccess);
         CORE_FEATURE(1, 2, uniformAndStorageBuffer8BitAccess);
         CORE_FEATURE(1, 2, storagePushConstant8);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
         VkPhysicalDevice16BitStorageFeatures *features =
            (VkPhysicalDevice16BitStorageFeatures *)ext;
         CORE_FEATURE(1, 1, storageBuffer16BitAccess);
         CORE_FEATURE(1, 1, uniformAndStorageBuffer16BitAccess);
         CORE_FEATURE(1, 1, storagePushConstant16);
         CORE_FEATURE(1, 1, storageInputOutput16);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT: {
         VkPhysicalDeviceBufferDeviceAddressFeaturesEXT *features = (void *)ext;
         features->bufferDeviceAddress = pdevice->has_a64_buffer_access;
         features->bufferDeviceAddressCaptureReplay = false;
         features->bufferDeviceAddressMultiDevice = false;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_KHR: {
         VkPhysicalDeviceBufferDeviceAddressFeaturesKHR *features = (void *)ext;
         CORE_FEATURE(1, 2, bufferDeviceAddress);
         CORE_FEATURE(1, 2, bufferDeviceAddressCaptureReplay);
         CORE_FEATURE(1, 2, bufferDeviceAddressMultiDevice);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV: {
         VkPhysicalDeviceComputeShaderDerivativesFeaturesNV *features =
            (VkPhysicalDeviceComputeShaderDerivativesFeaturesNV *)ext;
         features->computeDerivativeGroupQuads = true;
         features->computeDerivativeGroupLinear = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
         VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
            (VkPhysicalDeviceConditionalRenderingFeaturesEXT*)ext;
         features->conditionalRendering = pdevice->info.gen >= 8 ||
                                          pdevice->info.is_haswell;
         features->inheritedConditionalRendering = pdevice->info.gen >= 8 ||
                                                   pdevice->info.is_haswell;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT: {
         VkPhysicalDeviceDepthClipEnableFeaturesEXT *features =
            (VkPhysicalDeviceDepthClipEnableFeaturesEXT *)ext;
         features->depthClipEnable = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR: {
         VkPhysicalDeviceFloat16Int8FeaturesKHR *features = (void *)ext;
         CORE_FEATURE(1, 2, shaderFloat16);
         CORE_FEATURE(1, 2, shaderInt8);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT: {
         VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT *features =
            (VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT *)ext;
         features->fragmentShaderSampleInterlock = pdevice->info.gen >= 9;
         features->fragmentShaderPixelInterlock = pdevice->info.gen >= 9;
         features->fragmentShaderShadingRateInterlock = false;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT: {
         VkPhysicalDeviceHostQueryResetFeaturesEXT *features =
            (VkPhysicalDeviceHostQueryResetFeaturesEXT *)ext;
         CORE_FEATURE(1, 2, hostQueryReset);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
         VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
            (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *)ext;
         CORE_FEATURE(1, 2, shaderInputAttachmentArrayDynamicIndexing);
         CORE_FEATURE(1, 2, shaderUniformTexelBufferArrayDynamicIndexing);
         CORE_FEATURE(1, 2, shaderStorageTexelBufferArrayDynamicIndexing);
         CORE_FEATURE(1, 2, shaderUniformBufferArrayNonUniformIndexing);
         CORE_FEATURE(1, 2, shaderSampledImageArrayNonUniformIndexing);
         CORE_FEATURE(1, 2, shaderStorageBufferArrayNonUniformIndexing);
         CORE_FEATURE(1, 2, shaderStorageImageArrayNonUniformIndexing);
         CORE_FEATURE(1, 2, shaderInputAttachmentArrayNonUniformIndexing);
         CORE_FEATURE(1, 2, shaderUniformTexelBufferArrayNonUniformIndexing);
         CORE_FEATURE(1, 2, shaderStorageTexelBufferArrayNonUniformIndexing);
         CORE_FEATURE(1, 2, descriptorBindingUniformBufferUpdateAfterBind);
         CORE_FEATURE(1, 2, descriptorBindingSampledImageUpdateAfterBind);
         CORE_FEATURE(1, 2, descriptorBindingStorageImageUpdateAfterBind);
         CORE_FEATURE(1, 2, descriptorBindingStorageBufferUpdateAfterBind);
         CORE_FEATURE(1, 2, descriptorBindingUniformTexelBufferUpdateAfterBind);
         CORE_FEATURE(1, 2, descriptorBindingStorageTexelBufferUpdateAfterBind);
         CORE_FEATURE(1, 2, descriptorBindingUpdateUnusedWhilePending);
         CORE_FEATURE(1, 2, descriptorBindingPartiallyBound);
         CORE_FEATURE(1, 2, descriptorBindingVariableDescriptorCount);
         CORE_FEATURE(1, 2, runtimeDescriptorArray);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT: {
         VkPhysicalDeviceIndexTypeUint8FeaturesEXT *features =
            (VkPhysicalDeviceIndexTypeUint8FeaturesEXT *)ext;
         features->indexTypeUint8 = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT: {
         VkPhysicalDeviceInlineUniformBlockFeaturesEXT *features =
            (VkPhysicalDeviceInlineUniformBlockFeaturesEXT *)ext;
         features->inlineUniformBlock = true;
         features->descriptorBindingInlineUniformBlockUpdateAfterBind = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT: {
         VkPhysicalDeviceLineRasterizationFeaturesEXT *features =
            (VkPhysicalDeviceLineRasterizationFeaturesEXT *)ext;
         features->rectangularLines = true;
         features->bresenhamLines = true;
         /* Support for Smooth lines with MSAA was removed on gen11. From the
          * BSpec section "Multisample ModesState" table for "AA Line Support
          * Requirements":
          *
          *    GEN10:BUG:########    NUM_MULTISAMPLES == 1
          *
          * Fortunately, this isn't a case most people care about.
          */
         features->smoothLines = pdevice->info.gen < 10;
         features->stippledRectangularLines = false;
         features->stippledBresenhamLines = true;
         features->stippledSmoothLines = false;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
         VkPhysicalDeviceMultiviewFeatures *features =
            (VkPhysicalDeviceMultiviewFeatures *)ext;
         CORE_FEATURE(1, 1, multiview);
         CORE_FEATURE(1, 1, multiviewGeometryShader);
         CORE_FEATURE(1, 1, multiviewTessellationShader);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES_KHR: {
         VkPhysicalDeviceImagelessFramebufferFeaturesKHR *features =
            (VkPhysicalDeviceImagelessFramebufferFeaturesKHR *)ext;
         CORE_FEATURE(1, 2, imagelessFramebuffer);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR: {
         VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR *features =
            (VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR *)ext;
         features->pipelineExecutableInfo = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
         VkPhysicalDeviceProtectedMemoryFeatures *features = (void *)ext;
         CORE_FEATURE(1, 1, protectedMemory);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT: {
         VkPhysicalDeviceRobustness2FeaturesEXT *features = (void *)ext;
         features->robustBufferAccess2 = true;
         features->robustImageAccess2 = true;
         features->nullDescriptor = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
         VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
            (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
         CORE_FEATURE(1, 1, samplerYcbcrConversion);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES_EXT: {
         VkPhysicalDeviceScalarBlockLayoutFeaturesEXT *features =
            (VkPhysicalDeviceScalarBlockLayoutFeaturesEXT *)ext;
         CORE_FEATURE(1, 2, scalarBlockLayout);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES_KHR: {
         VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR *features =
            (VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR *)ext;
         CORE_FEATURE(1, 2, separateDepthStencilLayouts);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR: {
         VkPhysicalDeviceShaderAtomicInt64FeaturesKHR *features = (void *)ext;
         CORE_FEATURE(1, 2, shaderBufferInt64Atomics);
         CORE_FEATURE(1, 2, shaderSharedInt64Atomics);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT: {
         VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT *features = (void *)ext;
         features->shaderDemoteToHelperInvocation = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR: {
         VkPhysicalDeviceShaderClockFeaturesKHR *features =
            (VkPhysicalDeviceShaderClockFeaturesKHR *)ext;
         features->shaderSubgroupClock = true;
         features->shaderDeviceClock = false;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: {
         VkPhysicalDeviceShaderDrawParametersFeatures *features = (void *)ext;
         CORE_FEATURE(1, 1, shaderDrawParameters);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES_KHR: {
         VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR *features =
            (VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR *)ext;
         CORE_FEATURE(1, 2, shaderSubgroupExtendedTypes);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT: {
         VkPhysicalDeviceSubgroupSizeControlFeaturesEXT *features =
            (VkPhysicalDeviceSubgroupSizeControlFeaturesEXT *)ext;
         features->subgroupSizeControl = true;
         features->computeFullSubgroups = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT: {
         VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT *features =
            (VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT *)ext;
         features->texelBufferAlignment = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES_KHR: {
         VkPhysicalDeviceTimelineSemaphoreFeaturesKHR *features =
            (VkPhysicalDeviceTimelineSemaphoreFeaturesKHR *) ext;
         CORE_FEATURE(1, 2, timelineSemaphore);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
         VkPhysicalDeviceVariablePointersFeatures *features = (void *)ext;
         CORE_FEATURE(1, 1, variablePointersStorageBuffer);
         CORE_FEATURE(1, 1, variablePointers);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT: {
         VkPhysicalDeviceTransformFeedbackFeaturesEXT *features =
            (VkPhysicalDeviceTransformFeedbackFeaturesEXT *)ext;
         features->transformFeedback = true;
         features->geometryStreams = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES_KHR: {
         VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR *features =
            (VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR *)ext;
         CORE_FEATURE(1, 2, uniformBufferStandardLayout);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT: {
         VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *features =
            (VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *)ext;
         features->vertexAttributeInstanceRateDivisor = true;
         features->vertexAttributeInstanceRateZeroDivisor = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES:
         anv_get_physical_device_features_1_1(pdevice, (void *)ext);
         break;

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES:
         anv_get_physical_device_features_1_2(pdevice, (void *)ext);
         break;

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES_KHR: {
         VkPhysicalDeviceVulkanMemoryModelFeaturesKHR *features = (void *)ext;
         CORE_FEATURE(1, 2, vulkanMemoryModel);
         CORE_FEATURE(1, 2, vulkanMemoryModelDeviceScope);
         CORE_FEATURE(1, 2, vulkanMemoryModelAvailabilityVisibilityChains);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT: {
         VkPhysicalDeviceYcbcrImageArraysFeaturesEXT *features =
            (VkPhysicalDeviceYcbcrImageArraysFeaturesEXT *)ext;
         features->ycbcrImageArrays = true;
         break;
      }

      default:
         anv_debug_ignored_stype(ext->sType);
         break;
      }
   }

#undef CORE_FEATURE
}

#define MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BUFFERS 64

#define MAX_PER_STAGE_DESCRIPTOR_INPUT_ATTACHMENTS 64
#define MAX_DESCRIPTOR_SET_INPUT_ATTACHMENTS 256

void anv_GetPhysicalDeviceProperties(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceProperties*                 pProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
   const struct gen_device_info *devinfo = &pdevice->info;

   /* See assertions made when programming the buffer surface state. */
   const uint32_t max_raw_buffer_sz = devinfo->gen >= 7 ?
                                      (1ul << 30) : (1ul << 27);

   const uint32_t max_ssbos = pdevice->has_a64_buffer_access ? UINT16_MAX : 64;
   const uint32_t max_textures =
      pdevice->has_bindless_images ? UINT16_MAX : 128;
   const uint32_t max_samplers =
      pdevice->has_bindless_samplers ? UINT16_MAX :
      (devinfo->gen >= 8 || devinfo->is_haswell) ? 128 : 16;
   const uint32_t max_images =
      pdevice->has_bindless_images ? UINT16_MAX : MAX_IMAGES;

   /* If we can use bindless for everything, claim a high per-stage limit,
    * otherwise use the binding table size, minus the slots reserved for
    * render targets and one slot for the descriptor buffer.
    */
   const uint32_t max_per_stage =
      pdevice->has_bindless_images && pdevice->has_a64_buffer_access
      ? UINT32_MAX : MAX_BINDING_TABLE_SIZE - MAX_RTS - 1;

   /* Limit max_threads to 64 for the GPGPU_WALKER command */
   const uint32_t max_workgroup_size = 32 * MIN2(64, devinfo->max_cs_threads);
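
   /* Illustrative example (editor's addition): Skylake, for instance, is
    * assumed here to report max_cs_threads = 56, giving max_workgroup_size =
    * 32 * 56 = 1792; the MIN2 only kicks in on hardware reporting more than
    * 64 threads.
    */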
1425
1426 VkSampleCountFlags sample_counts =
1427 isl_device_get_sample_counts(&pdevice->isl_dev);
1428
1429
1430 VkPhysicalDeviceLimits limits = {
1431 .maxImageDimension1D = (1 << 14),
1432 .maxImageDimension2D = (1 << 14),
1433 .maxImageDimension3D = (1 << 11),
1434 .maxImageDimensionCube = (1 << 14),
1435 .maxImageArrayLayers = (1 << 11),
1436 .maxTexelBufferElements = 128 * 1024 * 1024,
1437 .maxUniformBufferRange = (1ul << 27),
1438 .maxStorageBufferRange = max_raw_buffer_sz,
1439 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
1440 .maxMemoryAllocationCount = UINT32_MAX,
1441 .maxSamplerAllocationCount = 64 * 1024,
1442 .bufferImageGranularity = 64, /* A cache line */
1443 .sparseAddressSpaceSize = 0,
1444 .maxBoundDescriptorSets = MAX_SETS,
1445 .maxPerStageDescriptorSamplers = max_samplers,
1446 .maxPerStageDescriptorUniformBuffers = MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BUFFERS,
1447 .maxPerStageDescriptorStorageBuffers = max_ssbos,
1448 .maxPerStageDescriptorSampledImages = max_textures,
1449 .maxPerStageDescriptorStorageImages = max_images,
1450 .maxPerStageDescriptorInputAttachments = MAX_PER_STAGE_DESCRIPTOR_INPUT_ATTACHMENTS,
1451 .maxPerStageResources = max_per_stage,
1452 .maxDescriptorSetSamplers = 6 * max_samplers, /* number of stages * maxPerStageDescriptorSamplers */
1453 .maxDescriptorSetUniformBuffers = 6 * MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BUFFERS, /* number of stages * maxPerStageDescriptorUniformBuffers */
1454 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2,
1455 .maxDescriptorSetStorageBuffers = 6 * max_ssbos, /* number of stages * maxPerStageDescriptorStorageBuffers */
1456 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2,
1457 .maxDescriptorSetSampledImages = 6 * max_textures, /* number of stages * maxPerStageDescriptorSampledImages */
1458 .maxDescriptorSetStorageImages = 6 * max_images, /* number of stages * maxPerStageDescriptorStorageImages */
1459 .maxDescriptorSetInputAttachments = MAX_DESCRIPTOR_SET_INPUT_ATTACHMENTS,
1460 .maxVertexInputAttributes = MAX_VBS,
1461 .maxVertexInputBindings = MAX_VBS,
1462 .maxVertexInputAttributeOffset = 2047,
1463 .maxVertexInputBindingStride = 2048,
1464 .maxVertexOutputComponents = 128,
1465 .maxTessellationGenerationLevel = 64,
1466 .maxTessellationPatchSize = 32,
1467 .maxTessellationControlPerVertexInputComponents = 128,
1468 .maxTessellationControlPerVertexOutputComponents = 128,
1469 .maxTessellationControlPerPatchOutputComponents = 128,
1470 .maxTessellationControlTotalOutputComponents = 2048,
1471 .maxTessellationEvaluationInputComponents = 128,
1472 .maxTessellationEvaluationOutputComponents = 128,
1473 .maxGeometryShaderInvocations = 32,
1474 .maxGeometryInputComponents = 64,
1475 .maxGeometryOutputComponents = 128,
1476 .maxGeometryOutputVertices = 256,
1477 .maxGeometryTotalOutputComponents = 1024,
1478 .maxFragmentInputComponents = 116, /* 128 components - (PSIZ, CLIP_DIST0, CLIP_DIST1) */
1479 .maxFragmentOutputAttachments = 8,
1480 .maxFragmentDualSrcAttachments = 1,
1481 .maxFragmentCombinedOutputResources = 8,
1482 .maxComputeSharedMemorySize = 64 * 1024,
1483 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
1484 .maxComputeWorkGroupInvocations = max_workgroup_size,
1485 .maxComputeWorkGroupSize = {
1486 max_workgroup_size,
1487 max_workgroup_size,
1488 max_workgroup_size,
1489 },
1490 .subPixelPrecisionBits = 8,
1491 .subTexelPrecisionBits = 8,
1492 .mipmapPrecisionBits = 8,
1493 .maxDrawIndexedIndexValue = UINT32_MAX,
1494 .maxDrawIndirectCount = UINT32_MAX,
1495 .maxSamplerLodBias = 16,
1496 .maxSamplerAnisotropy = 16,
1497 .maxViewports = MAX_VIEWPORTS,
1498 .maxViewportDimensions = { (1 << 14), (1 << 14) },
1499 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
1500 .viewportSubPixelBits = 13, /* We take a float? */
1501 .minMemoryMapAlignment = 4096, /* A page */
1502 /* The dataport requires texel alignment so we need to assume a worst
1503 * case of R32G32B32A32 which is 16 bytes.
1504 */
1505 .minTexelBufferOffsetAlignment = 16,
1506 /* We need 16 for UBO block reads to work and 32 for push UBOs */
1507 .minUniformBufferOffsetAlignment = 32,
1508 .minStorageBufferOffsetAlignment = 4,
1509 .minTexelOffset = -8,
1510 .maxTexelOffset = 7,
1511 .minTexelGatherOffset = -32,
1512 .maxTexelGatherOffset = 31,
1513 .minInterpolationOffset = -0.5,
1514 .maxInterpolationOffset = 0.4375,
1515 .subPixelInterpolationOffsetBits = 4,
1516 .maxFramebufferWidth = (1 << 14),
1517 .maxFramebufferHeight = (1 << 14),
1518 .maxFramebufferLayers = (1 << 11),
1519 .framebufferColorSampleCounts = sample_counts,
1520 .framebufferDepthSampleCounts = sample_counts,
1521 .framebufferStencilSampleCounts = sample_counts,
1522 .framebufferNoAttachmentsSampleCounts = sample_counts,
1523 .maxColorAttachments = MAX_RTS,
1524 .sampledImageColorSampleCounts = sample_counts,
1525 .sampledImageIntegerSampleCounts = sample_counts,
1526 .sampledImageDepthSampleCounts = sample_counts,
1527 .sampledImageStencilSampleCounts = sample_counts,
1528 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
1529 .maxSampleMaskWords = 1,
1530 .timestampComputeAndGraphics = true,
1531 .timestampPeriod = 1000000000.0 / devinfo->timestamp_frequency,
1532 .maxClipDistances = 8,
1533 .maxCullDistances = 8,
1534 .maxCombinedClipAndCullDistances = 8,
1535 .discreteQueuePriorities = 2,
1536 .pointSizeRange = { 0.125, 255.875 },
1537 .lineWidthRange = {
1538 0.0,
1539 (devinfo->gen >= 9 || devinfo->is_cherryview) ?
1540 2047.9921875 : 7.9921875,
1541 },
1542 .pointSizeGranularity = (1.0 / 8.0),
1543 .lineWidthGranularity = (1.0 / 128.0),
1544 .strictLines = false,
1545 .standardSampleLocations = true,
1546 .optimalBufferCopyOffsetAlignment = 128,
1547 .optimalBufferCopyRowPitchAlignment = 128,
1548 .nonCoherentAtomSize = 64,
1549 };
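/* A worked example for the timestampPeriod limit above, assuming a gen9
 * part where devinfo->timestamp_frequency is 12000000 (a 12 MHz timestamp
 * clock): 1000000000.0 / 12000000 ~= 83.33 ns per timestamp tick.
 */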
1550
1551 *pProperties = (VkPhysicalDeviceProperties) {
1552 .apiVersion = anv_physical_device_api_version(pdevice),
1553 .driverVersion = vk_get_driver_version(),
1554 .vendorID = 0x8086,
1555 .deviceID = pdevice->info.chipset_id,
1556 .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
1557 .limits = limits,
1558 .sparseProperties = {0}, /* Broadwell doesn't do sparse. */
1559 };
1560
1561 snprintf(pProperties->deviceName, sizeof(pProperties->deviceName),
1562 "%s", pdevice->name);
1563 memcpy(pProperties->pipelineCacheUUID,
1564 pdevice->pipeline_cache_uuid, VK_UUID_SIZE);
1565 }
1566
1567 static void
1568 anv_get_physical_device_properties_1_1(struct anv_physical_device *pdevice,
1569 VkPhysicalDeviceVulkan11Properties *p)
1570 {
1571 assert(p->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES);
1572
1573 memcpy(p->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
1574 memcpy(p->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
1575 memset(p->deviceLUID, 0, VK_LUID_SIZE);
1576 p->deviceNodeMask = 0;
1577 p->deviceLUIDValid = false;
1578
1579 p->subgroupSize = BRW_SUBGROUP_SIZE;
1580 VkShaderStageFlags scalar_stages = 0;
1581 for (unsigned stage = 0; stage < MESA_SHADER_STAGES; stage++) {
1582 if (pdevice->compiler->scalar_stage[stage])
1583 scalar_stages |= mesa_to_vk_shader_stage(stage);
1584 }
1585 p->subgroupSupportedStages = scalar_stages;
1586 p->subgroupSupportedOperations = VK_SUBGROUP_FEATURE_BASIC_BIT |
1587 VK_SUBGROUP_FEATURE_VOTE_BIT |
1588 VK_SUBGROUP_FEATURE_BALLOT_BIT |
1589 VK_SUBGROUP_FEATURE_SHUFFLE_BIT |
1590 VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT |
1591 VK_SUBGROUP_FEATURE_QUAD_BIT;
1592 if (pdevice->info.gen >= 8) {
1593 /* TODO: There's no technical reason why these can't be made to
1594 * work on gen7, but they don't at the moment, so it's better to leave
1595 * the feature disabled than enabled and broken.
1596 */
1597 p->subgroupSupportedOperations |= VK_SUBGROUP_FEATURE_ARITHMETIC_BIT |
1598 VK_SUBGROUP_FEATURE_CLUSTERED_BIT;
1599 }
1600 p->subgroupQuadOperationsInAllStages = pdevice->info.gen >= 8;
1601
1602 p->pointClippingBehavior = VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY;
1603 p->maxMultiviewViewCount = 16;
1604 p->maxMultiviewInstanceIndex = UINT32_MAX / 16;
1605 p->protectedNoFault = false;
1606 /* This value doesn't matter for us today as our per-stage descriptors are
1607 * the real limit.
1608 */
1609 p->maxPerSetDescriptors = 1024;
1610 p->maxMemoryAllocationSize = MAX_MEMORY_ALLOCATION_SIZE;
1611 }
1612
1613 static void
1614 anv_get_physical_device_properties_1_2(struct anv_physical_device *pdevice,
1615 VkPhysicalDeviceVulkan12Properties *p)
1616 {
1617 assert(p->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES);
1618
1619 p->driverID = VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA_KHR;
1620 memset(p->driverName, 0, sizeof(p->driverName));
1621 snprintf(p->driverName, VK_MAX_DRIVER_NAME_SIZE_KHR,
1622 "Intel open-source Mesa driver");
1623 memset(p->driverInfo, 0, sizeof(p->driverInfo));
1624 snprintf(p->driverInfo, VK_MAX_DRIVER_INFO_SIZE_KHR,
1625 "Mesa " PACKAGE_VERSION MESA_GIT_SHA1);
1626 p->conformanceVersion = (VkConformanceVersionKHR) {
1627 .major = 1,
1628 .minor = 2,
1629 .subminor = 0,
1630 .patch = 0,
1631 };
1632
1633 p->denormBehaviorIndependence =
1634 VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL_KHR;
1635 p->roundingModeIndependence =
1636 VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE_KHR;
1637
1638 /* Broadwell does not support HF denorms and there are restrictions
1639 * on other gens. According to Kabylake's PRM:
1640 *
1641 * "math - Extended Math Function
1642 * [...]
1643 * Restriction : Half-float denorms are always retained."
1644 */
1645 p->shaderDenormFlushToZeroFloat16 = false;
1646 p->shaderDenormPreserveFloat16 = pdevice->info.gen > 8;
1647 p->shaderRoundingModeRTEFloat16 = true;
1648 p->shaderRoundingModeRTZFloat16 = true;
1649 p->shaderSignedZeroInfNanPreserveFloat16 = true;
1650
1651 p->shaderDenormFlushToZeroFloat32 = true;
1652 p->shaderDenormPreserveFloat32 = true;
1653 p->shaderRoundingModeRTEFloat32 = true;
1654 p->shaderRoundingModeRTZFloat32 = true;
1655 p->shaderSignedZeroInfNanPreserveFloat32 = true;
1656
1657 p->shaderDenormFlushToZeroFloat64 = true;
1658 p->shaderDenormPreserveFloat64 = true;
1659 p->shaderRoundingModeRTEFloat64 = true;
1660 p->shaderRoundingModeRTZFloat64 = true;
1661 p->shaderSignedZeroInfNanPreserveFloat64 = true;
1662
1663 /* It's a bit hard to exactly map our implementation to the limits
1664 * described here. The bindless surface handle in the extended
1665 * message descriptors is 20 bits and it's an index into the table of
1666 * RENDER_SURFACE_STATE structs that starts at bindless surface base
1667 * address. Given that most things consume two surface states per
1668 * view (general/sampled for textures and write-only/read-write for
1669 * images), we claim 2^19 things.
1670 *
1671 * For SSBOs, we just use A64 messages so there is no real limit
1672 * there beyond the limit on the total size of a descriptor set.
1673 */
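/* Spelling out the arithmetic above: a 20-bit handle addresses
 * 2^20 = 1,048,576 RENDER_SURFACE_STATE entries, and at two surface
 * states per view that leaves 2^20 / 2 = 2^19 = 524,288 views.
 */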
1674 const unsigned max_bindless_views = 1 << 19;
1675 p->maxUpdateAfterBindDescriptorsInAllPools = max_bindless_views;
1676 p->shaderUniformBufferArrayNonUniformIndexingNative = false;
1677 p->shaderSampledImageArrayNonUniformIndexingNative = false;
1678 p->shaderStorageBufferArrayNonUniformIndexingNative = true;
1679 p->shaderStorageImageArrayNonUniformIndexingNative = false;
1680 p->shaderInputAttachmentArrayNonUniformIndexingNative = false;
1681 p->robustBufferAccessUpdateAfterBind = true;
1682 p->quadDivergentImplicitLod = false;
1683 p->maxPerStageDescriptorUpdateAfterBindSamplers = max_bindless_views;
1684 p->maxPerStageDescriptorUpdateAfterBindUniformBuffers = MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BUFFERS;
1685 p->maxPerStageDescriptorUpdateAfterBindStorageBuffers = UINT32_MAX;
1686 p->maxPerStageDescriptorUpdateAfterBindSampledImages = max_bindless_views;
1687 p->maxPerStageDescriptorUpdateAfterBindStorageImages = max_bindless_views;
1688 p->maxPerStageDescriptorUpdateAfterBindInputAttachments = MAX_PER_STAGE_DESCRIPTOR_INPUT_ATTACHMENTS;
1689 p->maxPerStageUpdateAfterBindResources = UINT32_MAX;
1690 p->maxDescriptorSetUpdateAfterBindSamplers = max_bindless_views;
1691 p->maxDescriptorSetUpdateAfterBindUniformBuffers = 6 * MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BUFFERS;
1692 p->maxDescriptorSetUpdateAfterBindUniformBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2;
1693 p->maxDescriptorSetUpdateAfterBindStorageBuffers = UINT32_MAX;
1694 p->maxDescriptorSetUpdateAfterBindStorageBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2;
1695 p->maxDescriptorSetUpdateAfterBindSampledImages = max_bindless_views;
1696 p->maxDescriptorSetUpdateAfterBindStorageImages = max_bindless_views;
1697 p->maxDescriptorSetUpdateAfterBindInputAttachments = MAX_DESCRIPTOR_SET_INPUT_ATTACHMENTS;
1698
1699 /* We support all of the depth resolve modes */
1700 p->supportedDepthResolveModes = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR |
1701 VK_RESOLVE_MODE_AVERAGE_BIT_KHR |
1702 VK_RESOLVE_MODE_MIN_BIT_KHR |
1703 VK_RESOLVE_MODE_MAX_BIT_KHR;
1704 /* Average doesn't make sense for stencil so we don't support that */
1705 p->supportedStencilResolveModes = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR;
1706 if (pdevice->info.gen >= 8) {
1707 /* The advanced stencil resolve modes currently require stencil
1708 * sampling be supported by the hardware.
1709 */
1710 p->supportedStencilResolveModes |= VK_RESOLVE_MODE_MIN_BIT_KHR |
1711 VK_RESOLVE_MODE_MAX_BIT_KHR;
1712 }
1713 p->independentResolveNone = true;
1714 p->independentResolve = true;
1715
1716 p->filterMinmaxSingleComponentFormats = pdevice->info.gen >= 9;
1717 p->filterMinmaxImageComponentMapping = pdevice->info.gen >= 9;
1718
1719 p->maxTimelineSemaphoreValueDifference = UINT64_MAX;
1720
1721 p->framebufferIntegerColorSampleCounts =
1722 isl_device_get_sample_counts(&pdevice->isl_dev);
1723 }
1724
1725 void anv_GetPhysicalDeviceProperties2(
1726 VkPhysicalDevice physicalDevice,
1727 VkPhysicalDeviceProperties2* pProperties)
1728 {
1729 ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
1730
1731 anv_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
1732
1733 VkPhysicalDeviceVulkan11Properties core_1_1 = {
1734 .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES,
1735 };
1736 anv_get_physical_device_properties_1_1(pdevice, &core_1_1);
1737
1738 VkPhysicalDeviceVulkan12Properties core_1_2 = {
1739 .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES,
1740 };
1741 anv_get_physical_device_properties_1_2(pdevice, &core_1_2);
1742
1743 #define CORE_RENAMED_PROPERTY(major, minor, ext_property, core_property) \
1744 memcpy(&properties->ext_property, &core_##major##_##minor.core_property, \
1745 sizeof(core_##major##_##minor.core_property))
1746
1747 #define CORE_PROPERTY(major, minor, property) \
1748 CORE_RENAMED_PROPERTY(major, minor, property, property)
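/* For example, in the descriptor indexing case below,
 *
 *    CORE_PROPERTY(1, 2, robustBufferAccessUpdateAfterBind);
 *
 * expands to
 *
 *    memcpy(&properties->robustBufferAccessUpdateAfterBind,
 *           &core_1_2.robustBufferAccessUpdateAfterBind,
 *           sizeof(core_1_2.robustBufferAccessUpdateAfterBind));
 */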
1749
1750 vk_foreach_struct(ext, pProperties->pNext) {
1751 switch (ext->sType) {
1752 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES_KHR: {
1753 VkPhysicalDeviceDepthStencilResolvePropertiesKHR *properties =
1754 (VkPhysicalDeviceDepthStencilResolvePropertiesKHR *)ext;
1755 CORE_PROPERTY(1, 2, supportedDepthResolveModes);
1756 CORE_PROPERTY(1, 2, supportedStencilResolveModes);
1757 CORE_PROPERTY(1, 2, independentResolveNone);
1758 CORE_PROPERTY(1, 2, independentResolve);
1759 break;
1760 }
1761
1762 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT: {
1763 VkPhysicalDeviceDescriptorIndexingPropertiesEXT *properties =
1764 (VkPhysicalDeviceDescriptorIndexingPropertiesEXT *)ext;
1765 CORE_PROPERTY(1, 2, maxUpdateAfterBindDescriptorsInAllPools);
1766 CORE_PROPERTY(1, 2, shaderUniformBufferArrayNonUniformIndexingNative);
1767 CORE_PROPERTY(1, 2, shaderSampledImageArrayNonUniformIndexingNative);
1768 CORE_PROPERTY(1, 2, shaderStorageBufferArrayNonUniformIndexingNative);
1769 CORE_PROPERTY(1, 2, shaderStorageImageArrayNonUniformIndexingNative);
1770 CORE_PROPERTY(1, 2, shaderInputAttachmentArrayNonUniformIndexingNative);
1771 CORE_PROPERTY(1, 2, robustBufferAccessUpdateAfterBind);
1772 CORE_PROPERTY(1, 2, quadDivergentImplicitLod);
1773 CORE_PROPERTY(1, 2, maxPerStageDescriptorUpdateAfterBindSamplers);
1774 CORE_PROPERTY(1, 2, maxPerStageDescriptorUpdateAfterBindUniformBuffers);
1775 CORE_PROPERTY(1, 2, maxPerStageDescriptorUpdateAfterBindStorageBuffers);
1776 CORE_PROPERTY(1, 2, maxPerStageDescriptorUpdateAfterBindSampledImages);
1777 CORE_PROPERTY(1, 2, maxPerStageDescriptorUpdateAfterBindStorageImages);
1778 CORE_PROPERTY(1, 2, maxPerStageDescriptorUpdateAfterBindInputAttachments);
1779 CORE_PROPERTY(1, 2, maxPerStageUpdateAfterBindResources);
1780 CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindSamplers);
1781 CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindUniformBuffers);
1782 CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindUniformBuffersDynamic);
1783 CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindStorageBuffers);
1784 CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindStorageBuffersDynamic);
1785 CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindSampledImages);
1786 CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindStorageImages);
1787 CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindInputAttachments);
1788 break;
1789 }
1790
1791 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR: {
1792 VkPhysicalDeviceDriverPropertiesKHR *properties =
1793 (VkPhysicalDeviceDriverPropertiesKHR *) ext;
1794 CORE_PROPERTY(1, 2, driverID);
1795 CORE_PROPERTY(1, 2, driverName);
1796 CORE_PROPERTY(1, 2, driverInfo);
1797 CORE_PROPERTY(1, 2, conformanceVersion);
1798 break;
1799 }
1800
1801 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT: {
1802 VkPhysicalDeviceExternalMemoryHostPropertiesEXT *props =
1803 (VkPhysicalDeviceExternalMemoryHostPropertiesEXT *) ext;
1804 /* Userptr needs page aligned memory. */
1805 props->minImportedHostPointerAlignment = 4096;
1806 break;
1807 }
1808
1809 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
1810 VkPhysicalDeviceIDProperties *properties =
1811 (VkPhysicalDeviceIDProperties *)ext;
1812 CORE_PROPERTY(1, 1, deviceUUID);
1813 CORE_PROPERTY(1, 1, driverUUID);
1814 CORE_PROPERTY(1, 1, deviceLUID);
1815 CORE_PROPERTY(1, 1, deviceLUIDValid);
1816 break;
1817 }
1818
1819 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT: {
1820 VkPhysicalDeviceInlineUniformBlockPropertiesEXT *props =
1821 (VkPhysicalDeviceInlineUniformBlockPropertiesEXT *)ext;
1822 props->maxInlineUniformBlockSize = MAX_INLINE_UNIFORM_BLOCK_SIZE;
1823 props->maxPerStageDescriptorInlineUniformBlocks =
1824 MAX_INLINE_UNIFORM_BLOCK_DESCRIPTORS;
1825 props->maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks =
1826 MAX_INLINE_UNIFORM_BLOCK_DESCRIPTORS;
1827 props->maxDescriptorSetInlineUniformBlocks =
1828 MAX_INLINE_UNIFORM_BLOCK_DESCRIPTORS;
1829 props->maxDescriptorSetUpdateAfterBindInlineUniformBlocks =
1830 MAX_INLINE_UNIFORM_BLOCK_DESCRIPTORS;
1831 break;
1832 }
1833
1834 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT: {
1835 VkPhysicalDeviceLineRasterizationPropertiesEXT *props =
1836 (VkPhysicalDeviceLineRasterizationPropertiesEXT *)ext;
1837 /* In the Skylake PRM Vol. 7, subsection titled "GIQ (Diamond)
1838 * Sampling Rules - Legacy Mode", it says the following:
1839 *
1840 * "Note that the device divides a pixel into a 16x16 array of
1841 * subpixels, referenced by their upper left corners."
1842 *
1843 * This is the only known reference in the PRMs to the subpixel
1844 * precision of line rasterization and a "16x16 array of subpixels"
1845 * implies 4 subpixel precision bits. Empirical testing has shown
1846 * that 4 subpixel precision bits applies to all line rasterization
1847 * types.
1848 */
1849 props->lineSubPixelPrecisionBits = 4;
1850 break;
1851 }
1852
1853 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
1854 VkPhysicalDeviceMaintenance3Properties *properties =
1855 (VkPhysicalDeviceMaintenance3Properties *)ext;
1856 /* This value doesn't matter for us today as our per-stage
1857 * descriptors are the real limit.
1858 */
1859 CORE_PROPERTY(1, 1, maxPerSetDescriptors);
1860 CORE_PROPERTY(1, 1, maxMemoryAllocationSize);
1861 break;
1862 }
1863
1864 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
1865 VkPhysicalDeviceMultiviewProperties *properties =
1866 (VkPhysicalDeviceMultiviewProperties *)ext;
1867 CORE_PROPERTY(1, 1, maxMultiviewViewCount);
1868 CORE_PROPERTY(1, 1, maxMultiviewInstanceIndex);
1869 break;
1870 }
1871
1872 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT: {
1873 VkPhysicalDevicePCIBusInfoPropertiesEXT *properties =
1874 (VkPhysicalDevicePCIBusInfoPropertiesEXT *)ext;
1875 properties->pciDomain = pdevice->pci_info.domain;
1876 properties->pciBus = pdevice->pci_info.bus;
1877 properties->pciDevice = pdevice->pci_info.device;
1878 properties->pciFunction = pdevice->pci_info.function;
1879 break;
1880 }
1881
1882 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
1883 VkPhysicalDevicePointClippingProperties *properties =
1884 (VkPhysicalDevicePointClippingProperties *) ext;
1885 CORE_PROPERTY(1, 1, pointClippingBehavior);
1886 break;
1887 }
1888
1889 #pragma GCC diagnostic push
1890 #pragma GCC diagnostic ignored "-Wswitch"
1891 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENTATION_PROPERTIES_ANDROID: {
1892 VkPhysicalDevicePresentationPropertiesANDROID *props =
1893 (VkPhysicalDevicePresentationPropertiesANDROID *)ext;
1894 props->sharedImage = VK_FALSE;
1895 break;
1896 }
1897 #pragma GCC diagnostic pop
1898
1899 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES: {
1900 VkPhysicalDeviceProtectedMemoryProperties *properties =
1901 (VkPhysicalDeviceProtectedMemoryProperties *)ext;
1902 CORE_PROPERTY(1, 1, protectedNoFault);
1903 break;
1904 }
1905
1906 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
1907 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
1908 (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
1909 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
1910 break;
1911 }
1912
1913 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT: {
1914 VkPhysicalDeviceRobustness2PropertiesEXT *properties = (void *)ext;
1915 properties->robustStorageBufferAccessSizeAlignment =
1916 ANV_SSBO_BOUNDS_CHECK_ALIGNMENT;
1917 properties->robustUniformBufferAccessSizeAlignment =
1918 ANV_UBO_BOUNDS_CHECK_ALIGNMENT;
1919 break;
1920 }
1921
1922 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT: {
1923 VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT *properties =
1924 (VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT *)ext;
1925 CORE_PROPERTY(1, 2, filterMinmaxImageComponentMapping);
1926 CORE_PROPERTY(1, 2, filterMinmaxSingleComponentFormats);
1927 break;
1928 }
1929
1930 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES: {
1931 VkPhysicalDeviceSubgroupProperties *properties = (void *)ext;
1932 CORE_PROPERTY(1, 1, subgroupSize);
1933 CORE_RENAMED_PROPERTY(1, 1, supportedStages,
1934 subgroupSupportedStages);
1935 CORE_RENAMED_PROPERTY(1, 1, supportedOperations,
1936 subgroupSupportedOperations);
1937 CORE_RENAMED_PROPERTY(1, 1, quadOperationsInAllStages,
1938 subgroupQuadOperationsInAllStages);
1939 break;
1940 }
1941
1942 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT: {
1943 VkPhysicalDeviceSubgroupSizeControlPropertiesEXT *props =
1944 (VkPhysicalDeviceSubgroupSizeControlPropertiesEXT *)ext;
1945 STATIC_ASSERT(8 <= BRW_SUBGROUP_SIZE && BRW_SUBGROUP_SIZE <= 32);
1946 props->minSubgroupSize = 8;
1947 props->maxSubgroupSize = 32;
1948 props->maxComputeWorkgroupSubgroups = pdevice->info.max_cs_threads;
1949 props->requiredSubgroupSizeStages = VK_SHADER_STAGE_COMPUTE_BIT;
1950 break;
1951 }
1952 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES_KHR: {
1953 VkPhysicalDeviceFloatControlsPropertiesKHR *properties = (void *)ext;
1954 CORE_PROPERTY(1, 2, denormBehaviorIndependence);
1955 CORE_PROPERTY(1, 2, roundingModeIndependence);
1956 CORE_PROPERTY(1, 2, shaderDenormFlushToZeroFloat16);
1957 CORE_PROPERTY(1, 2, shaderDenormPreserveFloat16);
1958 CORE_PROPERTY(1, 2, shaderRoundingModeRTEFloat16);
1959 CORE_PROPERTY(1, 2, shaderRoundingModeRTZFloat16);
1960 CORE_PROPERTY(1, 2, shaderSignedZeroInfNanPreserveFloat16);
1961 CORE_PROPERTY(1, 2, shaderDenormFlushToZeroFloat32);
1962 CORE_PROPERTY(1, 2, shaderDenormPreserveFloat32);
1963 CORE_PROPERTY(1, 2, shaderRoundingModeRTEFloat32);
1964 CORE_PROPERTY(1, 2, shaderRoundingModeRTZFloat32);
1965 CORE_PROPERTY(1, 2, shaderSignedZeroInfNanPreserveFloat32);
1966 CORE_PROPERTY(1, 2, shaderDenormFlushToZeroFloat64);
1967 CORE_PROPERTY(1, 2, shaderDenormPreserveFloat64);
1968 CORE_PROPERTY(1, 2, shaderRoundingModeRTEFloat64);
1969 CORE_PROPERTY(1, 2, shaderRoundingModeRTZFloat64);
1970 CORE_PROPERTY(1, 2, shaderSignedZeroInfNanPreserveFloat64);
1971 break;
1972 }
1973
1974 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT: {
1975 VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT *props =
1976 (VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT *)ext;
1977
1978 /* From the SKL PRM Vol. 2d, docs for RENDER_SURFACE_STATE::Surface
1979 * Base Address:
1980 *
1981 * "For SURFTYPE_BUFFER non-rendertarget surfaces, this field
1982 * specifies the base address of the first element of the surface,
1983 * computed in software by adding the surface base address to the
1984 * byte offset of the element in the buffer. The base address must
1985 * be aligned to element size."
1986 *
1987 * The typed dataport messages require that things be texel aligned.
1988 * Otherwise, we may just load/store the wrong data or, in the worst
1989 * case, there may be hangs.
1990 */
1991 props->storageTexelBufferOffsetAlignmentBytes = 16;
1992 props->storageTexelBufferOffsetSingleTexelAlignment = true;
1993
1994 /* The sampler, however, is much more forgiving and it can handle
1995 * arbitrary byte alignment for linear and buffer surfaces. It's
1996 * hard to find a good PRM citation for this but years of empirical
1997 * experience demonstrate that this is true.
1998 */
1999 props->uniformTexelBufferOffsetAlignmentBytes = 1;
2000 props->uniformTexelBufferOffsetSingleTexelAlignment = false;
2001 break;
2002 }
2003
2004 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES_KHR: {
2005 VkPhysicalDeviceTimelineSemaphorePropertiesKHR *properties =
2006 (VkPhysicalDeviceTimelineSemaphorePropertiesKHR *) ext;
2007 CORE_PROPERTY(1, 2, maxTimelineSemaphoreValueDifference);
2008 break;
2009 }
2010
2011 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT: {
2012 VkPhysicalDeviceTransformFeedbackPropertiesEXT *props =
2013 (VkPhysicalDeviceTransformFeedbackPropertiesEXT *)ext;
2014
2015 props->maxTransformFeedbackStreams = MAX_XFB_STREAMS;
2016 props->maxTransformFeedbackBuffers = MAX_XFB_BUFFERS;
2017 props->maxTransformFeedbackBufferSize = (1ull << 32);
2018 props->maxTransformFeedbackStreamDataSize = 128 * 4;
2019 props->maxTransformFeedbackBufferDataSize = 128 * 4;
2020 props->maxTransformFeedbackBufferDataStride = 2048;
2021 props->transformFeedbackQueries = true;
2022 props->transformFeedbackStreamsLinesTriangles = false;
2023 props->transformFeedbackRasterizationStreamSelect = false;
2024 props->transformFeedbackDraw = true;
2025 break;
2026 }
2027
2028 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT: {
2029 VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *props =
2030 (VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *)ext;
2031 /* We have to restrict this a bit for multiview */
2032 props->maxVertexAttribDivisor = UINT32_MAX / 16;
2033 break;
2034 }
2035
2036 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES:
2037 anv_get_physical_device_properties_1_1(pdevice, (void *)ext);
2038 break;
2039
2040 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES:
2041 anv_get_physical_device_properties_1_2(pdevice, (void *)ext);
2042 break;
2043
2044 default:
2045 anv_debug_ignored_stype(ext->sType);
2046 break;
2047 }
2048 }
2049
2050 #undef CORE_RENAMED_PROPERTY
2051 #undef CORE_PROPERTY
2052 }
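/* A minimal sketch of the application side of the pNext walk above
 * (standard Vulkan 1.2 usage, not driver code):
 *
 *    VkPhysicalDeviceVulkan12Properties vk12 = {
 *       .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES,
 *    };
 *    VkPhysicalDeviceProperties2 props2 = {
 *       .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
 *       .pNext = &vk12,
 *    };
 *    vkGetPhysicalDeviceProperties2(physical_device, &props2);
 *
 * after which vk12.driverName holds "Intel open-source Mesa driver".
 */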
2053
2054 /* We support exactly one queue family. */
2055 static const VkQueueFamilyProperties
2056 anv_queue_family_properties = {
2057 .queueFlags = VK_QUEUE_GRAPHICS_BIT |
2058 VK_QUEUE_COMPUTE_BIT |
2059 VK_QUEUE_TRANSFER_BIT,
2060 .queueCount = 1,
2061 .timestampValidBits = 36, /* XXX: Real value here */
2062 .minImageTransferGranularity = { 1, 1, 1 },
2063 };
2064
2065 void anv_GetPhysicalDeviceQueueFamilyProperties(
2066 VkPhysicalDevice physicalDevice,
2067 uint32_t* pCount,
2068 VkQueueFamilyProperties* pQueueFamilyProperties)
2069 {
2070 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pCount);
2071
2072 vk_outarray_append(&out, p) {
2073 *p = anv_queue_family_properties;
2074 }
2075 }
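/* The VK_OUTARRAY_* helpers implement the standard Vulkan two-call idiom:
 * with a NULL output array only *pCount is written; otherwise at most
 * *pCount entries are filled in. A sketch of the application side:
 *
 *    uint32_t count = 0;
 *    vkGetPhysicalDeviceQueueFamilyProperties(pd, &count, NULL);
 *    assert(count == 1);   // anv exposes a single queue family
 *    VkQueueFamilyProperties props;
 *    vkGetPhysicalDeviceQueueFamilyProperties(pd, &count, &props);
 */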
2076
2077 void anv_GetPhysicalDeviceQueueFamilyProperties2(
2078 VkPhysicalDevice physicalDevice,
2079 uint32_t* pQueueFamilyPropertyCount,
2080 VkQueueFamilyProperties2* pQueueFamilyProperties)
2081 {
2082
2083 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
2084
2085 vk_outarray_append(&out, p) {
2086 p->queueFamilyProperties = anv_queue_family_properties;
2087
2088 vk_foreach_struct(s, p->pNext) {
2089 anv_debug_ignored_stype(s->sType);
2090 }
2091 }
2092 }
2093
2094 void anv_GetPhysicalDeviceMemoryProperties(
2095 VkPhysicalDevice physicalDevice,
2096 VkPhysicalDeviceMemoryProperties* pMemoryProperties)
2097 {
2098 ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
2099
2100 pMemoryProperties->memoryTypeCount = physical_device->memory.type_count;
2101 for (uint32_t i = 0; i < physical_device->memory.type_count; i++) {
2102 pMemoryProperties->memoryTypes[i] = (VkMemoryType) {
2103 .propertyFlags = physical_device->memory.types[i].propertyFlags,
2104 .heapIndex = physical_device->memory.types[i].heapIndex,
2105 };
2106 }
2107
2108 pMemoryProperties->memoryHeapCount = physical_device->memory.heap_count;
2109 for (uint32_t i = 0; i < physical_device->memory.heap_count; i++) {
2110 pMemoryProperties->memoryHeaps[i] = (VkMemoryHeap) {
2111 .size = physical_device->memory.heaps[i].size,
2112 .flags = physical_device->memory.heaps[i].flags,
2113 };
2114 }
2115 }
2116
2117 static void
2118 anv_get_memory_budget(VkPhysicalDevice physicalDevice,
2119 VkPhysicalDeviceMemoryBudgetPropertiesEXT *memoryBudget)
2120 {
2121 ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
2122 uint64_t sys_available = get_available_system_memory();
2123 assert(sys_available > 0);
2124
2125 VkDeviceSize total_heaps_size = 0;
2126 for (size_t i = 0; i < device->memory.heap_count; i++)
2127 total_heaps_size += device->memory.heaps[i].size;
2128
2129 for (size_t i = 0; i < device->memory.heap_count; i++) {
2130 VkDeviceSize heap_size = device->memory.heaps[i].size;
2131 VkDeviceSize heap_used = device->memory.heaps[i].used;
2132 VkDeviceSize heap_budget;
2133
2134 double heap_proportion = (double) heap_size / total_heaps_size;
2135 VkDeviceSize sys_available_prop = sys_available * heap_proportion;
2136
2137 /*
2138 * Let's not incite the app to starve the system: report at most 90% of
2139 * available system memory.
2140 */
2141 uint64_t heap_available = sys_available_prop * 9 / 10;
2142 heap_budget = MIN2(heap_size, heap_used + heap_available);
2143
2144 /*
2145 * Round down to the nearest MB
2146 */
2147 heap_budget &= ~((1ull << 20) - 1);
2148
2149 /*
2150 * The heapBudget value must be non-zero for array elements less than
2151 * VkPhysicalDeviceMemoryProperties::memoryHeapCount. The heapBudget
2152 * value must be less than or equal to VkMemoryHeap::size for each heap.
2153 */
2154 assert(0 < heap_budget && heap_budget <= heap_size);
2155
2156 memoryBudget->heapUsage[i] = heap_used;
2157 memoryBudget->heapBudget[i] = heap_budget;
2158 }
2159
2160 /* The heapBudget and heapUsage values must be zero for array elements
2161 * greater than or equal to VkPhysicalDeviceMemoryProperties::memoryHeapCount.
2162 */
2163 for (uint32_t i = device->memory.heap_count; i < VK_MAX_MEMORY_HEAPS; i++) {
2164 memoryBudget->heapBudget[i] = 0;
2165 memoryBudget->heapUsage[i] = 0;
2166 }
2167 }
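/* A worked example of the apportioning above, with illustrative numbers:
 * two heaps of 3 GiB and 1 GiB (both with heap_used == 0) and 8 GiB of
 * available system memory. Heap 0's share is 8 GiB * (3/4) * 0.9 = 5.4 GiB,
 * so its budget is MIN2(3 GiB, 5.4 GiB) = 3 GiB; heap 1's share is
 * 8 GiB * (1/4) * 0.9 = 1.8 GiB, giving MIN2(1 GiB, 1.8 GiB) = 1 GiB.
 * Both are then rounded down to a whole MiB.
 */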
2168
2169 void anv_GetPhysicalDeviceMemoryProperties2(
2170 VkPhysicalDevice physicalDevice,
2171 VkPhysicalDeviceMemoryProperties2* pMemoryProperties)
2172 {
2173 anv_GetPhysicalDeviceMemoryProperties(physicalDevice,
2174 &pMemoryProperties->memoryProperties);
2175
2176 vk_foreach_struct(ext, pMemoryProperties->pNext) {
2177 switch (ext->sType) {
2178 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT:
2179 anv_get_memory_budget(physicalDevice, (void*)ext);
2180 break;
2181 default:
2182 anv_debug_ignored_stype(ext->sType);
2183 break;
2184 }
2185 }
2186 }
2187
2188 void
2189 anv_GetDeviceGroupPeerMemoryFeatures(
2190 VkDevice device,
2191 uint32_t heapIndex,
2192 uint32_t localDeviceIndex,
2193 uint32_t remoteDeviceIndex,
2194 VkPeerMemoryFeatureFlags* pPeerMemoryFeatures)
2195 {
2196 assert(localDeviceIndex == 0 && remoteDeviceIndex == 0);
2197 *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
2198 VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
2199 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
2200 VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
2201 }
2202
2203 PFN_vkVoidFunction anv_GetInstanceProcAddr(
2204 VkInstance _instance,
2205 const char* pName)
2206 {
2207 ANV_FROM_HANDLE(anv_instance, instance, _instance);
2208
2209 /* The Vulkan 1.0 spec for vkGetInstanceProcAddr has a table specifying
2210 * exactly when we must return a valid function pointer, when we must
2211 * return NULL, and when the behavior is left undefined.
2212 */
2213 if (pName == NULL)
2214 return NULL;
2215
2216 #define LOOKUP_ANV_ENTRYPOINT(entrypoint) \
2217 if (strcmp(pName, "vk" #entrypoint) == 0) \
2218 return (PFN_vkVoidFunction)anv_##entrypoint
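/* For example, the use "LOOKUP_ANV_ENTRYPOINT(CreateInstance);" below
 * expands to
 *
 *    if (strcmp(pName, "vkCreateInstance") == 0)
 *       return (PFN_vkVoidFunction)anv_CreateInstance;
 */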
2219
2220 LOOKUP_ANV_ENTRYPOINT(EnumerateInstanceExtensionProperties);
2221 LOOKUP_ANV_ENTRYPOINT(EnumerateInstanceLayerProperties);
2222 LOOKUP_ANV_ENTRYPOINT(EnumerateInstanceVersion);
2223 LOOKUP_ANV_ENTRYPOINT(CreateInstance);
2224
2225 /* GetInstanceProcAddr() can also be called with a NULL instance.
2226 * See https://gitlab.khronos.org/vulkan/vulkan/issues/2057
2227 */
2228 LOOKUP_ANV_ENTRYPOINT(GetInstanceProcAddr);
2229
2230 #undef LOOKUP_ANV_ENTRYPOINT
2231
2232 if (instance == NULL)
2233 return NULL;
2234
2235 int idx = anv_get_instance_entrypoint_index(pName);
2236 if (idx >= 0)
2237 return instance->dispatch.entrypoints[idx];
2238
2239 idx = anv_get_physical_device_entrypoint_index(pName);
2240 if (idx >= 0)
2241 return instance->physical_device_dispatch.entrypoints[idx];
2242
2243 idx = anv_get_device_entrypoint_index(pName);
2244 if (idx >= 0)
2245 return instance->device_dispatch.entrypoints[idx];
2246
2247 return NULL;
2248 }
2249
2250 /* With version 1+ of the loader interface, the ICD should expose
2251 * vk_icdGetInstanceProcAddr to work around certain LD_PRELOAD issues seen in apps.
2252 */
2253 PUBLIC
2254 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
2255 VkInstance instance,
2256 const char* pName);
2257
2258 PUBLIC
2259 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
2260 VkInstance instance,
2261 const char* pName)
2262 {
2263 return anv_GetInstanceProcAddr(instance, pName);
2264 }
2265
2266 PFN_vkVoidFunction anv_GetDeviceProcAddr(
2267 VkDevice _device,
2268 const char* pName)
2269 {
2270 ANV_FROM_HANDLE(anv_device, device, _device);
2271
2272 if (!device || !pName)
2273 return NULL;
2274
2275 int idx = anv_get_device_entrypoint_index(pName);
2276 if (idx < 0)
2277 return NULL;
2278
2279 return device->dispatch.entrypoints[idx];
2280 }
2281
2282 /* With version 4+ of the loader interface, the ICD should expose
2283 * vk_icdGetPhysicalDeviceProcAddr()
2284 */
2285 PUBLIC
2286 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetPhysicalDeviceProcAddr(
2287 VkInstance _instance,
2288 const char* pName);
2289
2290 PFN_vkVoidFunction vk_icdGetPhysicalDeviceProcAddr(
2291 VkInstance _instance,
2292 const char* pName)
2293 {
2294 ANV_FROM_HANDLE(anv_instance, instance, _instance);
2295
2296 if (!pName || !instance)
2297 return NULL;
2298
2299 int idx = anv_get_physical_device_entrypoint_index(pName);
2300 if (idx < 0)
2301 return NULL;
2302
2303 return instance->physical_device_dispatch.entrypoints[idx];
2304 }
2305
2306
2307 VkResult
2308 anv_CreateDebugReportCallbackEXT(VkInstance _instance,
2309 const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
2310 const VkAllocationCallbacks* pAllocator,
2311 VkDebugReportCallbackEXT* pCallback)
2312 {
2313 ANV_FROM_HANDLE(anv_instance, instance, _instance);
2314 return vk_create_debug_report_callback(&instance->debug_report_callbacks,
2315 pCreateInfo, pAllocator, &instance->alloc,
2316 pCallback);
2317 }
2318
2319 void
2320 anv_DestroyDebugReportCallbackEXT(VkInstance _instance,
2321 VkDebugReportCallbackEXT _callback,
2322 const VkAllocationCallbacks* pAllocator)
2323 {
2324 ANV_FROM_HANDLE(anv_instance, instance, _instance);
2325 vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
2326 _callback, pAllocator, &instance->alloc);
2327 }
2328
2329 void
2330 anv_DebugReportMessageEXT(VkInstance _instance,
2331 VkDebugReportFlagsEXT flags,
2332 VkDebugReportObjectTypeEXT objectType,
2333 uint64_t object,
2334 size_t location,
2335 int32_t messageCode,
2336 const char* pLayerPrefix,
2337 const char* pMessage)
2338 {
2339 ANV_FROM_HANDLE(anv_instance, instance, _instance);
2340 vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
2341 object, location, messageCode, pLayerPrefix, pMessage);
2342 }
2343
2344 static struct anv_state
2345 anv_state_pool_emit_data(struct anv_state_pool *pool, size_t size, size_t align, const void *p)
2346 {
2347 struct anv_state state;
2348
2349 state = anv_state_pool_alloc(pool, size, align);
2350 memcpy(state.map, p, size);
2351
2352 return state;
2353 }
2354
2355 /* Haswell border color is a bit of a disaster. Float and unorm formats use a
2356 * straightforward 32-bit float color in the first 64 bytes. Instead of using
2357 * a nice float/integer union like Gen8+, Haswell specifies the integer border
2358 * color as a separate entry /after/ the float color. The layout of this entry
2359 * also depends on the format's bpp (with extra hacks for RG32), and overlaps.
2360 *
2361 * Since we don't know the format/bpp, we can't make any of the border colors
2362 * containing '1' work for all formats, as it would be in the wrong place for
2363 * some of them. We opt to make 32-bit integers work as this seems like the
2364 * most common option. Fortunately, transparent black works regardless, as
2365 * all zeroes is the same in every bit-size.
2366 */
2367 struct hsw_border_color {
2368 float float32[4];
2369 uint32_t _pad0[12];
2370 uint32_t uint32[4];
2371 uint32_t _pad1[108];
2372 };
2373
2374 struct gen8_border_color {
2375 union {
2376 float float32[4];
2377 uint32_t uint32[4];
2378 };
2379 /* Pad out to 64 bytes */
2380 uint32_t _pad[12];
2381 };
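/* Layout check for the structs above: in hsw_border_color the integer color
 * lands at byte offset 16 + 48 = 64 and a full entry is 64 + 16 + 432 = 512
 * bytes, matching the 512-byte alignment used below; a gen8_border_color
 * entry is 16 + 48 = 64 bytes, matching the 64-byte alignment.
 */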
2382
2383 static void
2384 anv_device_init_border_colors(struct anv_device *device)
2385 {
2386 if (device->info.is_haswell) {
2387 static const struct hsw_border_color border_colors[] = {
2388 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 0.0 } },
2389 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 1.0 } },
2390 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = { .float32 = { 1.0, 1.0, 1.0, 1.0 } },
2391 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = { .uint32 = { 0, 0, 0, 0 } },
2392 [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = { .uint32 = { 0, 0, 0, 1 } },
2393 [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = { .uint32 = { 1, 1, 1, 1 } },
2394 };
2395
2396 device->border_colors =
2397 anv_state_pool_emit_data(&device->dynamic_state_pool,
2398 sizeof(border_colors), 512, border_colors);
2399 } else {
2400 static const struct gen8_border_color border_colors[] = {
2401 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 0.0 } },
2402 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 1.0 } },
2403 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = { .float32 = { 1.0, 1.0, 1.0, 1.0 } },
2404 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = { .uint32 = { 0, 0, 0, 0 } },
2405 [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = { .uint32 = { 0, 0, 0, 1 } },
2406 [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = { .uint32 = { 1, 1, 1, 1 } },
2407 };
2408
2409 device->border_colors =
2410 anv_state_pool_emit_data(&device->dynamic_state_pool,
2411 sizeof(border_colors), 64, border_colors);
2412 }
2413 }
2414
2415 static VkResult
2416 anv_device_init_trivial_batch(struct anv_device *device)
2417 {
2418 VkResult result = anv_device_alloc_bo(device, 4096,
2419 ANV_BO_ALLOC_MAPPED,
2420 0 /* explicit_address */,
2421 &device->trivial_batch_bo);
2422 if (result != VK_SUCCESS)
2423 return result;
2424
2425 struct anv_batch batch = {
2426 .start = device->trivial_batch_bo->map,
2427 .next = device->trivial_batch_bo->map,
2428 .end = device->trivial_batch_bo->map + 4096,
2429 };
2430
2431 anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END, bbe);
2432 anv_batch_emit(&batch, GEN7_MI_NOOP, noop);
2433
2434 if (!device->info.has_llc)
2435 gen_clflush_range(batch.start, batch.next - batch.start);
2436
2437 return VK_SUCCESS;
2438 }
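/* The gen_clflush_range() above is needed because, without an LLC, the CPU
 * writes that built the batch are not coherent with the GPU; the cache
 * lines must be flushed before the batch is first executed.
 */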
2439
2440 VkResult anv_EnumerateDeviceExtensionProperties(
2441 VkPhysicalDevice physicalDevice,
2442 const char* pLayerName,
2443 uint32_t* pPropertyCount,
2444 VkExtensionProperties* pProperties)
2445 {
2446 ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
2447 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
2448
2449 for (int i = 0; i < ANV_DEVICE_EXTENSION_COUNT; i++) {
2450 if (device->supported_extensions.extensions[i]) {
2451 vk_outarray_append(&out, prop) {
2452 *prop = anv_device_extensions[i];
2453 }
2454 }
2455 }
2456
2457 return vk_outarray_status(&out);
2458 }
2459
2460 static void
2461 anv_device_init_dispatch(struct anv_device *device)
2462 {
2463 const struct anv_instance *instance = device->physical->instance;
2464
2465 const struct anv_device_dispatch_table *genX_table;
2466 switch (device->info.gen) {
2467 case 12:
2468 genX_table = &gen12_device_dispatch_table;
2469 break;
2470 case 11:
2471 genX_table = &gen11_device_dispatch_table;
2472 break;
2473 case 10:
2474 genX_table = &gen10_device_dispatch_table;
2475 break;
2476 case 9:
2477 genX_table = &gen9_device_dispatch_table;
2478 break;
2479 case 8:
2480 genX_table = &gen8_device_dispatch_table;
2481 break;
2482 case 7:
2483 if (device->info.is_haswell)
2484 genX_table = &gen75_device_dispatch_table;
2485 else
2486 genX_table = &gen7_device_dispatch_table;
2487 break;
2488 default:
2489 unreachable("unsupported gen\n");
2490 }
2491
2492 for (unsigned i = 0; i < ARRAY_SIZE(device->dispatch.entrypoints); i++) {
2493 /* Vulkan requires that entrypoints for extensions which have not been
2494 * enabled must not be advertised.
2495 */
2496 if (!anv_device_entrypoint_is_enabled(i, instance->app_info.api_version,
2497 &instance->enabled_extensions,
2498 &device->enabled_extensions)) {
2499 device->dispatch.entrypoints[i] = NULL;
2500 } else if (genX_table->entrypoints[i]) {
2501 device->dispatch.entrypoints[i] = genX_table->entrypoints[i];
2502 } else {
2503 device->dispatch.entrypoints[i] =
2504 anv_device_dispatch_table.entrypoints[i];
2505 }
2506 }
2507 }
2508
2509 static int
2510 vk_priority_to_gen(int priority)
2511 {
2512 switch (priority) {
2513 case VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT:
2514 return GEN_CONTEXT_LOW_PRIORITY;
2515 case VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT:
2516 return GEN_CONTEXT_MEDIUM_PRIORITY;
2517 case VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT:
2518 return GEN_CONTEXT_HIGH_PRIORITY;
2519 case VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT:
2520 return GEN_CONTEXT_REALTIME_PRIORITY;
2521 default:
2522 unreachable("Invalid priority");
2523 }
2524 }
2525
2526 static VkResult
2527 anv_device_init_hiz_clear_value_bo(struct anv_device *device)
2528 {
2529 VkResult result = anv_device_alloc_bo(device, 4096,
2530 ANV_BO_ALLOC_MAPPED,
2531 0 /* explicit_address */,
2532 &device->hiz_clear_bo);
2533 if (result != VK_SUCCESS)
2534 return result;
2535
2536 union isl_color_value hiz_clear = { .u32 = { 0, } };
2537 hiz_clear.f32[0] = ANV_HZ_FC_VAL;
2538
2539 memcpy(device->hiz_clear_bo->map, hiz_clear.u32, sizeof(hiz_clear.u32));
2540
2541 if (!device->info.has_llc)
2542 gen_clflush_range(device->hiz_clear_bo->map, sizeof(hiz_clear.u32));
2543
2544 return VK_SUCCESS;
2545 }
2546
2547 static bool
2548 get_bo_from_pool(struct gen_batch_decode_bo *ret,
2549 struct anv_block_pool *pool,
2550 uint64_t address)
2551 {
2552 anv_block_pool_foreach_bo(bo, pool) {
2553 uint64_t bo_address = gen_48b_address(bo->offset);
2554 if (address >= bo_address && address < (bo_address + bo->size)) {
2555 *ret = (struct gen_batch_decode_bo) {
2556 .addr = bo_address,
2557 .size = bo->size,
2558 .map = bo->map,
2559 };
2560 return true;
2561 }
2562 }
2563 return false;
2564 }
2565
2566 /* Find the BO that backs a given GPU address, for batch decoding. */
2567 static struct gen_batch_decode_bo
2568 decode_get_bo(void *v_batch, bool ppgtt, uint64_t address)
2569 {
2570 struct anv_device *device = v_batch;
2571 struct gen_batch_decode_bo ret_bo = {};
2572
2573 assert(ppgtt);
2574
2575 if (get_bo_from_pool(&ret_bo, &device->dynamic_state_pool.block_pool, address))
2576 return ret_bo;
2577 if (get_bo_from_pool(&ret_bo, &device->instruction_state_pool.block_pool, address))
2578 return ret_bo;
2579 if (get_bo_from_pool(&ret_bo, &device->binding_table_pool.block_pool, address))
2580 return ret_bo;
2581 if (get_bo_from_pool(&ret_bo, &device->surface_state_pool.block_pool, address))
2582 return ret_bo;
2583
2584 if (!device->cmd_buffer_being_decoded)
2585 return (struct gen_batch_decode_bo) { };
2586
2587 struct anv_batch_bo **bo;
2588
2589 u_vector_foreach(bo, &device->cmd_buffer_being_decoded->seen_bbos) {
2590 /* The decoder zeroes out the top 16 bits, so we need to as well */
2591 uint64_t bo_address = (*bo)->bo->offset & (~0ull >> 16);
2592
2593 if (address >= bo_address && address < bo_address + (*bo)->bo->size) {
2594 return (struct gen_batch_decode_bo) {
2595 .addr = bo_address,
2596 .size = (*bo)->bo->size,
2597 .map = (*bo)->bo->map,
2598 };
2599 }
2600 }
2601
2602 return (struct gen_batch_decode_bo) { };
2603 }
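/* decode_get_bo() is registered as the decoder's address-resolution
 * callback via gen_batch_decode_ctx_init(..., decode_get_bo, NULL, device)
 * in anv_CreateDevice() below, so it is only exercised when batch decoding
 * (INTEL_DEBUG & DEBUG_BATCH) is enabled.
 */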
2604
2605 struct gen_aux_map_buffer {
2606 struct gen_buffer base;
2607 struct anv_state state;
2608 };
2609
2610 static struct gen_buffer *
2611 gen_aux_map_buffer_alloc(void *driver_ctx, uint32_t size)
2612 {
2613 struct gen_aux_map_buffer *buf = malloc(sizeof(struct gen_aux_map_buffer));
2614 if (!buf)
2615 return NULL;
2616
2617 struct anv_device *device = (struct anv_device*)driver_ctx;
2618 assert(device->physical->supports_48bit_addresses &&
2619 device->physical->use_softpin);
2620
2621 struct anv_state_pool *pool = &device->dynamic_state_pool;
2622 buf->state = anv_state_pool_alloc(pool, size, size);
2623
2624 buf->base.gpu = pool->block_pool.bo->offset + buf->state.offset;
2625 buf->base.gpu_end = buf->base.gpu + buf->state.alloc_size;
2626 buf->base.map = buf->state.map;
2627 buf->base.driver_bo = &buf->state;
2628 return &buf->base;
2629 }
2630
2631 static void
2632 gen_aux_map_buffer_free(void *driver_ctx, struct gen_buffer *buffer)
2633 {
2634 struct gen_aux_map_buffer *buf = (struct gen_aux_map_buffer*)buffer;
2635 struct anv_device *device = (struct anv_device*)driver_ctx;
2636 struct anv_state_pool *pool = &device->dynamic_state_pool;
2637 anv_state_pool_free(pool, buf->state);
2638 free(buf);
2639 }
2640
2641 static struct gen_mapped_pinned_buffer_alloc aux_map_allocator = {
2642 .alloc = gen_aux_map_buffer_alloc,
2643 .free = gen_aux_map_buffer_free,
2644 };
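/* This vtable is handed to gen_aux_map_init() in anv_CreateDevice() below
 * (gen12 only), letting the common aux-map code carve its translation-table
 * buffers out of the device's dynamic state pool.
 */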
2645
2646 static VkResult
2647 check_physical_device_features(VkPhysicalDevice physicalDevice,
2648 const VkPhysicalDeviceFeatures *features)
2649 {
2650 VkPhysicalDeviceFeatures supported_features;
2651 anv_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
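/* VkPhysicalDeviceFeatures is specified as a struct containing only
 * VkBool32 members, so both structs can be walked as flat arrays of
 * booleans.
 */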
2652 VkBool32 *supported_feature = (VkBool32 *)&supported_features;
2653 VkBool32 *enabled_feature = (VkBool32 *)features;
2654 unsigned num_features = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
2655 for (uint32_t i = 0; i < num_features; i++) {
2656 if (enabled_feature[i] && !supported_feature[i])
2657 return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
2658 }
2659
2660 return VK_SUCCESS;
2661 }
2662
2663 VkResult anv_CreateDevice(
2664 VkPhysicalDevice physicalDevice,
2665 const VkDeviceCreateInfo* pCreateInfo,
2666 const VkAllocationCallbacks* pAllocator,
2667 VkDevice* pDevice)
2668 {
2669 ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
2670 VkResult result;
2671 struct anv_device *device;
2672
2673 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);
2674
2675 struct anv_device_extension_table enabled_extensions = { };
2676 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
2677 int idx;
2678 for (idx = 0; idx < ANV_DEVICE_EXTENSION_COUNT; idx++) {
2679 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
2680 anv_device_extensions[idx].extensionName) == 0)
2681 break;
2682 }
2683
2684 if (idx >= ANV_DEVICE_EXTENSION_COUNT)
2685 return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
2686
2687 if (!physical_device->supported_extensions.extensions[idx])
2688 return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
2689
2690 enabled_extensions.extensions[idx] = true;
2691 }
2692
2693 /* Check enabled features */
2694 bool robust_buffer_access = false;
2695 if (pCreateInfo->pEnabledFeatures) {
2696 result = check_physical_device_features(physicalDevice,
2697 pCreateInfo->pEnabledFeatures);
2698 if (result != VK_SUCCESS)
2699 return result;
2700
2701 if (pCreateInfo->pEnabledFeatures->robustBufferAccess)
2702 robust_buffer_access = true;
2703 }
2704
2705 vk_foreach_struct_const(ext, pCreateInfo->pNext) {
2706 switch (ext->sType) {
2707 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2: {
2708 const VkPhysicalDeviceFeatures2 *features = (const void *)ext;
2709 result = check_physical_device_features(physicalDevice,
2710 &features->features);
2711 if (result != VK_SUCCESS)
2712 return result;
2713
2714 if (features->features.robustBufferAccess)
2715 robust_buffer_access = true;
2716 break;
2717 }
2718
2719 default:
2720 /* Don't warn */
2721 break;
2722 }
2723 }
2724
2725 /* Check requested queues and fail if we are requested to create any
2726 * queues with flags we don't support.
2727 */
2728 assert(pCreateInfo->queueCreateInfoCount > 0);
2729 for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
2730 if (pCreateInfo->pQueueCreateInfos[i].flags != 0)
2731 return vk_error(VK_ERROR_INITIALIZATION_FAILED);
2732 }
2733
2734 /* Check if client specified queue priority. */
2735 const VkDeviceQueueGlobalPriorityCreateInfoEXT *queue_priority =
2736 vk_find_struct_const(pCreateInfo->pQueueCreateInfos[0].pNext,
2737 DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT);
2738
2739 VkQueueGlobalPriorityEXT priority =
2740 queue_priority ? queue_priority->globalPriority :
2741 VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT;
2742
2743 device = vk_alloc2(&physical_device->instance->alloc, pAllocator,
2744 sizeof(*device), 8,
2745 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
2746 if (!device)
2747 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2748
2749 if (INTEL_DEBUG & DEBUG_BATCH) {
2750 const unsigned decode_flags =
2751 GEN_BATCH_DECODE_FULL |
2752 ((INTEL_DEBUG & DEBUG_COLOR) ? GEN_BATCH_DECODE_IN_COLOR : 0) |
2753 GEN_BATCH_DECODE_OFFSETS |
2754 GEN_BATCH_DECODE_FLOATS;
2755
2756 gen_batch_decode_ctx_init(&device->decoder_ctx,
2757 &physical_device->info,
2758 stderr, decode_flags, NULL,
2759 decode_get_bo, NULL, device);
2760 }
2761
2762 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
2763 device->physical = physical_device;
2764 device->no_hw = physical_device->no_hw;
2765 device->_lost = false;
2766
2767 if (pAllocator)
2768 device->alloc = *pAllocator;
2769 else
2770 device->alloc = physical_device->instance->alloc;
2771
2772 /* XXX(chadv): Can we dup() physicalDevice->fd here? */
2773 device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
2774 if (device->fd == -1) {
2775 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
2776 goto fail_device;
2777 }
2778
2779 device->context_id = anv_gem_create_context(device);
2780 if (device->context_id == -1) {
2781 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
2782 goto fail_fd;
2783 }
2784
2785 result = anv_queue_init(device, &device->queue);
2786 if (result != VK_SUCCESS)
2787 goto fail_context_id;
2788
2789 if (physical_device->use_softpin) {
2790 if (pthread_mutex_init(&device->vma_mutex, NULL) != 0) {
2791 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
2792 goto fail_queue;
2793 }
2794
2795 /* Keep the page with address zero out of the allocator. */
2796 util_vma_heap_init(&device->vma_lo,
2797 LOW_HEAP_MIN_ADDRESS, LOW_HEAP_SIZE);
2798
2799 util_vma_heap_init(&device->vma_cva, CLIENT_VISIBLE_HEAP_MIN_ADDRESS,
2800 CLIENT_VISIBLE_HEAP_SIZE);
2801
2802 /* Leave the last 4GiB out of the high vma range, so that no state
2803 * base address + size can overflow 48 bits. For more information see
2804 * the comment about Wa32bitGeneralStateOffset in anv_allocator.c
2805 */
2806 util_vma_heap_init(&device->vma_hi, HIGH_HEAP_MIN_ADDRESS,
2807 physical_device->gtt_size - (1ull << 32) -
2808 HIGH_HEAP_MIN_ADDRESS);
2809 }
2810
2811 list_inithead(&device->memory_objects);
2812
2813 /* As per spec, the driver implementation may deny requests to acquire
2814 * a priority above the default priority (MEDIUM) if the caller does not
2815 * have sufficient privileges. In this scenario VK_ERROR_NOT_PERMITTED_EXT
2816 * is returned.
2817 */
2818 if (physical_device->has_context_priority) {
2819 int err = anv_gem_set_context_param(device->fd, device->context_id,
2820 I915_CONTEXT_PARAM_PRIORITY,
2821 vk_priority_to_gen(priority));
2822 if (err != 0 && priority > VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT) {
2823 result = vk_error(VK_ERROR_NOT_PERMITTED_EXT);
2824 goto fail_vmas;
2825 }
2826 }
2827
2828 device->info = physical_device->info;
2829 device->isl_dev = physical_device->isl_dev;
2830
2831 /* On Broadwell and later, we can use batch chaining to more efficiently
2832 * implement growing command buffers. On Haswell and earlier, the kernel
2833 * command parser gets in the way and we have to fall back to growing
2834 * the batch.
2835 */
2836 device->can_chain_batches = device->info.gen >= 8;
2837
2838 device->robust_buffer_access = robust_buffer_access;
2839 device->enabled_extensions = enabled_extensions;
2840
2841 anv_device_init_dispatch(device);
2842
2843 if (pthread_mutex_init(&device->mutex, NULL) != 0) {
2844 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
2845 goto fail_vmas;
2846 }
2847
2848 pthread_condattr_t condattr;
2849 if (pthread_condattr_init(&condattr) != 0) {
2850 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
2851 goto fail_mutex;
2852 }
2853 if (pthread_condattr_setclock(&condattr, CLOCK_MONOTONIC) != 0) {
2854 pthread_condattr_destroy(&condattr);
2855 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
2856 goto fail_mutex;
2857 }
2858 if (pthread_cond_init(&device->queue_submit, &condattr) != 0) {
2859 pthread_condattr_destroy(&condattr);
2860 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
2861 goto fail_mutex;
2862 }
2863 pthread_condattr_destroy(&condattr);
2864
2865 result = anv_bo_cache_init(&device->bo_cache);
2866 if (result != VK_SUCCESS)
2867 goto fail_queue_cond;
2868
2869 anv_bo_pool_init(&device->batch_bo_pool, device);
2870
2871 result = anv_state_pool_init(&device->dynamic_state_pool, device,
2872 DYNAMIC_STATE_POOL_MIN_ADDRESS, 16384);
2873 if (result != VK_SUCCESS)
2874 goto fail_batch_bo_pool;
2875
2876 result = anv_state_pool_init(&device->instruction_state_pool, device,
2877 INSTRUCTION_STATE_POOL_MIN_ADDRESS, 16384);
2878 if (result != VK_SUCCESS)
2879 goto fail_dynamic_state_pool;
2880
2881 result = anv_state_pool_init(&device->surface_state_pool, device,
2882 SURFACE_STATE_POOL_MIN_ADDRESS, 4096);
2883 if (result != VK_SUCCESS)
2884 goto fail_instruction_state_pool;
2885
2886 if (physical_device->use_softpin) {
2887 result = anv_state_pool_init(&device->binding_table_pool, device,
2888 BINDING_TABLE_POOL_MIN_ADDRESS, 4096);
2889 if (result != VK_SUCCESS)
2890 goto fail_surface_state_pool;
2891 }
2892
2893 if (device->info.gen >= 12) {
2894 device->aux_map_ctx = gen_aux_map_init(device, &aux_map_allocator,
2895 &physical_device->info);
if (!device->aux_map_ctx) {
result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
goto fail_binding_table_pool;
}
2898 }
2899
2900 result = anv_device_alloc_bo(device, 4096, 0 /* flags */,
2901 0 /* explicit_address */,
2902 &device->workaround_bo);
2903 if (result != VK_SUCCESS)
2904 goto fail_surface_aux_map_pool;
2905
2906 result = anv_device_init_trivial_batch(device);
2907 if (result != VK_SUCCESS)
2908 goto fail_workaround_bo;
2909
2910 /* Allocate a null surface state at surface state offset 0. This makes
2911 * NULL descriptor handling trivial because we can just memset structures
2912 * to zero and they have a valid descriptor.
2913 */
2914 device->null_surface_state =
2915 anv_state_pool_alloc(&device->surface_state_pool,
2916 device->isl_dev.ss.size,
2917 device->isl_dev.ss.align);
2918 isl_null_fill_state(&device->isl_dev, device->null_surface_state.map,
2919 isl_extent3d(1, 1, 1) /* This shouldn't matter */);
2920 assert(device->null_surface_state.offset == 0);
2921
2922 if (device->info.gen >= 10) {
2923 result = anv_device_init_hiz_clear_value_bo(device);
2924 if (result != VK_SUCCESS)
2925 goto fail_trivial_batch_bo;
2926 }
2927
2928 anv_scratch_pool_init(device, &device->scratch_pool);
2929
2930 switch (device->info.gen) {
2931 case 7:
2932 if (!device->info.is_haswell)
2933 result = gen7_init_device_state(device);
2934 else
2935 result = gen75_init_device_state(device);
2936 break;
2937 case 8:
2938 result = gen8_init_device_state(device);
2939 break;
2940 case 9:
2941 result = gen9_init_device_state(device);
2942 break;
2943 case 10:
2944 result = gen10_init_device_state(device);
2945 break;
2946 case 11:
2947 result = gen11_init_device_state(device);
2948 break;
2949 case 12:
2950 result = gen12_init_device_state(device);
2951 break;
2952 default:
2953 /* Shouldn't get here as we don't create physical devices for any other
2954 * gens. */
2955 unreachable("unhandled gen");
2956 }
2957 if (result != VK_SUCCESS)
2958 goto fail_scratch_pool;
2959
2960 anv_pipeline_cache_init(&device->default_pipeline_cache, device, true);
2961
2962 anv_device_init_blorp(device);
2963
2964 anv_device_init_border_colors(device);
2965
2966 anv_device_perf_init(device);
2967
2968 *pDevice = anv_device_to_handle(device);
2969
2970 return VK_SUCCESS;
2971
fail_scratch_pool:
anv_scratch_pool_finish(device, &device->scratch_pool);
if (device->info.gen >= 10)
anv_device_release_bo(device, device->hiz_clear_bo);
fail_trivial_batch_bo:
anv_device_release_bo(device, device->trivial_batch_bo);
fail_workaround_bo:
anv_device_release_bo(device, device->workaround_bo);
2979 fail_surface_aux_map_pool:
2980 if (device->info.gen >= 12) {
2981 gen_aux_map_finish(device->aux_map_ctx);
2982 device->aux_map_ctx = NULL;
2983 }
2984 fail_binding_table_pool:
2985 if (physical_device->use_softpin)
2986 anv_state_pool_finish(&device->binding_table_pool);
2987 fail_surface_state_pool:
2988 anv_state_pool_finish(&device->surface_state_pool);
2989 fail_instruction_state_pool:
2990 anv_state_pool_finish(&device->instruction_state_pool);
2991 fail_dynamic_state_pool:
2992 anv_state_pool_finish(&device->dynamic_state_pool);
2993 fail_batch_bo_pool:
2994 anv_bo_pool_finish(&device->batch_bo_pool);
2995 anv_bo_cache_finish(&device->bo_cache);
2996 fail_queue_cond:
2997 pthread_cond_destroy(&device->queue_submit);
2998 fail_mutex:
2999 pthread_mutex_destroy(&device->mutex);
3000 fail_vmas:
3001 if (physical_device->use_softpin) {
3002 util_vma_heap_finish(&device->vma_hi);
3003 util_vma_heap_finish(&device->vma_cva);
3004 util_vma_heap_finish(&device->vma_lo);
3005 }
3006 fail_queue:
3007 anv_queue_finish(&device->queue);
3008 fail_context_id:
3009 anv_gem_destroy_context(device, device->context_id);
3010 fail_fd:
3011 close(device->fd);
3012 fail_device:
3013 vk_free(&device->alloc, device);
3014
3015 return result;
3016 }
3017
3018 void anv_DestroyDevice(
3019 VkDevice _device,
3020 const VkAllocationCallbacks* pAllocator)
3021 {
3022 ANV_FROM_HANDLE(anv_device, device, _device);
3023
3024 if (!device)
3025 return;
3026
3027 anv_device_finish_blorp(device);
3028
3029 anv_pipeline_cache_finish(&device->default_pipeline_cache);
3030
3031 anv_queue_finish(&device->queue);
3032
3033 #ifdef HAVE_VALGRIND
3034 /* We only need to free these to prevent valgrind errors. The backing
3035 * BO will go away in a couple of lines so we don't actually leak.
3036 */
3037 anv_state_pool_free(&device->dynamic_state_pool, device->border_colors);
3038 anv_state_pool_free(&device->dynamic_state_pool, device->slice_hash);
3039 #endif
3040
3041 anv_scratch_pool_finish(device, &device->scratch_pool);
3042
3043 anv_device_release_bo(device, device->workaround_bo);
3044 anv_device_release_bo(device, device->trivial_batch_bo);
3045 if (device->info.gen >= 10)
3046 anv_device_release_bo(device, device->hiz_clear_bo);
3047
3048 if (device->info.gen >= 12) {
3049 gen_aux_map_finish(device->aux_map_ctx);
3050 device->aux_map_ctx = NULL;
3051 }
3052
3053 if (device->physical->use_softpin)
3054 anv_state_pool_finish(&device->binding_table_pool);
3055 anv_state_pool_finish(&device->surface_state_pool);
3056 anv_state_pool_finish(&device->instruction_state_pool);
3057 anv_state_pool_finish(&device->dynamic_state_pool);
3058
3059 anv_bo_pool_finish(&device->batch_bo_pool);
3060
3061 anv_bo_cache_finish(&device->bo_cache);
3062
3063 if (device->physical->use_softpin) {
3064 util_vma_heap_finish(&device->vma_hi);
3065 util_vma_heap_finish(&device->vma_cva);
3066 util_vma_heap_finish(&device->vma_lo);
3067 }
3068
3069 pthread_cond_destroy(&device->queue_submit);
3070 pthread_mutex_destroy(&device->mutex);
3071
3072 anv_gem_destroy_context(device, device->context_id);
3073
3074 if (INTEL_DEBUG & DEBUG_BATCH)
3075 gen_batch_decode_ctx_finish(&device->decoder_ctx);
3076
3077 close(device->fd);
3078
3079 vk_free(&device->alloc, device);
3080 }
3081
3082 VkResult anv_EnumerateInstanceLayerProperties(
3083 uint32_t* pPropertyCount,
3084 VkLayerProperties* pProperties)
3085 {
3086 if (pProperties == NULL) {
3087 *pPropertyCount = 0;
3088 return VK_SUCCESS;
3089 }
3090
3091 /* None supported at this time */
3092 return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
3093 }
3094
3095 VkResult anv_EnumerateDeviceLayerProperties(
3096 VkPhysicalDevice physicalDevice,
3097 uint32_t* pPropertyCount,
3098 VkLayerProperties* pProperties)
3099 {
3100 if (pProperties == NULL) {
3101 *pPropertyCount = 0;
3102 return VK_SUCCESS;
3103 }
3104
3105 /* None supported at this time */
3106 return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
3107 }
3108
3109 void anv_GetDeviceQueue(
3110 VkDevice _device,
3111 uint32_t queueNodeIndex,
3112 uint32_t queueIndex,
3113 VkQueue* pQueue)
3114 {
3115 const VkDeviceQueueInfo2 info = {
3116 .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
3117 .pNext = NULL,
3118 .flags = 0,
3119 .queueFamilyIndex = queueNodeIndex,
3120 .queueIndex = queueIndex,
3121 };
3122
3123 anv_GetDeviceQueue2(_device, &info, pQueue);
3124 }
3125
3126 void anv_GetDeviceQueue2(
3127 VkDevice _device,
3128 const VkDeviceQueueInfo2* pQueueInfo,
3129 VkQueue* pQueue)
3130 {
3131 ANV_FROM_HANDLE(anv_device, device, _device);
3132
3133 assert(pQueueInfo->queueIndex == 0);
3134
3135 if (pQueueInfo->flags == device->queue.flags)
3136 *pQueue = anv_queue_to_handle(&device->queue);
3137 else
3138 *pQueue = NULL;
3139 }
3140
3141 VkResult
3142 _anv_device_set_lost(struct anv_device *device,
3143 const char *file, int line,
3144 const char *msg, ...)
3145 {
3146 VkResult err;
3147 va_list ap;
3148
3149 p_atomic_inc(&device->_lost);
3150
3151 va_start(ap, msg);
3152 err = __vk_errorv(device->physical->instance, device,
3153 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
3154 VK_ERROR_DEVICE_LOST, file, line, msg, ap);
3155 va_end(ap);
3156
3157 if (env_var_as_boolean("ANV_ABORT_ON_DEVICE_LOSS", false))
3158 abort();
3159
3160 return err;
3161 }
3162
3163 VkResult
3164 _anv_queue_set_lost(struct anv_queue *queue,
3165 const char *file, int line,
3166 const char *msg, ...)
3167 {
3168 VkResult err;
3169 va_list ap;
3170
3171 p_atomic_inc(&queue->device->_lost);
3172
3173 va_start(ap, msg);
3174 err = __vk_errorv(queue->device->physical->instance, queue->device,
3175 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
3176 VK_ERROR_DEVICE_LOST, file, line, msg, ap);
3177 va_end(ap);
3178
3179 if (env_var_as_boolean("ANV_ABORT_ON_DEVICE_LOSS", false))
3180 abort();
3181
3182 return err;
3183 }
3184
3185 VkResult
3186 anv_device_query_status(struct anv_device *device)
3187 {
3188 /* This isn't likely as most of the callers of this function already check
3189 * for it. However, it doesn't hurt to check and it potentially lets us
3190 * avoid an ioctl.
3191 */
3192 if (anv_device_is_lost(device))
3193 return VK_ERROR_DEVICE_LOST;
3194
3195 uint32_t active, pending;
3196 int ret = anv_gem_gpu_get_reset_stats(device, &active, &pending);
3197 if (ret == -1) {
3198 /* We don't know the real error. */
3199 return anv_device_set_lost(device, "get_reset_stats failed: %m");
3200 }
3201
3202 if (active) {
3203 return anv_device_set_lost(device, "GPU hung on one of our command buffers");
3204 } else if (pending) {
3205 return anv_device_set_lost(device, "GPU hung with commands in-flight");
3206 }
3207
3208 return VK_SUCCESS;
3209 }
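
/* For reference, a minimal sketch of the kernel query that
 * anv_gem_gpu_get_reset_stats() wraps above. Illustration only, assuming
 * the i915 uAPI from <drm/i915_drm.h>; it is not part of the driver proper.
 */
#if 0
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int
example_get_reset_stats(int fd, uint32_t ctx_id,
                        uint32_t *active, uint32_t *pending)
{
   struct drm_i915_reset_stats stats = { .ctx_id = ctx_id };

   if (ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats) != 0)
      return -1;

   *active = stats.batch_active;   /* our batches caught mid-hang */
   *pending = stats.batch_pending; /* our batches queued behind a hang */
   return 0;
}
#endif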
3210
3211 VkResult
3212 anv_device_bo_busy(struct anv_device *device, struct anv_bo *bo)
3213 {
3214 /* Note: This only returns whether or not the BO is in use by an i915 GPU.
3215 * Other usages of the BO (such as on different hardware) will not be
3216 * flagged as "busy" by this ioctl. Use with care.
3217 */
3218 int ret = anv_gem_busy(device, bo->gem_handle);
3219 if (ret == 1) {
3220 return VK_NOT_READY;
3221 } else if (ret == -1) {
3222 /* We don't know the real error. */
3223 return anv_device_set_lost(device, "gem wait failed: %m");
3224 }
3225
3226 /* Query for device status after the busy call. If the BO we're checking
3227 * got caught in a GPU hang we don't want to return VK_SUCCESS to the
3228 * client because it clearly doesn't have valid data. Yes, this most
3229 * likely means an ioctl, but we just did an ioctl to query the busy status
3230 * so it's no great loss.
3231 */
3232 return anv_device_query_status(device);
3233 }
3234
3235 VkResult
3236 anv_device_wait(struct anv_device *device, struct anv_bo *bo,
3237 int64_t timeout)
3238 {
3239 int ret = anv_gem_wait(device, bo->gem_handle, &timeout);
3240 if (ret == -1 && errno == ETIME) {
3241 return VK_TIMEOUT;
3242 } else if (ret == -1) {
3243 /* We don't know the real error. */
3244 return anv_device_set_lost(device, "gem wait failed: %m");
3245 }
3246
3247 /* Query for device status after the wait. If the BO we're waiting on got
3248 * caught in a GPU hang we don't want to return VK_SUCCESS to the client
3249 * because it clearly doesn't have valid data. Yes, this most likely means
3250 * an ioctl, but we just did an ioctl to wait so it's no great loss.
3251 */
3252 return anv_device_query_status(device);
3253 }
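
/* A hedged sketch of the I915_GEM_WAIT call behind anv_gem_wait(), showing
 * why &timeout is passed above: the kernel treats timeout_ns as in/out and
 * writes back the time remaining. Illustration only, assuming the i915 uAPI.
 */
#if 0
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int
example_gem_wait(int fd, uint32_t gem_handle, int64_t *timeout_ns)
{
   struct drm_i915_gem_wait wait = {
      .bo_handle = gem_handle,
      .timeout_ns = *timeout_ns,
   };
   int ret = ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);

   *timeout_ns = wait.timeout_ns; /* remaining budget on success or ETIME */
   return ret;
}
#endif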
3254
3255 VkResult anv_DeviceWaitIdle(
3256 VkDevice _device)
3257 {
3258 ANV_FROM_HANDLE(anv_device, device, _device);
3259
3260 if (anv_device_is_lost(device))
3261 return VK_ERROR_DEVICE_LOST;
3262
3263 return anv_queue_submit_simple_batch(&device->queue, NULL);
3264 }
3265
3266 uint64_t
3267 anv_vma_alloc(struct anv_device *device,
3268 uint64_t size, uint64_t align,
3269 enum anv_bo_alloc_flags alloc_flags,
3270 uint64_t client_address)
3271 {
3272 pthread_mutex_lock(&device->vma_mutex);
3273
3274 uint64_t addr = 0;
3275
3276 if (alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) {
3277 if (client_address) {
3278 if (util_vma_heap_alloc_addr(&device->vma_cva,
3279 client_address, size)) {
3280 addr = client_address;
3281 }
3282 } else {
3283 addr = util_vma_heap_alloc(&device->vma_cva, size, align);
3284 }
3285 /* We don't want to fall back to other heaps */
3286 goto done;
3287 }
3288
3289 assert(client_address == 0);
3290
3291 if (!(alloc_flags & ANV_BO_ALLOC_32BIT_ADDRESS))
3292 addr = util_vma_heap_alloc(&device->vma_hi, size, align);
3293
3294 if (addr == 0)
3295 addr = util_vma_heap_alloc(&device->vma_lo, size, align);
3296
3297 done:
3298 pthread_mutex_unlock(&device->vma_mutex);
3299
3300 assert(addr == gen_48b_address(addr));
3301 return gen_canonical_address(addr);
3302 }
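
/* The canonicalization step above sign-extends bit 47 into the upper bits,
 * as the hardware expects for 48-bit addresses. A standalone sketch of
 * that arithmetic (an illustration of what gen_canonical_address() is
 * assumed to do, not the shared helper itself):
 */
#if 0
#include <stdint.h>

static uint64_t
example_canonical_address(uint64_t addr48)
{
   /* Shift bit 47 up to bit 63, then arithmetic-shift back down so the
    * top 16 bits replicate bit 47.
    */
   return (uint64_t)(((int64_t)addr48 << 16) >> 16);
}
#endif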
3303
3304 void
3305 anv_vma_free(struct anv_device *device,
3306 uint64_t address, uint64_t size)
3307 {
3308 const uint64_t addr_48b = gen_48b_address(address);
3309
3310 pthread_mutex_lock(&device->vma_mutex);
3311
3312 if (addr_48b >= LOW_HEAP_MIN_ADDRESS &&
3313 addr_48b <= LOW_HEAP_MAX_ADDRESS) {
3314 util_vma_heap_free(&device->vma_lo, addr_48b, size);
3315 } else if (addr_48b >= CLIENT_VISIBLE_HEAP_MIN_ADDRESS &&
3316 addr_48b <= CLIENT_VISIBLE_HEAP_MAX_ADDRESS) {
3317 util_vma_heap_free(&device->vma_cva, addr_48b, size);
3318 } else {
3319 assert(addr_48b >= HIGH_HEAP_MIN_ADDRESS);
3320 util_vma_heap_free(&device->vma_hi, addr_48b, size);
3321 }
3322
3323 pthread_mutex_unlock(&device->vma_mutex);
3324 }
3325
3326 VkResult anv_AllocateMemory(
3327 VkDevice _device,
3328 const VkMemoryAllocateInfo* pAllocateInfo,
3329 const VkAllocationCallbacks* pAllocator,
3330 VkDeviceMemory* pMem)
3331 {
3332 ANV_FROM_HANDLE(anv_device, device, _device);
3333 struct anv_physical_device *pdevice = device->physical;
3334 struct anv_device_memory *mem;
3335 VkResult result = VK_SUCCESS;
3336
3337 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
3338
3339 /* The Vulkan 1.0.33 spec says "allocationSize must be greater than 0". */
3340 assert(pAllocateInfo->allocationSize > 0);
3341
3342 VkDeviceSize aligned_alloc_size =
3343 align_u64(pAllocateInfo->allocationSize, 4096);
3344
3345 if (aligned_alloc_size > MAX_MEMORY_ALLOCATION_SIZE)
3346 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
3347
3348 assert(pAllocateInfo->memoryTypeIndex < pdevice->memory.type_count);
3349 struct anv_memory_type *mem_type =
3350 &pdevice->memory.types[pAllocateInfo->memoryTypeIndex];
3351 assert(mem_type->heapIndex < pdevice->memory.heap_count);
3352 struct anv_memory_heap *mem_heap =
3353 &pdevice->memory.heaps[mem_type->heapIndex];
3354
3355 uint64_t mem_heap_used = p_atomic_read(&mem_heap->used);
3356 if (mem_heap_used + aligned_alloc_size > mem_heap->size)
3357 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
3358
3359 mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
3360 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3361 if (mem == NULL)
3362 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3363
3364 assert(pAllocateInfo->memoryTypeIndex < pdevice->memory.type_count);
3365 mem->type = mem_type;
3366 mem->map = NULL;
3367 mem->map_size = 0;
3368 mem->ahw = NULL;
3369 mem->host_ptr = NULL;
3370
3371 enum anv_bo_alloc_flags alloc_flags = 0;
3372
3373 const VkExportMemoryAllocateInfo *export_info = NULL;
3374 const VkImportAndroidHardwareBufferInfoANDROID *ahw_import_info = NULL;
3375 const VkImportMemoryFdInfoKHR *fd_info = NULL;
3376 const VkImportMemoryHostPointerInfoEXT *host_ptr_info = NULL;
3377 const VkMemoryDedicatedAllocateInfo *dedicated_info = NULL;
3378 VkMemoryAllocateFlags vk_flags = 0;
3379 uint64_t client_address = 0;
3380
3381 vk_foreach_struct_const(ext, pAllocateInfo->pNext) {
3382 switch (ext->sType) {
3383 case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO:
3384 export_info = (void *)ext;
3385 break;
3386
3387 case VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID:
3388 ahw_import_info = (void *)ext;
3389 break;
3390
3391 case VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR:
3392 fd_info = (void *)ext;
3393 break;
3394
3395 case VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT:
3396 host_ptr_info = (void *)ext;
3397 break;
3398
3399 case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO: {
3400 const VkMemoryAllocateFlagsInfo *flags_info = (void *)ext;
3401 vk_flags = flags_info->flags;
3402 break;
3403 }
3404
3405 case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO:
3406 dedicated_info = (void *)ext;
3407 break;
3408
3409 case VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO_KHR: {
3410 const VkMemoryOpaqueCaptureAddressAllocateInfoKHR *addr_info =
3411 (const VkMemoryOpaqueCaptureAddressAllocateInfoKHR *)ext;
3412 client_address = addr_info->opaqueCaptureAddress;
3413 break;
3414 }
3415
3416 default:
3417 anv_debug_ignored_stype(ext->sType);
3418 break;
3419 }
3420 }
3421
3422 /* By default, we want all VkDeviceMemory objects to support CCS */
3423 if (device->physical->has_implicit_ccs)
3424 alloc_flags |= ANV_BO_ALLOC_IMPLICIT_CCS;
3425
3426 if (vk_flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR)
3427 alloc_flags |= ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS;
3428
3429 if ((export_info && export_info->handleTypes) ||
3430 (fd_info && fd_info->handleType) ||
3431 (host_ptr_info && host_ptr_info->handleType)) {
3432 /* Anything imported or exported is EXTERNAL */
3433 alloc_flags |= ANV_BO_ALLOC_EXTERNAL;
3434
3435 /* We can't have implicit CCS on external memory with an AUX-table.
3436 * Doing so would require us to sync the aux tables across processes
3437 * which is impractical.
3438 */
3439 if (device->info.has_aux_map)
3440 alloc_flags &= ~ANV_BO_ALLOC_IMPLICIT_CCS;
3441 }
3442
3443 /* Check if we need to support Android HW buffer export. If so,
3444 * create AHardwareBuffer and import memory from it.
3445 */
3446 bool android_export = false;
3447 if (export_info && export_info->handleTypes &
3448 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID)
3449 android_export = true;
3450
3451 if (ahw_import_info) {
3452 result = anv_import_ahw_memory(_device, mem, ahw_import_info);
3453 if (result != VK_SUCCESS)
3454 goto fail;
3455
3456 goto success;
3457 } else if (android_export) {
3458 result = anv_create_ahw_memory(_device, mem, pAllocateInfo);
3459 if (result != VK_SUCCESS)
3460 goto fail;
3461
3462 const VkImportAndroidHardwareBufferInfoANDROID import_info = {
3463 .buffer = mem->ahw,
3464 };
3465 result = anv_import_ahw_memory(_device, mem, &import_info);
3466 if (result != VK_SUCCESS)
3467 goto fail;
3468
3469 goto success;
3470 }
3471
3472 /* The Vulkan spec permits handleType to be 0, in which case the struct is
3473 * ignored.
3474 */
3475 if (fd_info && fd_info->handleType) {
3476 /* At the moment, we support only the below handle types. */
3477 assert(fd_info->handleType ==
3478 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
3479 fd_info->handleType ==
3480 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
3481
3482 result = anv_device_import_bo(device, fd_info->fd, alloc_flags,
3483 client_address, &mem->bo);
3484 if (result != VK_SUCCESS)
3485 goto fail;
3486
3487 /* For security purposes, we reject importing the bo if it's smaller
3488 * than the requested allocation size. This prevents a malicious client
3489 * from passing a buffer to a trusted client, lying about the size, and
3490 * telling the trusted client to try to texture from an image that goes
3491 * out-of-bounds. This sort of thing could lead to GPU hangs or worse
3492 * in the trusted client. The trusted client can protect itself against
3493 * this sort of attack but only if it can trust the buffer size.
3494 */
3495 if (mem->bo->size < aligned_alloc_size) {
3496 result = vk_errorf(device, device, VK_ERROR_INVALID_EXTERNAL_HANDLE,
3497 "aligned allocationSize too large for "
3498 "VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT: "
3499 "%"PRIu64"B > %"PRIu64"B",
3500 aligned_alloc_size, mem->bo->size);
3501 anv_device_release_bo(device, mem->bo);
3502 goto fail;
3503 }
3504
3505 /* From the Vulkan spec:
3506 *
3507 * "Importing memory from a file descriptor transfers ownership of
3508 * the file descriptor from the application to the Vulkan
3509 * implementation. The application must not perform any operations on
3510 * the file descriptor after a successful import."
3511 *
3512 * If the import fails, we leave the file descriptor open.
3513 */
3514 close(fd_info->fd);
3515 goto success;
3516 }
3517
3518 if (host_ptr_info && host_ptr_info->handleType) {
3519 if (host_ptr_info->handleType ==
3520 VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT) {
3521 result = vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
3522 goto fail;
3523 }
3524
3525 assert(host_ptr_info->handleType ==
3526 VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT);
3527
3528 result = anv_device_import_bo_from_host_ptr(device,
3529 host_ptr_info->pHostPointer,
3530 pAllocateInfo->allocationSize,
3531 alloc_flags,
3532 client_address,
3533 &mem->bo);
3534 if (result != VK_SUCCESS)
3535 goto fail;
3536
3537 mem->host_ptr = host_ptr_info->pHostPointer;
3538 goto success;
3539 }
3540
3541 /* Regular allocation (not importing memory). */
3542
3543 result = anv_device_alloc_bo(device, pAllocateInfo->allocationSize,
3544 alloc_flags, client_address, &mem->bo);
3545 if (result != VK_SUCCESS)
3546 goto fail;
3547
3548 if (dedicated_info && dedicated_info->image != VK_NULL_HANDLE) {
3549 ANV_FROM_HANDLE(anv_image, image, dedicated_info->image);
3550
3551 /* Some legacy (non-modifiers) consumers need the tiling to be set on
3552 * the BO. In this case, we have a dedicated allocation.
3553 */
3554 if (image->needs_set_tiling) {
3555 const uint32_t i915_tiling =
3556 isl_tiling_to_i915_tiling(image->planes[0].surface.isl.tiling);
3557 int ret = anv_gem_set_tiling(device, mem->bo->gem_handle,
3558 image->planes[0].surface.isl.row_pitch_B,
3559 i915_tiling);
3560 if (ret) {
3561 anv_device_release_bo(device, mem->bo);
3562 result = vk_errorf(device, device, VK_ERROR_OUT_OF_DEVICE_MEMORY,
3563 "failed to set BO tiling: %m");
3564 goto fail;
3565 }
3566 }
3567 }
3568
3569 success:
3570 mem_heap_used = p_atomic_add_return(&mem_heap->used, mem->bo->size);
3571 if (mem_heap_used > mem_heap->size) {
3572 p_atomic_add(&mem_heap->used, -mem->bo->size);
3573 anv_device_release_bo(device, mem->bo);
3574 result = vk_errorf(device, device, VK_ERROR_OUT_OF_DEVICE_MEMORY,
3575 "Out of heap memory");
3576 goto fail;
3577 }
3578
3579 pthread_mutex_lock(&device->mutex);
3580 list_addtail(&mem->link, &device->memory_objects);
3581 pthread_mutex_unlock(&device->mutex);
3582
3583 *pMem = anv_device_memory_to_handle(mem);
3584
3585 return VK_SUCCESS;
3586
3587 fail:
3588 vk_free2(&device->alloc, pAllocator, mem);
3589
3590 return result;
3591 }
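
/* The vk_foreach_struct_const loop above is the standard Vulkan pNext-chain
 * walk. Open-coded against the core headers only, it looks roughly like
 * this (illustrative sketch, not used by the driver):
 */
#if 0
#include <vulkan/vulkan.h>

static const void *
example_find_in_chain(const void *pNext, VkStructureType sType)
{
   for (const VkBaseInStructure *s = pNext; s != NULL; s = s->pNext) {
      if (s->sType == sType)
         return s;
   }
   return NULL;
}
#endif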
3592
3593 VkResult anv_GetMemoryFdKHR(
3594 VkDevice device_h,
3595 const VkMemoryGetFdInfoKHR* pGetFdInfo,
3596 int* pFd)
3597 {
3598 ANV_FROM_HANDLE(anv_device, dev, device_h);
3599 ANV_FROM_HANDLE(anv_device_memory, mem, pGetFdInfo->memory);
3600
3601 assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
3602
3603 assert(pGetFdInfo->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
3604 pGetFdInfo->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
3605
3606 return anv_device_export_bo(dev, mem->bo, pFd);
3607 }
3608
3609 VkResult anv_GetMemoryFdPropertiesKHR(
3610 VkDevice _device,
3611 VkExternalMemoryHandleTypeFlagBits handleType,
3612 int fd,
3613 VkMemoryFdPropertiesKHR* pMemoryFdProperties)
3614 {
3615 ANV_FROM_HANDLE(anv_device, device, _device);
3616
3617 switch (handleType) {
3618 case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
3619 /* dma-buf can be imported as any memory type */
3620 pMemoryFdProperties->memoryTypeBits =
3621 (1 << device->physical->memory.type_count) - 1;
3622 return VK_SUCCESS;
3623
3624 default:
3625 /* The valid usage section for this function says:
3626 *
3627 * "handleType must not be one of the handle types defined as
3628 * opaque."
3629 *
3630 * So opaque handle types fall into the default "unsupported" case.
3631 */
3632 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
3633 }
3634 }
3635
3636 VkResult anv_GetMemoryHostPointerPropertiesEXT(
3637 VkDevice _device,
3638 VkExternalMemoryHandleTypeFlagBits handleType,
3639 const void* pHostPointer,
3640 VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties)
3641 {
3642 ANV_FROM_HANDLE(anv_device, device, _device);
3643
3644 assert(pMemoryHostPointerProperties->sType ==
3645 VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT);
3646
3647 switch (handleType) {
3648 case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT:
3649 /* Host memory can be imported as any memory type. */
3650 pMemoryHostPointerProperties->memoryTypeBits =
3651 (1ull << device->physical->memory.type_count) - 1;
3652
3653 return VK_SUCCESS;
3654
3655 default:
3656 return VK_ERROR_INVALID_EXTERNAL_HANDLE;
3657 }
3658 }
3659
3660 void anv_FreeMemory(
3661 VkDevice _device,
3662 VkDeviceMemory _mem,
3663 const VkAllocationCallbacks* pAllocator)
3664 {
3665 ANV_FROM_HANDLE(anv_device, device, _device);
3666 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
3667
3668 if (mem == NULL)
3669 return;
3670
3671 pthread_mutex_lock(&device->mutex);
3672 list_del(&mem->link);
3673 pthread_mutex_unlock(&device->mutex);
3674
3675 if (mem->map)
3676 anv_UnmapMemory(_device, _mem);
3677
3678 p_atomic_add(&device->physical->memory.heaps[mem->type->heapIndex].used,
3679 -mem->bo->size);
3680
3681 anv_device_release_bo(device, mem->bo);
3682
3683 #if defined(ANDROID) && ANDROID_API_LEVEL >= 26
3684 if (mem->ahw)
3685 AHardwareBuffer_release(mem->ahw);
3686 #endif
3687
3688 vk_free2(&device->alloc, pAllocator, mem);
3689 }
3690
3691 VkResult anv_MapMemory(
3692 VkDevice _device,
3693 VkDeviceMemory _memory,
3694 VkDeviceSize offset,
3695 VkDeviceSize size,
3696 VkMemoryMapFlags flags,
3697 void** ppData)
3698 {
3699 ANV_FROM_HANDLE(anv_device, device, _device);
3700 ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
3701
3702 if (mem == NULL) {
3703 *ppData = NULL;
3704 return VK_SUCCESS;
3705 }
3706
3707 if (mem->host_ptr) {
3708 *ppData = mem->host_ptr + offset;
3709 return VK_SUCCESS;
3710 }
3711
3712 if (size == VK_WHOLE_SIZE)
3713 size = mem->bo->size - offset;
3714
3715 /* From the Vulkan spec version 1.0.32 docs for MapMemory:
3716 *
3717 * * If size is not equal to VK_WHOLE_SIZE, size must be greater than 0
3719 * * If size is not equal to VK_WHOLE_SIZE, size must be less than or
3720 * equal to the size of the memory minus offset
3721 */
3722 assert(size > 0);
3723 assert(offset + size <= mem->bo->size);
3724
3725 /* FIXME: Is this supposed to be thread-safe? Since vkUnmapMemory() only
3726 * takes a VkDeviceMemory handle, it seems like only one map of the memory
3727 * at a time is valid. We could just mmap up front and return an offset
3728 * pointer here, but that may exhaust virtual memory on 32-bit
3729 * userspace. */
3730
3731 uint32_t gem_flags = 0;
3732
3733 if (!device->info.has_llc &&
3734 (mem->type->propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT))
3735 gem_flags |= I915_MMAP_WC;
3736
3737 /* GEM will fail to map if the offset isn't 4k-aligned. Round down. */
3738 uint64_t map_offset;
3739 if (!device->physical->has_mmap_offset)
3740 map_offset = offset & ~4095ull;
3741 else
3742 map_offset = 0;
3743 assert(offset >= map_offset);
3744 uint64_t map_size = (offset + size) - map_offset;
3745
3746 /* Let's map whole pages */
3747 map_size = align_u64(map_size, 4096);
3748
3749 void *map = anv_gem_mmap(device, mem->bo->gem_handle,
3750 map_offset, map_size, gem_flags);
3751 if (map == MAP_FAILED)
3752 return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
3753
3754 mem->map = map;
3755 mem->map_size = map_size;
3756
3757 *ppData = mem->map + (offset - map_offset);
3758
3759 return VK_SUCCESS;
3760 }
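
/* The offset arithmetic above maps a page-aligned window covering
 * [offset, offset + size). A worked standalone version of that math
 * (sketch under the assumption of 4096-byte pages):
 */
#if 0
#include <stdint.h>
#include <assert.h>

static void
example_map_window(uint64_t offset, uint64_t size,
                   uint64_t *map_offset, uint64_t *map_size)
{
   *map_offset = offset & ~4095ull;           /* round down to a page */
   *map_size = (offset + size) - *map_offset; /* cover the full range */
   *map_size = (*map_size + 4095) & ~4095ull; /* round up to whole pages */

   /* e.g. offset = 5000, size = 100 -> map_offset = 4096, map_size = 4096 */
   assert(*map_offset <= offset);
   assert(*map_offset + *map_size >= offset + size);
}
#endif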
3761
3762 void anv_UnmapMemory(
3763 VkDevice _device,
3764 VkDeviceMemory _memory)
3765 {
3766 ANV_FROM_HANDLE(anv_device, device, _device);
3767 ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
3768
3769 if (mem == NULL || mem->host_ptr)
3770 return;
3771
3772 anv_gem_munmap(device, mem->map, mem->map_size);
3773
3774 mem->map = NULL;
3775 mem->map_size = 0;
3776 }
3777
3778 static void
3779 clflush_mapped_ranges(struct anv_device *device,
3780 uint32_t count,
3781 const VkMappedMemoryRange *ranges)
3782 {
3783 for (uint32_t i = 0; i < count; i++) {
3784 ANV_FROM_HANDLE(anv_device_memory, mem, ranges[i].memory);
3785 if (ranges[i].offset >= mem->map_size)
3786 continue;
3787
3788 gen_clflush_range(mem->map + ranges[i].offset,
3789 MIN2(ranges[i].size, mem->map_size - ranges[i].offset));
3790 }
3791 }
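
/* gen_clflush_range() flushes every cache line overlapping the given range.
 * A minimal sketch of that pattern, assuming 64-byte cache lines
 * (illustration only; the real helper lives in common code):
 */
#if 0
#include <stddef.h>
#include <stdint.h>

static void
example_clflush_range(void *start, size_t size)
{
   char *p = (char *)((uintptr_t)start & ~63ull); /* align down to a line */
   char *end = (char *)start + size;

   for (; p < end; p += 64)
      __builtin_ia32_clflush(p);
}
#endif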
3792
3793 VkResult anv_FlushMappedMemoryRanges(
3794 VkDevice _device,
3795 uint32_t memoryRangeCount,
3796 const VkMappedMemoryRange* pMemoryRanges)
3797 {
3798 ANV_FROM_HANDLE(anv_device, device, _device);
3799
3800 if (device->info.has_llc)
3801 return VK_SUCCESS;
3802
3803 /* Make sure the writes we're flushing have landed. */
3804 __builtin_ia32_mfence();
3805
3806 clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);
3807
3808 return VK_SUCCESS;
3809 }
3810
3811 VkResult anv_InvalidateMappedMemoryRanges(
3812 VkDevice _device,
3813 uint32_t memoryRangeCount,
3814 const VkMappedMemoryRange* pMemoryRanges)
3815 {
3816 ANV_FROM_HANDLE(anv_device, device, _device);
3817
3818 if (device->info.has_llc)
3819 return VK_SUCCESS;
3820
3821 clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);
3822
3823 /* Make sure no reads get moved up above the invalidate. */
3824 __builtin_ia32_mfence();
3825
3826 return VK_SUCCESS;
3827 }
3828
3829 void anv_GetBufferMemoryRequirements(
3830 VkDevice _device,
3831 VkBuffer _buffer,
3832 VkMemoryRequirements* pMemoryRequirements)
3833 {
3834 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
3835 ANV_FROM_HANDLE(anv_device, device, _device);
3836
3837 /* The Vulkan spec (git aaed022) says:
3838 *
3839 * memoryTypeBits is a bitfield and contains one bit set for every
3840 * supported memory type for the resource. The bit `1<<i` is set if and
3841 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
3842 * structure for the physical device is supported.
3843 */
3844 uint32_t memory_types = (1ull << device->physical->memory.type_count) - 1;
3845
3846 /* Base alignment requirement of a cache line */
3847 uint32_t alignment = 16;
3848
3849 /* We need an alignment of 32 for pushing UBOs */
3850 if (buffer->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT)
3851 alignment = MAX2(alignment, 32);
3852
3853 pMemoryRequirements->size = buffer->size;
3854 pMemoryRequirements->alignment = alignment;
3855
3856 /* Storage and uniform buffers should have their size aligned to
3857 * 32 bits to avoid boundary checks when the last DWord is not complete.
3858 * This ensures that no internal padding is needed for
3859 * 16-bit types.
3860 */
3861 if (device->robust_buffer_access &&
3862 (buffer->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT ||
3863 buffer->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT))
3864 pMemoryRequirements->size = align_u64(buffer->size, 4);
3865
3866 pMemoryRequirements->memoryTypeBits = memory_types;
3867 }
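
/* align_u64() used above is the usual power-of-two round-up. For reference,
 * a standalone sketch (assumes the alignment is a power of two):
 */
#if 0
#include <stdint.h>

static uint64_t
example_align_u64(uint64_t value, uint64_t alignment)
{
   /* e.g. value = 6, alignment = 4 -> 8 */
   return (value + alignment - 1) & ~(alignment - 1);
}
#endif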
3868
3869 void anv_GetBufferMemoryRequirements2(
3870 VkDevice _device,
3871 const VkBufferMemoryRequirementsInfo2* pInfo,
3872 VkMemoryRequirements2* pMemoryRequirements)
3873 {
3874 anv_GetBufferMemoryRequirements(_device, pInfo->buffer,
3875 &pMemoryRequirements->memoryRequirements);
3876
3877 vk_foreach_struct(ext, pMemoryRequirements->pNext) {
3878 switch (ext->sType) {
3879 case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS: {
3880 VkMemoryDedicatedRequirements *requirements = (void *)ext;
3881 requirements->prefersDedicatedAllocation = false;
3882 requirements->requiresDedicatedAllocation = false;
3883 break;
3884 }
3885
3886 default:
3887 anv_debug_ignored_stype(ext->sType);
3888 break;
3889 }
3890 }
3891 }
3892
3893 void anv_GetImageMemoryRequirements(
3894 VkDevice _device,
3895 VkImage _image,
3896 VkMemoryRequirements* pMemoryRequirements)
3897 {
3898 ANV_FROM_HANDLE(anv_image, image, _image);
3899 ANV_FROM_HANDLE(anv_device, device, _device);
3900
3901 /* The Vulkan spec (git aaed022) says:
3902 *
3903 * memoryTypeBits is a bitfield and contains one bit set for every
3904 * supported memory type for the resource. The bit `1<<i` is set if and
3905 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
3906 * structure for the physical device is supported.
3907 *
3908 * All types are currently supported for images.
3909 */
3910 uint32_t memory_types = (1ull << device->physical->memory.type_count) - 1;
3911
3912 pMemoryRequirements->size = image->size;
3913 pMemoryRequirements->alignment = image->alignment;
3914 pMemoryRequirements->memoryTypeBits = memory_types;
3915 }
3916
3917 void anv_GetImageMemoryRequirements2(
3918 VkDevice _device,
3919 const VkImageMemoryRequirementsInfo2* pInfo,
3920 VkMemoryRequirements2* pMemoryRequirements)
3921 {
3922 ANV_FROM_HANDLE(anv_device, device, _device);
3923 ANV_FROM_HANDLE(anv_image, image, pInfo->image);
3924
3925 anv_GetImageMemoryRequirements(_device, pInfo->image,
3926 &pMemoryRequirements->memoryRequirements);
3927
3928 vk_foreach_struct_const(ext, pInfo->pNext) {
3929 switch (ext->sType) {
3930 case VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO: {
3931 const VkImagePlaneMemoryRequirementsInfo *plane_reqs =
3932 (const VkImagePlaneMemoryRequirementsInfo *) ext;
3933 uint32_t plane = anv_image_aspect_to_plane(image->aspects,
3934 plane_reqs->planeAspect);
3935
3936 assert(image->planes[plane].offset == 0);
3937
3938 /* The Vulkan spec (git aaed022) says:
3939 *
3940 * memoryTypeBits is a bitfield and contains one bit set for every
3941 * supported memory type for the resource. The bit `1<<i` is set
3942 * if and only if the memory type `i` in the
3943 * VkPhysicalDeviceMemoryProperties structure for the physical
3944 * device is supported.
3945 *
3946 * All types are currently supported for images.
3947 */
3948 pMemoryRequirements->memoryRequirements.memoryTypeBits =
3949 (1ull << device->physical->memory.type_count) - 1;
3950
3951 pMemoryRequirements->memoryRequirements.size = image->planes[plane].size;
3952 pMemoryRequirements->memoryRequirements.alignment =
3953 image->planes[plane].alignment;
3954 break;
3955 }
3956
3957 default:
3958 anv_debug_ignored_stype(ext->sType);
3959 break;
3960 }
3961 }
3962
3963 vk_foreach_struct(ext, pMemoryRequirements->pNext) {
3964 switch (ext->sType) {
3965 case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS: {
3966 VkMemoryDedicatedRequirements *requirements = (void *)ext;
3967 if (image->needs_set_tiling || image->external_format) {
3968 /* If we need to set the tiling for external consumers, we need a
3969 * dedicated allocation.
3970 *
3971 * See also anv_AllocateMemory.
3972 */
3973 requirements->prefersDedicatedAllocation = true;
3974 requirements->requiresDedicatedAllocation = true;
3975 } else {
3976 requirements->prefersDedicatedAllocation = false;
3977 requirements->requiresDedicatedAllocation = false;
3978 }
3979 break;
3980 }
3981
3982 default:
3983 anv_debug_ignored_stype(ext->sType);
3984 break;
3985 }
3986 }
3987 }
3988
3989 void anv_GetImageSparseMemoryRequirements(
3990 VkDevice device,
3991 VkImage image,
3992 uint32_t* pSparseMemoryRequirementCount,
3993 VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
3994 {
3995 *pSparseMemoryRequirementCount = 0;
3996 }
3997
3998 void anv_GetImageSparseMemoryRequirements2(
3999 VkDevice device,
4000 const VkImageSparseMemoryRequirementsInfo2* pInfo,
4001 uint32_t* pSparseMemoryRequirementCount,
4002 VkSparseImageMemoryRequirements2* pSparseMemoryRequirements)
4003 {
4004 *pSparseMemoryRequirementCount = 0;
4005 }
4006
4007 void anv_GetDeviceMemoryCommitment(
4008 VkDevice device,
4009 VkDeviceMemory memory,
4010 VkDeviceSize* pCommittedMemoryInBytes)
4011 {
4012 *pCommittedMemoryInBytes = 0;
4013 }
4014
4015 static void
4016 anv_bind_buffer_memory(const VkBindBufferMemoryInfo *pBindInfo)
4017 {
4018 ANV_FROM_HANDLE(anv_device_memory, mem, pBindInfo->memory);
4019 ANV_FROM_HANDLE(anv_buffer, buffer, pBindInfo->buffer);
4020
4021 assert(pBindInfo->sType == VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO);
4022
4023 if (mem) {
4024 buffer->address = (struct anv_address) {
4025 .bo = mem->bo,
4026 .offset = pBindInfo->memoryOffset,
4027 };
4028 } else {
4029 buffer->address = ANV_NULL_ADDRESS;
4030 }
4031 }
4032
4033 VkResult anv_BindBufferMemory(
4034 VkDevice device,
4035 VkBuffer buffer,
4036 VkDeviceMemory memory,
4037 VkDeviceSize memoryOffset)
4038 {
4039 anv_bind_buffer_memory(
4040 &(VkBindBufferMemoryInfo) {
4041 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
4042 .buffer = buffer,
4043 .memory = memory,
4044 .memoryOffset = memoryOffset,
4045 });
4046
4047 return VK_SUCCESS;
4048 }
4049
4050 VkResult anv_BindBufferMemory2(
4051 VkDevice device,
4052 uint32_t bindInfoCount,
4053 const VkBindBufferMemoryInfo* pBindInfos)
4054 {
4055 for (uint32_t i = 0; i < bindInfoCount; i++)
4056 anv_bind_buffer_memory(&pBindInfos[i]);
4057
4058 return VK_SUCCESS;
4059 }
4060
4061 VkResult anv_QueueBindSparse(
4062 VkQueue _queue,
4063 uint32_t bindInfoCount,
4064 const VkBindSparseInfo* pBindInfo,
4065 VkFence fence)
4066 {
4067 ANV_FROM_HANDLE(anv_queue, queue, _queue);
4068 if (anv_device_is_lost(queue->device))
4069 return VK_ERROR_DEVICE_LOST;
4070
4071 return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
4072 }
4073
4074 // Event functions
4075
4076 VkResult anv_CreateEvent(
4077 VkDevice _device,
4078 const VkEventCreateInfo* pCreateInfo,
4079 const VkAllocationCallbacks* pAllocator,
4080 VkEvent* pEvent)
4081 {
4082 ANV_FROM_HANDLE(anv_device, device, _device);
4083 struct anv_state state;
4084 struct anv_event *event;
4085
4086 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_EVENT_CREATE_INFO);
4087
4088 state = anv_state_pool_alloc(&device->dynamic_state_pool,
4089 sizeof(*event), 8);
4090 event = state.map;
4091 event->state = state;
4092 event->semaphore = VK_EVENT_RESET;
4093
4094 if (!device->info.has_llc) {
4095 /* Make sure the writes we're flushing have landed. */
4096 __builtin_ia32_mfence();
4097 __builtin_ia32_clflush(event);
4098 }
4099
4100 *pEvent = anv_event_to_handle(event);
4101
4102 return VK_SUCCESS;
4103 }
4104
4105 void anv_DestroyEvent(
4106 VkDevice _device,
4107 VkEvent _event,
4108 const VkAllocationCallbacks* pAllocator)
4109 {
4110 ANV_FROM_HANDLE(anv_device, device, _device);
4111 ANV_FROM_HANDLE(anv_event, event, _event);
4112
4113 if (!event)
4114 return;
4115
4116 anv_state_pool_free(&device->dynamic_state_pool, event->state);
4117 }
4118
4119 VkResult anv_GetEventStatus(
4120 VkDevice _device,
4121 VkEvent _event)
4122 {
4123 ANV_FROM_HANDLE(anv_device, device, _device);
4124 ANV_FROM_HANDLE(anv_event, event, _event);
4125
4126 if (anv_device_is_lost(device))
4127 return VK_ERROR_DEVICE_LOST;
4128
4129 if (!device->info.has_llc) {
4130 /* Invalidate read cache before reading event written by GPU. */
4131 __builtin_ia32_clflush(event);
4132 __builtin_ia32_mfence();
4134 }
4135
4136 return event->semaphore;
4137 }
4138
4139 VkResult anv_SetEvent(
4140 VkDevice _device,
4141 VkEvent _event)
4142 {
4143 ANV_FROM_HANDLE(anv_device, device, _device);
4144 ANV_FROM_HANDLE(anv_event, event, _event);
4145
4146 event->semaphore = VK_EVENT_SET;
4147
4148 if (!device->info.has_llc) {
4149 /* Make sure the writes we're flushing have landed. */
4150 __builtin_ia32_mfence();
4151 __builtin_ia32_clflush(event);
4152 }
4153
4154 return VK_SUCCESS;
4155 }
4156
4157 VkResult anv_ResetEvent(
4158 VkDevice _device,
4159 VkEvent _event)
4160 {
4161 ANV_FROM_HANDLE(anv_device, device, _device);
4162 ANV_FROM_HANDLE(anv_event, event, _event);
4163
4164 event->semaphore = VK_EVENT_RESET;
4165
4166 if (!device->info.has_llc) {
4167 /* Make sure the writes we're flushing have landed. */
4168 __builtin_ia32_mfence();
4169 __builtin_ia32_clflush(event);
4170 }
4171
4172 return VK_SUCCESS;
4173 }
4174
4175 // Buffer functions
4176
4177 VkResult anv_CreateBuffer(
4178 VkDevice _device,
4179 const VkBufferCreateInfo* pCreateInfo,
4180 const VkAllocationCallbacks* pAllocator,
4181 VkBuffer* pBuffer)
4182 {
4183 ANV_FROM_HANDLE(anv_device, device, _device);
4184 struct anv_buffer *buffer;
4185
4186 /* Don't allow creating buffers bigger than our address space. The real
4187 * issue here is that we may align up the buffer size and we don't want
4188 * that alignment to cause roll-over. However, no one has any business
4189 * allocating a buffer larger than our GTT size.
4190 */
4191 if (pCreateInfo->size > device->physical->gtt_size)
4192 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
4193
4194 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
4195
4196 buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
4197 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
4198 if (buffer == NULL)
4199 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
4200
4201 buffer->size = pCreateInfo->size;
4202 buffer->usage = pCreateInfo->usage;
4203 buffer->address = ANV_NULL_ADDRESS;
4204
4205 *pBuffer = anv_buffer_to_handle(buffer);
4206
4207 return VK_SUCCESS;
4208 }
4209
4210 void anv_DestroyBuffer(
4211 VkDevice _device,
4212 VkBuffer _buffer,
4213 const VkAllocationCallbacks* pAllocator)
4214 {
4215 ANV_FROM_HANDLE(anv_device, device, _device);
4216 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
4217
4218 if (!buffer)
4219 return;
4220
4221 vk_free2(&device->alloc, pAllocator, buffer);
4222 }
4223
4224 VkDeviceAddress anv_GetBufferDeviceAddress(
4225 VkDevice device,
4226 const VkBufferDeviceAddressInfoKHR* pInfo)
4227 {
4228 ANV_FROM_HANDLE(anv_buffer, buffer, pInfo->buffer);
4229
4230 assert(!anv_address_is_null(buffer->address));
4231 assert(buffer->address.bo->flags & EXEC_OBJECT_PINNED);
4232
4233 return anv_address_physical(buffer->address);
4234 }
4235
4236 uint64_t anv_GetBufferOpaqueCaptureAddress(
4237 VkDevice device,
4238 const VkBufferDeviceAddressInfoKHR* pInfo)
4239 {
4240 return 0;
4241 }
4242
4243 uint64_t anv_GetDeviceMemoryOpaqueCaptureAddress(
4244 VkDevice device,
4245 const VkDeviceMemoryOpaqueCaptureAddressInfoKHR* pInfo)
4246 {
4247 ANV_FROM_HANDLE(anv_device_memory, memory, pInfo->memory);
4248
4249 assert(memory->bo->flags & EXEC_OBJECT_PINNED);
4250 assert(memory->bo->has_client_visible_address);
4251
4252 return gen_48b_address(memory->bo->offset);
4253 }
4254
4255 void
4256 anv_fill_buffer_surface_state(struct anv_device *device, struct anv_state state,
4257 enum isl_format format,
4258 struct anv_address address,
4259 uint32_t range, uint32_t stride)
4260 {
4261 isl_buffer_fill_state(&device->isl_dev, state.map,
4262 .address = anv_address_physical(address),
4263 .mocs = device->isl_dev.mocs.internal,
4264 .size_B = range,
4265 .format = format,
4266 .swizzle = ISL_SWIZZLE_IDENTITY,
4267 .stride_B = stride);
4268 }
4269
4270 void anv_DestroySampler(
4271 VkDevice _device,
4272 VkSampler _sampler,
4273 const VkAllocationCallbacks* pAllocator)
4274 {
4275 ANV_FROM_HANDLE(anv_device, device, _device);
4276 ANV_FROM_HANDLE(anv_sampler, sampler, _sampler);
4277
4278 if (!sampler)
4279 return;
4280
4281 if (sampler->bindless_state.map) {
4282 anv_state_pool_free(&device->dynamic_state_pool,
4283 sampler->bindless_state);
4284 }
4285
4286 vk_free2(&device->alloc, pAllocator, sampler);
4287 }
4288
4289 VkResult anv_CreateFramebuffer(
4290 VkDevice _device,
4291 const VkFramebufferCreateInfo* pCreateInfo,
4292 const VkAllocationCallbacks* pAllocator,
4293 VkFramebuffer* pFramebuffer)
4294 {
4295 ANV_FROM_HANDLE(anv_device, device, _device);
4296 struct anv_framebuffer *framebuffer;
4297
4298 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
4299
4300 size_t size = sizeof(*framebuffer);
4301
4302 /* VK_KHR_imageless_framebuffer extension says:
4303 *
4304 * If flags includes VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR,
4305 * parameter pAttachments is ignored.
4306 */
4307 if (!(pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR)) {
4308 size += sizeof(struct anv_image_view *) * pCreateInfo->attachmentCount;
4309 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
4310 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
4311 if (framebuffer == NULL)
4312 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
4313
4314 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
4315 ANV_FROM_HANDLE(anv_image_view, iview, pCreateInfo->pAttachments[i]);
4316 framebuffer->attachments[i] = iview;
4317 }
4318 framebuffer->attachment_count = pCreateInfo->attachmentCount;
4319 } else {
4320 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
4321 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
4322 if (framebuffer == NULL)
4323 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
4324
4325 framebuffer->attachment_count = 0;
4326 }
4327
4328 framebuffer->width = pCreateInfo->width;
4329 framebuffer->height = pCreateInfo->height;
4330 framebuffer->layers = pCreateInfo->layers;
4331
4332 *pFramebuffer = anv_framebuffer_to_handle(framebuffer);
4333
4334 return VK_SUCCESS;
4335 }
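
/* The framebuffer above is allocated with the trailing-array idiom: one
 * block holds the struct plus its attachment pointers. A self-contained
 * sketch of that idiom (illustrative names, not the driver's structs):
 */
#if 0
#include <stdint.h>
#include <stdlib.h>

struct example_fb {
   uint32_t count;
   void *attachments[]; /* C99 flexible array member */
};

static struct example_fb *
example_fb_alloc(uint32_t count)
{
   struct example_fb *fb =
      malloc(sizeof(*fb) + count * sizeof(fb->attachments[0]));
   if (fb)
      fb->count = count;
   return fb;
}
#endif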
4336
4337 void anv_DestroyFramebuffer(
4338 VkDevice _device,
4339 VkFramebuffer _fb,
4340 const VkAllocationCallbacks* pAllocator)
4341 {
4342 ANV_FROM_HANDLE(anv_device, device, _device);
4343 ANV_FROM_HANDLE(anv_framebuffer, fb, _fb);
4344
4345 if (!fb)
4346 return;
4347
4348 vk_free2(&device->alloc, pAllocator, fb);
4349 }
4350
4351 static const VkTimeDomainEXT anv_time_domains[] = {
4352 VK_TIME_DOMAIN_DEVICE_EXT,
4353 VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT,
4354 VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT,
4355 };
4356
4357 VkResult anv_GetPhysicalDeviceCalibrateableTimeDomainsEXT(
4358 VkPhysicalDevice physicalDevice,
4359 uint32_t *pTimeDomainCount,
4360 VkTimeDomainEXT *pTimeDomains)
4361 {
4362 int d;
4363 VK_OUTARRAY_MAKE(out, pTimeDomains, pTimeDomainCount);
4364
4365 for (d = 0; d < ARRAY_SIZE(anv_time_domains); d++) {
4366 vk_outarray_append(&out, i) {
4367 *i = anv_time_domains[d];
4368 }
4369 }
4370
4371 return vk_outarray_status(&out);
4372 }
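
/* VK_OUTARRAY_MAKE/vk_outarray_append implement the standard Vulkan
 * "two-call" enumeration contract. Open-coded, the contract looks roughly
 * like this (sketch, not the macros' actual implementation):
 */
#if 0
#include <vulkan/vulkan.h>

static VkResult
example_enumerate(uint32_t *pCount, VkTimeDomainEXT *pOut,
                  const VkTimeDomainEXT *src, uint32_t n)
{
   if (pOut == NULL) {
      *pCount = n; /* first call: report the count */
      return VK_SUCCESS;
   }

   uint32_t copied = n < *pCount ? n : *pCount;
   for (uint32_t i = 0; i < copied; i++)
      pOut[i] = src[i];
   *pCount = copied;

   /* second call: VK_INCOMPLETE if the caller's array was too small */
   return copied < n ? VK_INCOMPLETE : VK_SUCCESS;
}
#endif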
4373
4374 static uint64_t
4375 anv_clock_gettime(clockid_t clock_id)
4376 {
4377 struct timespec current;
4378 int ret;
4379
4380 ret = clock_gettime(clock_id, &current);
4381 if (ret < 0 && clock_id == CLOCK_MONOTONIC_RAW)
4382 ret = clock_gettime(CLOCK_MONOTONIC, &current);
4383 if (ret < 0)
4384 return 0;
4385
4386 return (uint64_t) current.tv_sec * 1000000000ULL + current.tv_nsec;
4387 }
4388
4389 #define TIMESTAMP 0x2358
4390
4391 VkResult anv_GetCalibratedTimestampsEXT(
4392 VkDevice _device,
4393 uint32_t timestampCount,
4394 const VkCalibratedTimestampInfoEXT *pTimestampInfos,
4395 uint64_t *pTimestamps,
4396 uint64_t *pMaxDeviation)
4397 {
4398 ANV_FROM_HANDLE(anv_device, device, _device);
4399 uint64_t timestamp_frequency = device->info.timestamp_frequency;
4400 int ret;
4401 int d;
4402 uint64_t begin, end;
4403 uint64_t max_clock_period = 0;
4404
4405 begin = anv_clock_gettime(CLOCK_MONOTONIC_RAW);
4406
4407 for (d = 0; d < timestampCount; d++) {
4408 switch (pTimestampInfos[d].timeDomain) {
4409 case VK_TIME_DOMAIN_DEVICE_EXT:
4410 ret = anv_gem_reg_read(device, TIMESTAMP | 1,
4411 &pTimestamps[d]);
4412
4413 if (ret != 0) {
4414 return anv_device_set_lost(device, "Failed to read the TIMESTAMP "
4415 "register: %m");
4416 }
4417 uint64_t device_period = DIV_ROUND_UP(1000000000, timestamp_frequency);
4418 max_clock_period = MAX2(max_clock_period, device_period);
4419 break;
4420 case VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT:
4421 pTimestamps[d] = anv_clock_gettime(CLOCK_MONOTONIC);
4422 max_clock_period = MAX2(max_clock_period, 1);
4423 break;
4424
4425 case VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT:
4426 pTimestamps[d] = begin;
4427 break;
4428 default:
4429 pTimestamps[d] = 0;
4430 break;
4431 }
4432 }
4433
4434 end = anv_clock_gettime(CLOCK_MONOTONIC_RAW);
4435
4436 /*
4437 * The maximum deviation is the sum of the interval over which we
4438 * perform the sampling and the maximum period of any sampled
4439 * clock. That's because the maximum skew between any two sampled
4440 * clock edges is when the sampled clock with the largest period is
4441 * sampled at the end of that period but right at the beginning of the
4442 * sampling interval and some other clock is sampled right at the
4443 * beginning of its sampling period and right at the end of the
4444 * sampling interval. Let's assume the GPU has the longest clock
4445 * period and that the application is sampling GPU and monotonic:
4446 *
4447 * s e
4448 * w x y z 0 1 2 3 4 5 6 7 8 9 a b c d e f
4449 * Raw -_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-
4450 *
4451 * g
4452 * 0 1 2 3
4453 * GPU -----_____-----_____-----_____-----_____
4454 *
4455 * m
4456 * x y z 0 1 2 3 4 5 6 7 8 9 a b c
4457 * Monotonic -_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-
4458 *
4459 * Interval <----------------->
4460 * Deviation <-------------------------->
4461 *
4462 * s = read(raw) 2
4463 * g = read(GPU) 1
4464 * m = read(monotonic) 2
4465 * e = read(raw) b
4466 *
4467 * We round the sample interval up by one tick to cover sampling error
4468 * in the interval clock.
4469 */
4470
4471 uint64_t sample_interval = end - begin + 1;
4472
4473 *pMaxDeviation = sample_interval + max_clock_period;
4474
4475 return VK_SUCCESS;
4476 }
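
/* A worked instance of the bound above, with made-up numbers: a 20ns
 * sampling window and a 12 MHz GPU timestamp clock (~84ns period).
 * Illustration only.
 */
#if 0
#include <stdint.h>

static uint64_t
example_max_deviation(void)
{
   uint64_t begin = 100, end = 120; /* CLOCK_MONOTONIC_RAW samples, ns */
   uint64_t gpu_period = (1000000000 + 12000000 - 1) / 12000000; /* 84 */

   return (end - begin + 1) + gpu_period; /* 21 + 84 = 105ns */
}
#endif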
4477
4478 /* vk_icd.h does not declare this function, so we declare it here to
4479 * suppress -Wmissing-prototypes.
4480 */
4481 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
4482 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t* pSupportedVersion);
4483
4484 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
4485 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t* pSupportedVersion)
4486 {
4487 /* For the full details on loader interface versioning, see
4488 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
4489 * What follows is a condensed summary, to help you navigate the large and
4490 * confusing official doc.
4491 *
4492 * - Loader interface v0 is incompatible with later versions. We don't
4493 * support it.
4494 *
4495 * - In loader interface v1:
4496 * - The first ICD entrypoint called by the loader is
4497 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
4498 * entrypoint.
4499 * - The ICD must statically expose no other Vulkan symbol unless it is
4500 * linked with -Bsymbolic.
4501 * - Each dispatchable Vulkan handle created by the ICD must be
4502 * a pointer to a struct whose first member is VK_LOADER_DATA. The
4503 * ICD must initialize VK_LOADER_DATA.loadMagic to ICD_LOADER_MAGIC.
4504 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
4505 * vkDestroySurfaceKHR(). The ICD must be capable of working with
4506 * such loader-managed surfaces.
4507 *
4508 * - Loader interface v2 differs from v1 in:
4509 * - The first ICD entrypoint called by the loader is
4510 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
4511 * statically expose this entrypoint.
4512 *
4513 * - Loader interface v3 differs from v2 in:
4514 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
4515 * vkDestroySurfaceKHR(), and other APIs which use VkSurfaceKHR,
4516 * because the loader no longer does so.
4517 *
4518 * - Loader interface v4 differs from v3 in:
4519 * - The ICD must implement vk_icdGetPhysicalDeviceProcAddr().
4520 */
4521 *pSupportedVersion = MIN2(*pSupportedVersion, 4u);
4522 return VK_SUCCESS;
4523 }