anv: toggle on VK_EXT_extended_dynamic_state
[mesa.git] / src / intel / vulkan / anv_device.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <fcntl.h>
#include <xf86drm.h>
#include "drm-uapi/drm_fourcc.h"

#include "anv_private.h"
#include "util/debug.h"
#include "util/build_id.h"
#include "util/disk_cache.h"
#include "util/mesa-sha1.h"
#include "util/os_file.h"
#include "util/u_atomic.h"
#include "util/u_string.h"
#include "util/driconf.h"
#include "git_sha1.h"
#include "vk_util.h"
#include "common/gen_aux_map.h"
#include "common/gen_defines.h"
#include "compiler/glsl_types.h"

#include "genxml/gen7_pack.h"

static const char anv_dri_options_xml[] =
DRI_CONF_BEGIN
   DRI_CONF_SECTION_PERFORMANCE
      DRI_CONF_VK_X11_OVERRIDE_MIN_IMAGE_COUNT(0)
      DRI_CONF_VK_X11_STRICT_IMAGE_COUNT("false")
   DRI_CONF_SECTION_END

   DRI_CONF_SECTION_DEBUG
      DRI_CONF_ALWAYS_FLUSH_CACHE("false")
      DRI_CONF_VK_WSI_FORCE_BGRA8_UNORM_FIRST("false")
   DRI_CONF_SECTION_END
DRI_CONF_END;
/* This is probably far too big, but it reflects the max size used for
 * messages in OpenGL's KHR_debug.
 */
#define MAX_DEBUG_MESSAGE_LENGTH 4096

/* Render engine timestamp register */
#define TIMESTAMP 0x2358

static void
compiler_debug_log(void *data, const char *fmt, ...)
{
   char str[MAX_DEBUG_MESSAGE_LENGTH];
   struct anv_device *device = (struct anv_device *)data;
   struct anv_instance *instance = device->physical->instance;

   if (list_is_empty(&instance->debug_report_callbacks.callbacks))
      return;

   va_list args;
   va_start(args, fmt);
   (void) vsnprintf(str, MAX_DEBUG_MESSAGE_LENGTH, fmt, args);
   va_end(args);

   vk_debug_report(&instance->debug_report_callbacks,
                   VK_DEBUG_REPORT_DEBUG_BIT_EXT,
                   VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                   0, 0, 0, "anv", str);
}

static void
compiler_perf_log(void *data, const char *fmt, ...)
{
   va_list args;
   va_start(args, fmt);

   if (unlikely(INTEL_DEBUG & DEBUG_PERF))
      intel_logd_v(fmt, args);

   va_end(args);
}

static uint64_t
anv_compute_heap_size(int fd, uint64_t gtt_size)
{
   /* Query the total ram from the system */
   struct sysinfo info;
   sysinfo(&info);

   uint64_t total_ram = (uint64_t)info.totalram * (uint64_t)info.mem_unit;

   /* We don't want to burn too much ram with the GPU. If the user has 4GiB
    * or less, we use at most half. If they have more than 4GiB, we use 3/4.
    */
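   /* A worked example (illustrative numbers, not from this file): with
    * 16 GiB of RAM we allow 12 GiB, while an 8 GiB GTT is capped below at
    * 6 GiB, so the advertised heap ends up as 6 GiB.
    */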
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   /* We also want to leave some padding for things we allocate in the driver,
    * so don't go over 3/4 of the GTT either.
    */
   uint64_t available_gtt = gtt_size * 3 / 4;

   return MIN2(available_ram, available_gtt);
}

static VkResult
anv_physical_device_init_heaps(struct anv_physical_device *device, int fd)
{
   if (anv_gem_get_context_param(fd, 0, I915_CONTEXT_PARAM_GTT_SIZE,
                                 &device->gtt_size) == -1) {
      /* If, for whatever reason, we can't actually get the GTT size from the
       * kernel (too old?) fall back to the aperture size.
       */
      anv_perf_warn(NULL, NULL,
                    "Failed to get I915_CONTEXT_PARAM_GTT_SIZE: %m");

      if (gen_get_aperture_size(fd, &device->gtt_size) == -1) {
         return vk_errorfi(device->instance, NULL,
                           VK_ERROR_INITIALIZATION_FAILED,
                           "failed to get aperture size: %m");
      }
   }

   /* We only allow 48-bit addresses with softpin because knowing the actual
    * address is required for the vertex cache flush workaround.
    */
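   /* A GTT larger than 4 GiB is taken here as the sign that the kernel gave
    * us a full 48-bit PPGTT rather than a 32-bit one (an inference; the
    * original comment does not spell this out).
    */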
   device->supports_48bit_addresses = (device->info.gen >= 8) &&
                                      device->has_softpin &&
                                      device->gtt_size > (4ULL << 30 /* GiB */);

   uint64_t heap_size = anv_compute_heap_size(fd, device->gtt_size);

   if (heap_size > (2ull << 30) && !device->supports_48bit_addresses) {
      /* When running with an overridden PCI ID, we may get a GTT size from
       * the kernel that is greater than 2 GiB but the execbuf check for 48bit
       * address support can still fail. Just clamp the address space size to
       * 2 GiB if we don't have 48-bit support.
       */
      intel_logw("%s:%d: The kernel reported a GTT size larger than 2 GiB but "
                 "no support for 48-bit addresses",
                 __FILE__, __LINE__);
      heap_size = 2ull << 30;
   }

   device->memory.heap_count = 1;
   device->memory.heaps[0] = (struct anv_memory_heap) {
      .size = heap_size,
      .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
   };

   uint32_t type_count = 0;
   for (uint32_t heap = 0; heap < device->memory.heap_count; heap++) {
      if (device->info.has_llc) {
         /* Big core GPUs share LLC with the CPU and thus one memory type can be
          * both cached and coherent at the same time.
          */
         device->memory.types[type_count++] = (struct anv_memory_type) {
            .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                             VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                             VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
            .heapIndex = heap,
         };
      } else {
         /* The spec requires that we expose a host-visible, coherent memory
          * type, but Atom GPUs don't share LLC. We therefore offer two memory
          * types: one that is cached but not coherent, and one that is
          * coherent but uncached (write-combined).
          */
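         /* In practice (a usage note, not from the source) an application
          * would pick the coherent/WC type for streaming uploads and the
          * cached type for buffers it reads back on the CPU.
          */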
         device->memory.types[type_count++] = (struct anv_memory_type) {
            .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                             VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
            .heapIndex = heap,
         };
         device->memory.types[type_count++] = (struct anv_memory_type) {
            .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                             VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
            .heapIndex = heap,
         };
      }
   }
   device->memory.type_count = type_count;

   return VK_SUCCESS;
}

static VkResult
anv_physical_device_init_uuids(struct anv_physical_device *device)
{
   const struct build_id_note *note =
      build_id_find_nhdr_for_addr(anv_physical_device_init_uuids);
   if (!note) {
      return vk_errorfi(device->instance, NULL,
                        VK_ERROR_INITIALIZATION_FAILED,
                        "Failed to find build-id");
   }

   unsigned build_id_len = build_id_length(note);
   if (build_id_len < 20) {
      return vk_errorfi(device->instance, NULL,
                        VK_ERROR_INITIALIZATION_FAILED,
                        "build-id too short. It needs to be a SHA");
   }

   memcpy(device->driver_build_sha1, build_id_data(note), 20);

   struct mesa_sha1 sha1_ctx;
   uint8_t sha1[20];
   STATIC_ASSERT(VK_UUID_SIZE <= sizeof(sha1));

   /* The pipeline cache UUID is used for determining when a pipeline cache is
    * invalid. It needs both a driver build and the PCI ID of the device.
    */
   _mesa_sha1_init(&sha1_ctx);
   _mesa_sha1_update(&sha1_ctx, build_id_data(note), build_id_len);
   _mesa_sha1_update(&sha1_ctx, &device->info.chipset_id,
                     sizeof(device->info.chipset_id));
   _mesa_sha1_update(&sha1_ctx, &device->always_use_bindless,
                     sizeof(device->always_use_bindless));
   _mesa_sha1_update(&sha1_ctx, &device->has_a64_buffer_access,
                     sizeof(device->has_a64_buffer_access));
   _mesa_sha1_update(&sha1_ctx, &device->has_bindless_images,
                     sizeof(device->has_bindless_images));
   _mesa_sha1_update(&sha1_ctx, &device->has_bindless_samplers,
                     sizeof(device->has_bindless_samplers));
   _mesa_sha1_final(&sha1_ctx, sha1);
   memcpy(device->pipeline_cache_uuid, sha1, VK_UUID_SIZE);

   /* The driver UUID is used for determining sharability of images and memory
    * between two Vulkan instances in separate processes. People who want to
    * share memory need to also check the device UUID (below) so all this
    * needs to be is the build-id.
    */
   memcpy(device->driver_uuid, build_id_data(note), VK_UUID_SIZE);

   /* The device UUID uniquely identifies the given device within the machine.
    * Since we never have more than one device, this doesn't need to be a real
    * UUID. However, on the off-chance that someone tries to use this to
    * cache pre-tiled images or something of the like, we use the PCI ID and
    * some bits of ISL info to ensure that this is safe.
    */
   _mesa_sha1_init(&sha1_ctx);
   _mesa_sha1_update(&sha1_ctx, &device->info.chipset_id,
                     sizeof(device->info.chipset_id));
   _mesa_sha1_update(&sha1_ctx, &device->isl_dev.has_bit6_swizzling,
                     sizeof(device->isl_dev.has_bit6_swizzling));
   _mesa_sha1_final(&sha1_ctx, sha1);
   memcpy(device->device_uuid, sha1, VK_UUID_SIZE);

   return VK_SUCCESS;
}

static void
anv_physical_device_init_disk_cache(struct anv_physical_device *device)
{
#ifdef ENABLE_SHADER_CACHE
   char renderer[10];
   ASSERTED int len = snprintf(renderer, sizeof(renderer), "anv_%04x",
                               device->info.chipset_id);
   assert(len == sizeof(renderer) - 2);

   char timestamp[41];
   _mesa_sha1_format(timestamp, device->driver_build_sha1);

   const uint64_t driver_flags =
      brw_get_compiler_config_value(device->compiler);
   device->disk_cache = disk_cache_create(renderer, timestamp, driver_flags);
#else
   device->disk_cache = NULL;
#endif
}

static void
anv_physical_device_free_disk_cache(struct anv_physical_device *device)
{
#ifdef ENABLE_SHADER_CACHE
   if (device->disk_cache)
      disk_cache_destroy(device->disk_cache);
#else
   assert(device->disk_cache == NULL);
#endif
}

static uint64_t
get_available_system_memory()
{
   char *meminfo = os_read_file("/proc/meminfo", NULL);
   if (!meminfo)
      return 0;

   char *str = strstr(meminfo, "MemAvailable:");
   if (!str) {
      free(meminfo);
      return 0;
   }
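   /* A typical line reads "MemAvailable:   16335252 kB" (example value),
    * i.e. a decimal count of KiB, which is why we parse an unsigned decimal
    * and shift left by 10 below.
    */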
   uint64_t kb_mem_available;
   if (sscanf(str, "MemAvailable: %" PRIu64, &kb_mem_available) == 1) {
      free(meminfo);
      return kb_mem_available << 10;
   }

   free(meminfo);
   return 0;
}

static VkResult
anv_physical_device_try_create(struct anv_instance *instance,
                               drmDevicePtr drm_device,
                               struct anv_physical_device **device_out)
{
   const char *primary_path = drm_device->nodes[DRM_NODE_PRIMARY];
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   VkResult result;
   int fd;
   int master_fd = -1;

   brw_process_intel_debug_variable();

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0)
      return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);

   struct gen_device_info devinfo;
   if (!gen_get_device_info_from_fd(fd, &devinfo)) {
      result = vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
      goto fail_fd;
   }

   const char *device_name = gen_get_device_name(devinfo.chipset_id);

   if (devinfo.is_haswell) {
      intel_logw("Haswell Vulkan support is incomplete");
   } else if (devinfo.gen == 7 && !devinfo.is_baytrail) {
      intel_logw("Ivy Bridge Vulkan support is incomplete");
   } else if (devinfo.gen == 7 && devinfo.is_baytrail) {
      intel_logw("Bay Trail Vulkan support is incomplete");
   } else if (devinfo.gen >= 8 && devinfo.gen <= 11) {
      /* Gen8-11 fully supported */
   } else if (devinfo.gen == 12) {
      intel_logw("Vulkan is not yet fully supported on gen12");
   } else {
      result = vk_errorfi(instance, NULL, VK_ERROR_INCOMPATIBLE_DRIVER,
                          "Vulkan not yet supported on %s", device_name);
      goto fail_fd;
   }

   struct anv_physical_device *device =
      vk_alloc(&instance->alloc, sizeof(*device), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (device == NULL) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_fd;
   }

   vk_object_base_init(NULL, &device->base, VK_OBJECT_TYPE_PHYSICAL_DEVICE);
   device->instance = instance;

   assert(strlen(path) < ARRAY_SIZE(device->path));
   snprintf(device->path, ARRAY_SIZE(device->path), "%s", path);

   device->info = devinfo;
   device->name = device_name;

   device->no_hw = device->info.no_hw;
   if (getenv("INTEL_NO_HW") != NULL)
      device->no_hw = true;

   device->pci_info.domain = drm_device->businfo.pci->domain;
   device->pci_info.bus = drm_device->businfo.pci->bus;
   device->pci_info.device = drm_device->businfo.pci->dev;
   device->pci_info.function = drm_device->businfo.pci->func;

   device->cmd_parser_version = -1;
   if (device->info.gen == 7) {
      device->cmd_parser_version =
         anv_gem_get_param(fd, I915_PARAM_CMD_PARSER_VERSION);
      if (device->cmd_parser_version == -1) {
         result = vk_errorfi(device->instance, NULL,
                             VK_ERROR_INITIALIZATION_FAILED,
                             "failed to get command parser version");
         goto fail_alloc;
      }
   }

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT)) {
      result = vk_errorfi(device->instance, NULL,
                          VK_ERROR_INITIALIZATION_FAILED,
                          "kernel missing gem wait");
      goto fail_alloc;
   }

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2)) {
      result = vk_errorfi(device->instance, NULL,
                          VK_ERROR_INITIALIZATION_FAILED,
                          "kernel missing execbuf2");
      goto fail_alloc;
   }

   if (!device->info.has_llc &&
       anv_gem_get_param(fd, I915_PARAM_MMAP_VERSION) < 1) {
      result = vk_errorfi(device->instance, NULL,
                          VK_ERROR_INITIALIZATION_FAILED,
                          "kernel missing wc mmap");
      goto fail_alloc;
   }

   device->has_softpin = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_SOFTPIN);
   device->has_exec_async = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_ASYNC);
   device->has_exec_capture = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_CAPTURE);
   device->has_exec_fence = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_FENCE);
   device->has_syncobj = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_FENCE_ARRAY);
   device->has_syncobj_wait = device->has_syncobj &&
                              anv_gem_supports_syncobj_wait(fd);
   device->has_context_priority = anv_gem_has_context_priority(fd);

   result = anv_physical_device_init_heaps(device, fd);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   device->use_softpin = device->has_softpin &&
                         device->supports_48bit_addresses;

   device->has_context_isolation =
      anv_gem_get_param(fd, I915_PARAM_HAS_CONTEXT_ISOLATION);

   device->always_use_bindless =
      env_var_as_boolean("ANV_ALWAYS_BINDLESS", false);
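
   /* Calling a secondary command buffer from a primary via
    * MI_BATCH_BUFFER_START needs stable GPU addresses, hence the softpin
    * requirement (an inference; the original code carries no comment here).
    */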
   device->use_call_secondary =
      device->use_softpin &&
      !env_var_as_boolean("ANV_DISABLE_SECONDARY_CMD_BUFFER_CALLS", false);

   /* We first got the A64 messages on Broadwell, and we can only use them if
    * we can pass addresses directly into the shader, which requires softpin.
    */
   device->has_a64_buffer_access = device->info.gen >= 8 &&
                                   device->use_softpin;

   /* We first get bindless image access on Skylake, and we can only really
    * do it if we don't have any relocations, so we need softpin.
    */
   device->has_bindless_images = device->info.gen >= 9 &&
                                 device->use_softpin;

   /* We've had bindless samplers since Ivy Bridge (forever in Vulkan terms)
    * because it's just a matter of setting the sampler address in the sample
    * message header. However, we've not bothered to wire it up for vec4 so
    * we leave it disabled on gen7.
    */
   device->has_bindless_samplers = device->info.gen >= 8;
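
   /* On gen12 the aux-map hardware translates main-surface addresses to
    * their compression (CCS) data on its own, so no explicit CCS surface
    * setup is needed; hence "implicit CCS" (background added here, not
    * stated in the source).
    */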
   device->has_implicit_ccs = device->info.has_aux_map;

   /* Check if we can read the GPU timestamp register from the CPU */
   uint64_t u64_ignore;
   device->has_reg_timestamp = anv_gem_reg_read(fd, TIMESTAMP | I915_REG_READ_8B_WA,
                                                &u64_ignore) == 0;

   device->has_mem_available = get_available_system_memory() != 0;

   device->always_flush_cache =
      driQueryOptionb(&instance->dri_options, "always_flush_cache");

   device->has_mmap_offset =
      anv_gem_get_param(fd, I915_PARAM_MMAP_GTT_VERSION) >= 4;

   /* GENs prior to 8 do not support EU/Subslice info */
   if (device->info.gen >= 8) {
      device->subslice_total = anv_gem_get_param(fd, I915_PARAM_SUBSLICE_TOTAL);
      device->eu_total = anv_gem_get_param(fd, I915_PARAM_EU_TOTAL);

      /* Without this information, we cannot get the right Braswell
       * brandstrings, and we have to use conservative numbers for GPGPU on
       * many platforms, but otherwise, things will just work.
       */
      if (device->subslice_total < 1 || device->eu_total < 1) {
         intel_logw("Kernel 4.1 required to properly query GPU properties");
      }
   } else if (device->info.gen == 7) {
      device->subslice_total = 1 << (device->info.gt - 1);
   }

   if (device->info.is_cherryview &&
       device->subslice_total > 0 && device->eu_total > 0) {
      /* Logical CS threads = EUs per subslice * num threads per EU */
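      /* Illustrative (an assumed fuse configuration, not from the source):
       * 16 EUs over 2 subslices at 7 threads/EU gives 8 * 7 = 56 logical
       * CS threads.
       */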
      uint32_t max_cs_threads =
         device->eu_total / device->subslice_total * device->info.num_thread_per_eu;

      /* Fuse configurations may give more threads than expected, never less. */
      if (max_cs_threads > device->info.max_cs_threads)
         device->info.max_cs_threads = max_cs_threads;
   }

   device->compiler = brw_compiler_create(NULL, &device->info);
   if (device->compiler == NULL) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_alloc;
   }
   device->compiler->shader_debug_log = compiler_debug_log;
   device->compiler->shader_perf_log = compiler_perf_log;
   device->compiler->supports_pull_constants = false;
   device->compiler->constant_buffer_0_is_relative =
      device->info.gen < 8 || !device->has_context_isolation;
   device->compiler->supports_shader_constants = true;
   device->compiler->compact_params = false;

   /* Broadwell PRM says:
    *
    *   "Before Gen8, there was a historical configuration control field to
    *    swizzle address bit[6] for in X/Y tiling modes. This was set in three
    *    different places: TILECTL[1:0], ARB_MODE[5:4], and
    *    DISP_ARB_CTL[14:13].
    *
    *    For Gen8 and subsequent generations, the swizzle fields are all
    *    reserved, and the CPU's memory controller performs all address
    *    swizzling modifications."
    */
   bool swizzled =
      device->info.gen < 8 && anv_gem_get_bit6_swizzle(fd, I915_TILING_X);

   isl_device_init(&device->isl_dev, &device->info, swizzled);

   result = anv_physical_device_init_uuids(device);
   if (result != VK_SUCCESS)
      goto fail_compiler;

   anv_physical_device_init_disk_cache(device);

   if (instance->enabled_extensions.KHR_display) {
      master_fd = open(primary_path, O_RDWR | O_CLOEXEC);
      if (master_fd >= 0) {
         /* prod the device with a GETPARAM call which will fail if
          * we don't have permission to even render on this device
          */
         if (anv_gem_get_param(master_fd, I915_PARAM_CHIPSET_ID) == 0) {
            close(master_fd);
            master_fd = -1;
         }
      }
   }
   device->master_fd = master_fd;

   result = anv_init_wsi(device);
   if (result != VK_SUCCESS)
      goto fail_disk_cache;

   device->perf = anv_get_perf(&device->info, fd);

   anv_physical_device_get_supported_extensions(device,
                                                &device->supported_extensions);

   device->local_fd = fd;

   *device_out = device;

   return VK_SUCCESS;

 fail_disk_cache:
   anv_physical_device_free_disk_cache(device);
 fail_compiler:
   ralloc_free(device->compiler);
 fail_alloc:
   vk_free(&instance->alloc, device);
 fail_fd:
   close(fd);
   if (master_fd != -1)
      close(master_fd);
   return result;
}

static void
anv_physical_device_destroy(struct anv_physical_device *device)
{
   anv_finish_wsi(device);
   anv_physical_device_free_disk_cache(device);
   ralloc_free(device->compiler);
   ralloc_free(device->perf);
   close(device->local_fd);
   if (device->master_fd >= 0)
      close(device->master_fd);
   vk_object_base_finish(&device->base);
   vk_free(&device->instance->alloc, device);
}

static void *
default_alloc_func(void *pUserData, size_t size, size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static void *
default_realloc_func(void *pUserData, void *pOriginal, size_t size,
                     size_t align, VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};

VkResult anv_EnumerateInstanceExtensionProperties(
    const char*                                 pLayerName,
    uint32_t*                                   pPropertyCount,
    VkExtensionProperties*                      pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
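   /* vk_outarray implements the standard Vulkan two-call idiom: with
    * pProperties == NULL it only counts, otherwise it fills at most
    * *pPropertyCount entries and reports VK_INCOMPLETE if that was not
    * enough (a note on the helper, not part of the original file).
    */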

   for (int i = 0; i < ANV_INSTANCE_EXTENSION_COUNT; i++) {
      if (anv_instance_extensions_supported.extensions[i]) {
         vk_outarray_append(&out, prop) {
            *prop = anv_instance_extensions[i];
         }
      }
   }

   return vk_outarray_status(&out);
}

VkResult anv_CreateInstance(
    const VkInstanceCreateInfo*                 pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkInstance*                                 pInstance)
{
   struct anv_instance *instance;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   struct anv_instance_extension_table enabled_extensions = {};
   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      int idx;
      for (idx = 0; idx < ANV_INSTANCE_EXTENSION_COUNT; idx++) {
         if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
                    anv_instance_extensions[idx].extensionName) == 0)
            break;
      }

      if (idx >= ANV_INSTANCE_EXTENSION_COUNT)
         return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);

      if (!anv_instance_extensions_supported.extensions[idx])
         return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);

      enabled_extensions.extensions[idx] = true;
   }

   instance = vk_alloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
                        VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(NULL, &instance->base, VK_OBJECT_TYPE_INSTANCE);

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->app_info = (struct anv_app_info) { .api_version = 0 };
   if (pCreateInfo->pApplicationInfo) {
      const VkApplicationInfo *app = pCreateInfo->pApplicationInfo;

      instance->app_info.app_name =
         vk_strdup(&instance->alloc, app->pApplicationName,
                   VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
      instance->app_info.app_version = app->applicationVersion;

      instance->app_info.engine_name =
         vk_strdup(&instance->alloc, app->pEngineName,
                   VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
      instance->app_info.engine_version = app->engineVersion;

      instance->app_info.api_version = app->apiVersion;
   }

   if (instance->app_info.api_version == 0)
      instance->app_info.api_version = VK_API_VERSION_1_0;

   instance->enabled_extensions = enabled_extensions;

   for (unsigned i = 0; i < ARRAY_SIZE(instance->dispatch.entrypoints); i++) {
      /* Vulkan requires that entrypoints for extensions which have not been
       * enabled must not be advertised.
       */
      if (!anv_instance_entrypoint_is_enabled(i, instance->app_info.api_version,
                                              &instance->enabled_extensions)) {
         instance->dispatch.entrypoints[i] = NULL;
      } else {
         instance->dispatch.entrypoints[i] =
            anv_instance_dispatch_table.entrypoints[i];
      }
   }

   for (unsigned i = 0; i < ARRAY_SIZE(instance->physical_device_dispatch.entrypoints); i++) {
      /* Vulkan requires that entrypoints for extensions which have not been
       * enabled must not be advertised.
       */
      if (!anv_physical_device_entrypoint_is_enabled(i, instance->app_info.api_version,
                                                     &instance->enabled_extensions)) {
         instance->physical_device_dispatch.entrypoints[i] = NULL;
      } else {
         instance->physical_device_dispatch.entrypoints[i] =
            anv_physical_device_dispatch_table.entrypoints[i];
      }
   }

   for (unsigned i = 0; i < ARRAY_SIZE(instance->device_dispatch.entrypoints); i++) {
      /* Vulkan requires that entrypoints for extensions which have not been
       * enabled must not be advertised.
       */
      if (!anv_device_entrypoint_is_enabled(i, instance->app_info.api_version,
                                            &instance->enabled_extensions, NULL)) {
         instance->device_dispatch.entrypoints[i] = NULL;
      } else {
         instance->device_dispatch.entrypoints[i] =
            anv_device_dispatch_table.entrypoints[i];
      }
   }

   instance->physical_devices_enumerated = false;
   list_inithead(&instance->physical_devices);

   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
   if (result != VK_SUCCESS) {
      vk_free2(&default_alloc, pAllocator, instance);
      return vk_error(result);
   }

   instance->pipeline_cache_enabled =
      env_var_as_boolean("ANV_ENABLE_PIPELINE_CACHE", true);

   glsl_type_singleton_init_or_ref();

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   driParseOptionInfo(&instance->available_dri_options, anv_dri_options_xml);
   driParseConfigFiles(&instance->dri_options, &instance->available_dri_options,
                       0, "anv", NULL,
                       instance->app_info.engine_name,
                       instance->app_info.engine_version);

   *pInstance = anv_instance_to_handle(instance);

   return VK_SUCCESS;
}

void anv_DestroyInstance(
    VkInstance                                  _instance,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);

   if (!instance)
      return;

   list_for_each_entry_safe(struct anv_physical_device, pdevice,
                            &instance->physical_devices, link)
      anv_physical_device_destroy(pdevice);

   vk_free(&instance->alloc, (char *)instance->app_info.app_name);
   vk_free(&instance->alloc, (char *)instance->app_info.engine_name);

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   glsl_type_singleton_decref();

   driDestroyOptionCache(&instance->dri_options);
   driDestroyOptionInfo(&instance->available_dri_options);

   vk_object_base_finish(&instance->base);
   vk_free(&instance->alloc, instance);
}

static VkResult
anv_enumerate_physical_devices(struct anv_instance *instance)
{
   if (instance->physical_devices_enumerated)
      return VK_SUCCESS;

   instance->physical_devices_enumerated = true;

   /* TODO: Check for more devices? */
   drmDevicePtr devices[8];
   int max_devices;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
   if (max_devices < 1)
      return VK_SUCCESS;

   VkResult result = VK_SUCCESS;
   for (unsigned i = 0; i < (unsigned)max_devices; i++) {
      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
          devices[i]->bustype == DRM_BUS_PCI &&
          devices[i]->deviceinfo.pci->vendor_id == 0x8086) {

         struct anv_physical_device *pdevice;
         result = anv_physical_device_try_create(instance, devices[i],
                                                 &pdevice);
         /* Incompatible DRM device, skip. */
         if (result == VK_ERROR_INCOMPATIBLE_DRIVER) {
            result = VK_SUCCESS;
            continue;
         }

         /* Error creating the physical device, report the error. */
         if (result != VK_SUCCESS)
            break;

         list_addtail(&pdevice->link, &instance->physical_devices);
      }
   }
   drmFreeDevices(devices, max_devices);

   /* If we successfully enumerated any devices, call it success */
   return result;
}

VkResult anv_EnumeratePhysicalDevices(
    VkInstance                                  _instance,
    uint32_t*                                   pPhysicalDeviceCount,
    VkPhysicalDevice*                           pPhysicalDevices)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);

   VkResult result = anv_enumerate_physical_devices(instance);
   if (result != VK_SUCCESS)
      return result;

   list_for_each_entry(struct anv_physical_device, pdevice,
                       &instance->physical_devices, link) {
      vk_outarray_append(&out, i) {
         *i = anv_physical_device_to_handle(pdevice);
      }
   }

   return vk_outarray_status(&out);
}

VkResult anv_EnumeratePhysicalDeviceGroups(
    VkInstance                                  _instance,
    uint32_t*                                   pPhysicalDeviceGroupCount,
    VkPhysicalDeviceGroupProperties*            pPhysicalDeviceGroupProperties)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
                    pPhysicalDeviceGroupCount);

   VkResult result = anv_enumerate_physical_devices(instance);
   if (result != VK_SUCCESS)
      return result;

   list_for_each_entry(struct anv_physical_device, pdevice,
                       &instance->physical_devices, link) {
      vk_outarray_append(&out, p) {
         p->physicalDeviceCount = 1;
         memset(p->physicalDevices, 0, sizeof(p->physicalDevices));
         p->physicalDevices[0] = anv_physical_device_to_handle(pdevice);
         p->subsetAllocation = false;

         vk_foreach_struct(ext, p->pNext)
            anv_debug_ignored_stype(ext->sType);
      }
   }

   return vk_outarray_status(&out);
}

void anv_GetPhysicalDeviceFeatures(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceFeatures*                   pFeatures)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);

   *pFeatures = (VkPhysicalDeviceFeatures) {
      .robustBufferAccess = true,
      .fullDrawIndexUint32 = true,
      .imageCubeArray = true,
      .independentBlend = true,
      .geometryShader = true,
      .tessellationShader = true,
      .sampleRateShading = true,
      .dualSrcBlend = true,
      .logicOp = true,
      .multiDrawIndirect = true,
      .drawIndirectFirstInstance = true,
      .depthClamp = true,
      .depthBiasClamp = true,
      .fillModeNonSolid = true,
      .depthBounds = pdevice->info.gen >= 12,
      .wideLines = true,
      .largePoints = true,
      .alphaToOne = true,
      .multiViewport = true,
      .samplerAnisotropy = true,
      .textureCompressionETC2 = pdevice->info.gen >= 8 ||
                                pdevice->info.is_baytrail,
      .textureCompressionASTC_LDR = pdevice->info.gen >= 9, /* FINISHME CHV */
      .textureCompressionBC = true,
      .occlusionQueryPrecise = true,
      .pipelineStatisticsQuery = true,
      .fragmentStoresAndAtomics = true,
      .shaderTessellationAndGeometryPointSize = true,
      .shaderImageGatherExtended = true,
      .shaderStorageImageExtendedFormats = true,
      .shaderStorageImageMultisample = false,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = true,
      .shaderUniformBufferArrayDynamicIndexing = true,
      .shaderSampledImageArrayDynamicIndexing = true,
      .shaderStorageBufferArrayDynamicIndexing = true,
      .shaderStorageImageArrayDynamicIndexing = true,
      .shaderClipDistance = true,
      .shaderCullDistance = true,
      .shaderFloat64 = pdevice->info.gen >= 8 &&
                       pdevice->info.has_64bit_float,
      .shaderInt64 = pdevice->info.gen >= 8 &&
                     pdevice->info.has_64bit_int,
      .shaderInt16 = pdevice->info.gen >= 8,
      .shaderResourceMinLod = pdevice->info.gen >= 9,
      .variableMultisampleRate = true,
      .inheritedQueries = true,
   };

   /* We can't do image stores in vec4 shaders */
   pFeatures->vertexPipelineStoresAndAtomics =
      pdevice->compiler->scalar_stage[MESA_SHADER_VERTEX] &&
      pdevice->compiler->scalar_stage[MESA_SHADER_GEOMETRY];

   struct anv_app_info *app_info = &pdevice->instance->app_info;

   /* The new DOOM and Wolfenstein games require depthBounds without
    * checking for it. They seem to run fine without it so just claim it's
    * there and accept the consequences.
    */
   if (app_info->engine_name && strcmp(app_info->engine_name, "idTech") == 0)
      pFeatures->depthBounds = true;
}

static void
anv_get_physical_device_features_1_1(struct anv_physical_device *pdevice,
                                     VkPhysicalDeviceVulkan11Features *f)
{
   assert(f->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES);

   f->storageBuffer16BitAccess = pdevice->info.gen >= 8;
   f->uniformAndStorageBuffer16BitAccess = pdevice->info.gen >= 8;
   f->storagePushConstant16 = pdevice->info.gen >= 8;
   f->storageInputOutput16 = false;
   f->multiview = true;
   f->multiviewGeometryShader = true;
   f->multiviewTessellationShader = true;
   f->variablePointersStorageBuffer = true;
   f->variablePointers = true;
   f->protectedMemory = false;
   f->samplerYcbcrConversion = true;
   f->shaderDrawParameters = true;
}

static void
anv_get_physical_device_features_1_2(struct anv_physical_device *pdevice,
                                     VkPhysicalDeviceVulkan12Features *f)
{
   assert(f->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES);

   f->samplerMirrorClampToEdge = true;
   f->drawIndirectCount = true;
   f->storageBuffer8BitAccess = pdevice->info.gen >= 8;
   f->uniformAndStorageBuffer8BitAccess = pdevice->info.gen >= 8;
   f->storagePushConstant8 = pdevice->info.gen >= 8;
   f->shaderBufferInt64Atomics = pdevice->info.gen >= 9 &&
                                 pdevice->use_softpin;
   f->shaderSharedInt64Atomics = false;
   f->shaderFloat16 = pdevice->info.gen >= 8;
   f->shaderInt8 = pdevice->info.gen >= 8;

   bool descIndexing = pdevice->has_a64_buffer_access &&
                       pdevice->has_bindless_images;
   f->descriptorIndexing = descIndexing;
   f->shaderInputAttachmentArrayDynamicIndexing = false;
   f->shaderUniformTexelBufferArrayDynamicIndexing = descIndexing;
   f->shaderStorageTexelBufferArrayDynamicIndexing = descIndexing;
   f->shaderUniformBufferArrayNonUniformIndexing = false;
   f->shaderSampledImageArrayNonUniformIndexing = descIndexing;
   f->shaderStorageBufferArrayNonUniformIndexing = descIndexing;
   f->shaderStorageImageArrayNonUniformIndexing = descIndexing;
   f->shaderInputAttachmentArrayNonUniformIndexing = false;
   f->shaderUniformTexelBufferArrayNonUniformIndexing = descIndexing;
   f->shaderStorageTexelBufferArrayNonUniformIndexing = descIndexing;
   f->descriptorBindingUniformBufferUpdateAfterBind = false;
   f->descriptorBindingSampledImageUpdateAfterBind = descIndexing;
   f->descriptorBindingStorageImageUpdateAfterBind = descIndexing;
   f->descriptorBindingStorageBufferUpdateAfterBind = descIndexing;
   f->descriptorBindingUniformTexelBufferUpdateAfterBind = descIndexing;
   f->descriptorBindingStorageTexelBufferUpdateAfterBind = descIndexing;
   f->descriptorBindingUpdateUnusedWhilePending = descIndexing;
   f->descriptorBindingPartiallyBound = descIndexing;
   f->descriptorBindingVariableDescriptorCount = false;
   f->runtimeDescriptorArray = descIndexing;

   f->samplerFilterMinmax = pdevice->info.gen >= 9;
   f->scalarBlockLayout = true;
   f->imagelessFramebuffer = true;
   f->uniformBufferStandardLayout = true;
   f->shaderSubgroupExtendedTypes = true;
   f->separateDepthStencilLayouts = true;
   f->hostQueryReset = true;
   f->timelineSemaphore = true;
   f->bufferDeviceAddress = pdevice->has_a64_buffer_access;
   f->bufferDeviceAddressCaptureReplay = pdevice->has_a64_buffer_access;
   f->bufferDeviceAddressMultiDevice = false;
   f->vulkanMemoryModel = true;
   f->vulkanMemoryModelDeviceScope = true;
   f->vulkanMemoryModelAvailabilityVisibilityChains = true;
   f->shaderOutputViewportIndex = true;
   f->shaderOutputLayer = true;
   f->subgroupBroadcastDynamicId = true;
}

void anv_GetPhysicalDeviceFeatures2(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceFeatures2*                  pFeatures)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
   anv_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);

   VkPhysicalDeviceVulkan11Features core_1_1 = {
      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES,
   };
   anv_get_physical_device_features_1_1(pdevice, &core_1_1);

   VkPhysicalDeviceVulkan12Features core_1_2 = {
      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES,
   };
   anv_get_physical_device_features_1_2(pdevice, &core_1_2);

#define CORE_FEATURE(major, minor, feature) \
   features->feature = core_##major##_##minor.feature
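
/* For example (expansion shown for illustration), CORE_FEATURE(1, 2,
 * hostQueryReset) becomes
 *
 *    features->hostQueryReset = core_1_2.hostQueryReset;
 *
 * so each legacy extension struct below simply mirrors the corresponding
 * core feature struct filled in above.
 */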

   vk_foreach_struct(ext, pFeatures->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR: {
         VkPhysicalDevice8BitStorageFeaturesKHR *features =
            (VkPhysicalDevice8BitStorageFeaturesKHR *)ext;
         CORE_FEATURE(1, 2, storageBuffer8BitAccess);
         CORE_FEATURE(1, 2, uniformAndStorageBuffer8BitAccess);
         CORE_FEATURE(1, 2, storagePushConstant8);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
         VkPhysicalDevice16BitStorageFeatures *features =
            (VkPhysicalDevice16BitStorageFeatures *)ext;
         CORE_FEATURE(1, 1, storageBuffer16BitAccess);
         CORE_FEATURE(1, 1, uniformAndStorageBuffer16BitAccess);
         CORE_FEATURE(1, 1, storagePushConstant16);
         CORE_FEATURE(1, 1, storageInputOutput16);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT: {
         VkPhysicalDeviceBufferDeviceAddressFeaturesEXT *features = (void *)ext;
         features->bufferDeviceAddress = pdevice->has_a64_buffer_access;
         features->bufferDeviceAddressCaptureReplay = false;
         features->bufferDeviceAddressMultiDevice = false;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_KHR: {
         VkPhysicalDeviceBufferDeviceAddressFeaturesKHR *features = (void *)ext;
         CORE_FEATURE(1, 2, bufferDeviceAddress);
         CORE_FEATURE(1, 2, bufferDeviceAddressCaptureReplay);
         CORE_FEATURE(1, 2, bufferDeviceAddressMultiDevice);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV: {
         VkPhysicalDeviceComputeShaderDerivativesFeaturesNV *features =
            (VkPhysicalDeviceComputeShaderDerivativesFeaturesNV *)ext;
         features->computeDerivativeGroupQuads = true;
         features->computeDerivativeGroupLinear = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
         VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
            (VkPhysicalDeviceConditionalRenderingFeaturesEXT*)ext;
         features->conditionalRendering = pdevice->info.gen >= 8 ||
                                          pdevice->info.is_haswell;
         features->inheritedConditionalRendering = pdevice->info.gen >= 8 ||
                                                   pdevice->info.is_haswell;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT: {
         VkPhysicalDeviceCustomBorderColorFeaturesEXT *features =
            (VkPhysicalDeviceCustomBorderColorFeaturesEXT *)ext;
         features->customBorderColors = pdevice->info.gen >= 8;
         features->customBorderColorWithoutFormat = pdevice->info.gen >= 8;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT: {
         VkPhysicalDeviceDepthClipEnableFeaturesEXT *features =
            (VkPhysicalDeviceDepthClipEnableFeaturesEXT *)ext;
         features->depthClipEnable = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR: {
         VkPhysicalDeviceFloat16Int8FeaturesKHR *features = (void *)ext;
         CORE_FEATURE(1, 2, shaderFloat16);
         CORE_FEATURE(1, 2, shaderInt8);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT: {
         VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT *features =
            (VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT *)ext;
         features->fragmentShaderSampleInterlock = pdevice->info.gen >= 9;
         features->fragmentShaderPixelInterlock = pdevice->info.gen >= 9;
         features->fragmentShaderShadingRateInterlock = false;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT: {
         VkPhysicalDeviceHostQueryResetFeaturesEXT *features =
            (VkPhysicalDeviceHostQueryResetFeaturesEXT *)ext;
         CORE_FEATURE(1, 2, hostQueryReset);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
         VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
            (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *)ext;
         CORE_FEATURE(1, 2, shaderInputAttachmentArrayDynamicIndexing);
         CORE_FEATURE(1, 2, shaderUniformTexelBufferArrayDynamicIndexing);
         CORE_FEATURE(1, 2, shaderStorageTexelBufferArrayDynamicIndexing);
         CORE_FEATURE(1, 2, shaderUniformBufferArrayNonUniformIndexing);
         CORE_FEATURE(1, 2, shaderSampledImageArrayNonUniformIndexing);
         CORE_FEATURE(1, 2, shaderStorageBufferArrayNonUniformIndexing);
         CORE_FEATURE(1, 2, shaderStorageImageArrayNonUniformIndexing);
         CORE_FEATURE(1, 2, shaderInputAttachmentArrayNonUniformIndexing);
         CORE_FEATURE(1, 2, shaderUniformTexelBufferArrayNonUniformIndexing);
         CORE_FEATURE(1, 2, shaderStorageTexelBufferArrayNonUniformIndexing);
         CORE_FEATURE(1, 2, descriptorBindingUniformBufferUpdateAfterBind);
         CORE_FEATURE(1, 2, descriptorBindingSampledImageUpdateAfterBind);
         CORE_FEATURE(1, 2, descriptorBindingStorageImageUpdateAfterBind);
         CORE_FEATURE(1, 2, descriptorBindingStorageBufferUpdateAfterBind);
         CORE_FEATURE(1, 2, descriptorBindingUniformTexelBufferUpdateAfterBind);
         CORE_FEATURE(1, 2, descriptorBindingStorageTexelBufferUpdateAfterBind);
         CORE_FEATURE(1, 2, descriptorBindingUpdateUnusedWhilePending);
         CORE_FEATURE(1, 2, descriptorBindingPartiallyBound);
         CORE_FEATURE(1, 2, descriptorBindingVariableDescriptorCount);
         CORE_FEATURE(1, 2, runtimeDescriptorArray);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT: {
         VkPhysicalDeviceImageRobustnessFeaturesEXT *features =
            (VkPhysicalDeviceImageRobustnessFeaturesEXT *)ext;
         features->robustImageAccess = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT: {
         VkPhysicalDeviceIndexTypeUint8FeaturesEXT *features =
            (VkPhysicalDeviceIndexTypeUint8FeaturesEXT *)ext;
         features->indexTypeUint8 = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT: {
         VkPhysicalDeviceInlineUniformBlockFeaturesEXT *features =
            (VkPhysicalDeviceInlineUniformBlockFeaturesEXT *)ext;
         features->inlineUniformBlock = true;
         features->descriptorBindingInlineUniformBlockUpdateAfterBind = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT: {
         VkPhysicalDeviceLineRasterizationFeaturesEXT *features =
            (VkPhysicalDeviceLineRasterizationFeaturesEXT *)ext;
         features->rectangularLines = true;
         features->bresenhamLines = true;
         /* Support for Smooth lines with MSAA was removed on gen11. From the
          * BSpec section "Multisample ModesState" table for "AA Line Support
          * Requirements":
          *
          *    GEN10:BUG:########  NUM_MULTISAMPLES == 1
          *
          * Fortunately, this isn't a case most people care about.
          */
         features->smoothLines = pdevice->info.gen < 10;
         features->stippledRectangularLines = false;
         features->stippledBresenhamLines = true;
         features->stippledSmoothLines = false;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
         VkPhysicalDeviceMultiviewFeatures *features =
            (VkPhysicalDeviceMultiviewFeatures *)ext;
         CORE_FEATURE(1, 1, multiview);
         CORE_FEATURE(1, 1, multiviewGeometryShader);
         CORE_FEATURE(1, 1, multiviewTessellationShader);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES_KHR: {
         VkPhysicalDeviceImagelessFramebufferFeaturesKHR *features =
            (VkPhysicalDeviceImagelessFramebufferFeaturesKHR *)ext;
         CORE_FEATURE(1, 2, imagelessFramebuffer);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR: {
         VkPhysicalDevicePerformanceQueryFeaturesKHR *feature =
            (VkPhysicalDevicePerformanceQueryFeaturesKHR *)ext;
         feature->performanceCounterQueryPools = true;
         /* HW only supports a single configuration at a time. */
         feature->performanceCounterMultipleQueryPools = false;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT: {
         VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT *features =
            (VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT *)ext;
         features->pipelineCreationCacheControl = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR: {
         VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR *features =
            (VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR *)ext;
         features->pipelineExecutableInfo = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT: {
         VkPhysicalDevicePrivateDataFeaturesEXT *features = (void *)ext;
         features->privateData = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
         VkPhysicalDeviceProtectedMemoryFeatures *features = (void *)ext;
         CORE_FEATURE(1, 1, protectedMemory);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT: {
         VkPhysicalDeviceRobustness2FeaturesEXT *features = (void *)ext;
         features->robustBufferAccess2 = true;
         features->robustImageAccess2 = true;
         features->nullDescriptor = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
         VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
            (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
         CORE_FEATURE(1, 1, samplerYcbcrConversion);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES_EXT: {
         VkPhysicalDeviceScalarBlockLayoutFeaturesEXT *features =
            (VkPhysicalDeviceScalarBlockLayoutFeaturesEXT *)ext;
         CORE_FEATURE(1, 2, scalarBlockLayout);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES_KHR: {
         VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR *features =
            (VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR *)ext;
         CORE_FEATURE(1, 2, separateDepthStencilLayouts);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT: {
         VkPhysicalDeviceShaderAtomicFloatFeaturesEXT *features = (void *)ext;
         features->shaderBufferFloat32Atomics = true;
         features->shaderBufferFloat32AtomicAdd = false;
         features->shaderBufferFloat64Atomics = false;
         features->shaderBufferFloat64AtomicAdd = false;
         features->shaderSharedFloat32Atomics = true;
         features->shaderSharedFloat32AtomicAdd = false;
         features->shaderSharedFloat64Atomics = false;
         features->shaderSharedFloat64AtomicAdd = false;
         features->shaderImageFloat32Atomics = true;
         features->shaderImageFloat32AtomicAdd = false;
         features->sparseImageFloat32Atomics = false;
         features->sparseImageFloat32AtomicAdd = false;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR: {
         VkPhysicalDeviceShaderAtomicInt64FeaturesKHR *features = (void *)ext;
         CORE_FEATURE(1, 2, shaderBufferInt64Atomics);
         CORE_FEATURE(1, 2, shaderSharedInt64Atomics);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT: {
         VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT *features = (void *)ext;
         features->shaderDemoteToHelperInvocation = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR: {
         VkPhysicalDeviceShaderClockFeaturesKHR *features =
            (VkPhysicalDeviceShaderClockFeaturesKHR *)ext;
         features->shaderSubgroupClock = true;
         features->shaderDeviceClock = false;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: {
         VkPhysicalDeviceShaderDrawParametersFeatures *features = (void *)ext;
         CORE_FEATURE(1, 1, shaderDrawParameters);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES_KHR: {
         VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR *features =
            (VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR *)ext;
         CORE_FEATURE(1, 2, shaderSubgroupExtendedTypes);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT: {
         VkPhysicalDeviceSubgroupSizeControlFeaturesEXT *features =
            (VkPhysicalDeviceSubgroupSizeControlFeaturesEXT *)ext;
         features->subgroupSizeControl = true;
         features->computeFullSubgroups = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT: {
         VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT *features =
            (VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT *)ext;
         features->texelBufferAlignment = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES_KHR: {
         VkPhysicalDeviceTimelineSemaphoreFeaturesKHR *features =
            (VkPhysicalDeviceTimelineSemaphoreFeaturesKHR *) ext;
         CORE_FEATURE(1, 2, timelineSemaphore);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
         VkPhysicalDeviceVariablePointersFeatures *features = (void *)ext;
         CORE_FEATURE(1, 1, variablePointersStorageBuffer);
         CORE_FEATURE(1, 1, variablePointers);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT: {
         VkPhysicalDeviceTransformFeedbackFeaturesEXT *features =
            (VkPhysicalDeviceTransformFeedbackFeaturesEXT *)ext;
         features->transformFeedback = true;
         features->geometryStreams = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES_KHR: {
         VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR *features =
            (VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR *)ext;
         CORE_FEATURE(1, 2, uniformBufferStandardLayout);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT: {
         VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *features =
            (VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *)ext;
         features->vertexAttributeInstanceRateDivisor = true;
         features->vertexAttributeInstanceRateZeroDivisor = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES:
         anv_get_physical_device_features_1_1(pdevice, (void *)ext);
         break;

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES:
         anv_get_physical_device_features_1_2(pdevice, (void *)ext);
         break;

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES_KHR: {
         VkPhysicalDeviceVulkanMemoryModelFeaturesKHR *features = (void *)ext;
         CORE_FEATURE(1, 2, vulkanMemoryModel);
         CORE_FEATURE(1, 2, vulkanMemoryModelDeviceScope);
         CORE_FEATURE(1, 2, vulkanMemoryModelAvailabilityVisibilityChains);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT: {
         VkPhysicalDeviceYcbcrImageArraysFeaturesEXT *features =
            (VkPhysicalDeviceYcbcrImageArraysFeaturesEXT *)ext;
         features->ycbcrImageArrays = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT: {
         VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *features =
            (VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *)ext;
         features->extendedDynamicState = true;
         break;
      }

      default:
         anv_debug_ignored_stype(ext->sType);
         break;
      }
   }

#undef CORE_FEATURE
}

#define MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BUFFERS 64

#define MAX_PER_STAGE_DESCRIPTOR_INPUT_ATTACHMENTS 64
#define MAX_DESCRIPTOR_SET_INPUT_ATTACHMENTS 256

#define MAX_CUSTOM_BORDER_COLORS 4096

void anv_GetPhysicalDeviceProperties(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceProperties*                 pProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
   const struct gen_device_info *devinfo = &pdevice->info;

   /* See assertions made when programming the buffer surface state. */
   const uint32_t max_raw_buffer_sz = devinfo->gen >= 7 ?
                                      (1ul << 30) : (1ul << 27);

   const uint32_t max_ssbos = pdevice->has_a64_buffer_access ? UINT16_MAX : 64;
   const uint32_t max_textures =
      pdevice->has_bindless_images ? UINT16_MAX : 128;
   const uint32_t max_samplers =
      pdevice->has_bindless_samplers ? UINT16_MAX :
      (devinfo->gen >= 8 || devinfo->is_haswell) ? 128 : 16;
   const uint32_t max_images =
      pdevice->has_bindless_images ? UINT16_MAX : MAX_IMAGES;

   /* If we can use bindless for everything, claim a high per-stage limit,
    * otherwise use the binding table size, minus the slots reserved for
    * render targets and one slot for the descriptor buffer. */
   const uint32_t max_per_stage =
      pdevice->has_bindless_images && pdevice->has_a64_buffer_access
      ? UINT32_MAX : MAX_BINDING_TABLE_SIZE - MAX_RTS - 1;

   /* Limit max_threads to 64 for the GPGPU_WALKER command */
   const uint32_t max_workgroup_size = 32 * MIN2(64, devinfo->max_cs_threads);
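   /* E.g. (illustrative numbers): with max_cs_threads = 56 this yields
    * 32 * 56 = 1792 invocations (SIMD32 times thread count); the MIN2 keeps
    * the thread count within what GPGPU_WALKER can encode.
    */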
1502
1503 VkSampleCountFlags sample_counts =
1504 isl_device_get_sample_counts(&pdevice->isl_dev);
1505
1506
1507 VkPhysicalDeviceLimits limits = {
1508 .maxImageDimension1D = (1 << 14),
1509 .maxImageDimension2D = (1 << 14),
1510 .maxImageDimension3D = (1 << 11),
1511 .maxImageDimensionCube = (1 << 14),
1512 .maxImageArrayLayers = (1 << 11),
1513 .maxTexelBufferElements = 128 * 1024 * 1024,
1514 .maxUniformBufferRange = (1ul << 27),
1515 .maxStorageBufferRange = max_raw_buffer_sz,
1516 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
1517 .maxMemoryAllocationCount = UINT32_MAX,
1518 .maxSamplerAllocationCount = 64 * 1024,
1519 .bufferImageGranularity = 64, /* A cache line */
1520 .sparseAddressSpaceSize = 0,
1521 .maxBoundDescriptorSets = MAX_SETS,
1522 .maxPerStageDescriptorSamplers = max_samplers,
1523 .maxPerStageDescriptorUniformBuffers = MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BUFFERS,
1524 .maxPerStageDescriptorStorageBuffers = max_ssbos,
1525 .maxPerStageDescriptorSampledImages = max_textures,
1526 .maxPerStageDescriptorStorageImages = max_images,
1527 .maxPerStageDescriptorInputAttachments = MAX_PER_STAGE_DESCRIPTOR_INPUT_ATTACHMENTS,
1528 .maxPerStageResources = max_per_stage,
1529 .maxDescriptorSetSamplers = 6 * max_samplers, /* number of stages * maxPerStageDescriptorSamplers */
1530 .maxDescriptorSetUniformBuffers = 6 * MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BUFFERS, /* number of stages * maxPerStageDescriptorUniformBuffers */
1531 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2,
1532 .maxDescriptorSetStorageBuffers = 6 * max_ssbos, /* number of stages * maxPerStageDescriptorStorageBuffers */
1533 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2,
1534 .maxDescriptorSetSampledImages = 6 * max_textures, /* number of stages * maxPerStageDescriptorSampledImages */
1535 .maxDescriptorSetStorageImages = 6 * max_images, /* number of stages * maxPerStageDescriptorStorageImages */
1536 .maxDescriptorSetInputAttachments = MAX_DESCRIPTOR_SET_INPUT_ATTACHMENTS,
1537 .maxVertexInputAttributes = MAX_VBS,
1538 .maxVertexInputBindings = MAX_VBS,
1539 .maxVertexInputAttributeOffset = 2047,
1540 .maxVertexInputBindingStride = 2048,
1541 .maxVertexOutputComponents = 128,
1542 .maxTessellationGenerationLevel = 64,
1543 .maxTessellationPatchSize = 32,
1544 .maxTessellationControlPerVertexInputComponents = 128,
1545 .maxTessellationControlPerVertexOutputComponents = 128,
1546 .maxTessellationControlPerPatchOutputComponents = 128,
1547 .maxTessellationControlTotalOutputComponents = 2048,
1548 .maxTessellationEvaluationInputComponents = 128,
1549 .maxTessellationEvaluationOutputComponents = 128,
1550 .maxGeometryShaderInvocations = 32,
1551 .maxGeometryInputComponents = 64,
1552 .maxGeometryOutputComponents = 128,
1553 .maxGeometryOutputVertices = 256,
1554 .maxGeometryTotalOutputComponents = 1024,
1555 .maxFragmentInputComponents = 116, /* 128 components - (PSIZ, CLIP_DIST0, CLIP_DIST1) */
1556 .maxFragmentOutputAttachments = 8,
1557 .maxFragmentDualSrcAttachments = 1,
1558 .maxFragmentCombinedOutputResources = 8,
1559 .maxComputeSharedMemorySize = 64 * 1024,
1560 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
1561 .maxComputeWorkGroupInvocations = max_workgroup_size,
1562 .maxComputeWorkGroupSize = {
1563 max_workgroup_size,
1564 max_workgroup_size,
1565 max_workgroup_size,
1566 },
1567 .subPixelPrecisionBits = 8,
1568 .subTexelPrecisionBits = 8,
1569 .mipmapPrecisionBits = 8,
1570 .maxDrawIndexedIndexValue = UINT32_MAX,
1571 .maxDrawIndirectCount = UINT32_MAX,
1572 .maxSamplerLodBias = 16,
1573 .maxSamplerAnisotropy = 16,
1574 .maxViewports = MAX_VIEWPORTS,
1575 .maxViewportDimensions = { (1 << 14), (1 << 14) },
1576 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
1577 .viewportSubPixelBits = 13, /* We take a float? */
1578 .minMemoryMapAlignment = 4096, /* A page */
1579       /* The dataport requires texel alignment, so we need to assume a worst
1580        * case of R32G32B32A32, which is 16 bytes.
1581        */
1582 .minTexelBufferOffsetAlignment = 16,
1583 .minUniformBufferOffsetAlignment = ANV_UBO_ALIGNMENT,
1584 .minStorageBufferOffsetAlignment = 4,
1585 .minTexelOffset = -8,
1586 .maxTexelOffset = 7,
1587 .minTexelGatherOffset = -32,
1588 .maxTexelGatherOffset = 31,
1589 .minInterpolationOffset = -0.5,
1590 .maxInterpolationOffset = 0.4375,
1591 .subPixelInterpolationOffsetBits = 4,
1592 .maxFramebufferWidth = (1 << 14),
1593 .maxFramebufferHeight = (1 << 14),
1594 .maxFramebufferLayers = (1 << 11),
1595 .framebufferColorSampleCounts = sample_counts,
1596 .framebufferDepthSampleCounts = sample_counts,
1597 .framebufferStencilSampleCounts = sample_counts,
1598 .framebufferNoAttachmentsSampleCounts = sample_counts,
1599 .maxColorAttachments = MAX_RTS,
1600 .sampledImageColorSampleCounts = sample_counts,
1601 .sampledImageIntegerSampleCounts = sample_counts,
1602 .sampledImageDepthSampleCounts = sample_counts,
1603 .sampledImageStencilSampleCounts = sample_counts,
1604 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
1605 .maxSampleMaskWords = 1,
1606 .timestampComputeAndGraphics = true,
1607 .timestampPeriod = 1000000000.0 / devinfo->timestamp_frequency,
1608 .maxClipDistances = 8,
1609 .maxCullDistances = 8,
1610 .maxCombinedClipAndCullDistances = 8,
1611 .discreteQueuePriorities = 2,
1612 .pointSizeRange = { 0.125, 255.875 },
1613 .lineWidthRange = {
1614 0.0,
1615 (devinfo->gen >= 9 || devinfo->is_cherryview) ?
1616 2047.9921875 : 7.9921875,
1617 },
1618 .pointSizeGranularity = (1.0 / 8.0),
1619 .lineWidthGranularity = (1.0 / 128.0),
1620 .strictLines = false,
1621 .standardSampleLocations = true,
1622 .optimalBufferCopyOffsetAlignment = 128,
1623 .optimalBufferCopyRowPitchAlignment = 128,
1624 .nonCoherentAtomSize = 64,
1625 };
1626
1627 *pProperties = (VkPhysicalDeviceProperties) {
1628 .apiVersion = anv_physical_device_api_version(pdevice),
1629 .driverVersion = vk_get_driver_version(),
1630 .vendorID = 0x8086,
1631 .deviceID = pdevice->info.chipset_id,
1632 .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
1633 .limits = limits,
1634 .sparseProperties = {0}, /* Broadwell doesn't do sparse. */
1635 };
1636
1637 snprintf(pProperties->deviceName, sizeof(pProperties->deviceName),
1638 "%s", pdevice->name);
1639 memcpy(pProperties->pipelineCacheUUID,
1640 pdevice->pipeline_cache_uuid, VK_UUID_SIZE);
1641 }
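
/* A minimal application-side sketch (not driver code) of consuming the
 * limits filled in above; the loader resolves vkGetPhysicalDeviceProperties
 * to the function above:
 *
 *    VkPhysicalDeviceProperties props;
 *    vkGetPhysicalDeviceProperties(physical_device, &props);
 *    assert(props.limits.maxPushConstantsSize >= 128);
 *
 * (128 bytes is the minimum maxPushConstantsSize the Vulkan spec
 * guarantees.)
 */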
1642
1643 static void
1644 anv_get_physical_device_properties_1_1(struct anv_physical_device *pdevice,
1645 VkPhysicalDeviceVulkan11Properties *p)
1646 {
1647 assert(p->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES);
1648
1649 memcpy(p->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
1650 memcpy(p->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
1651 memset(p->deviceLUID, 0, VK_LUID_SIZE);
1652 p->deviceNodeMask = 0;
1653 p->deviceLUIDValid = false;
1654
1655 p->subgroupSize = BRW_SUBGROUP_SIZE;
1656 VkShaderStageFlags scalar_stages = 0;
1657 for (unsigned stage = 0; stage < MESA_SHADER_STAGES; stage++) {
1658 if (pdevice->compiler->scalar_stage[stage])
1659 scalar_stages |= mesa_to_vk_shader_stage(stage);
1660 }
1661 p->subgroupSupportedStages = scalar_stages;
1662 p->subgroupSupportedOperations = VK_SUBGROUP_FEATURE_BASIC_BIT |
1663 VK_SUBGROUP_FEATURE_VOTE_BIT |
1664 VK_SUBGROUP_FEATURE_BALLOT_BIT |
1665 VK_SUBGROUP_FEATURE_SHUFFLE_BIT |
1666 VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT |
1667 VK_SUBGROUP_FEATURE_QUAD_BIT;
1668 if (pdevice->info.gen >= 8) {
1669       /* TODO: There's no technical reason why these can't be made to
1670        * work on gen7, but they don't at the moment, so it's better to
1671        * leave the feature disabled than enabled and broken.
1672        */
1673 p->subgroupSupportedOperations |= VK_SUBGROUP_FEATURE_ARITHMETIC_BIT |
1674 VK_SUBGROUP_FEATURE_CLUSTERED_BIT;
1675 }
1676 p->subgroupQuadOperationsInAllStages = pdevice->info.gen >= 8;
1677
1678 p->pointClippingBehavior = VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY;
1679 p->maxMultiviewViewCount = 16;
1680 p->maxMultiviewInstanceIndex = UINT32_MAX / 16;
1681 p->protectedNoFault = false;
1682 /* This value doesn't matter for us today as our per-stage descriptors are
1683 * the real limit.
1684 */
1685 p->maxPerSetDescriptors = 1024;
1686 p->maxMemoryAllocationSize = MAX_MEMORY_ALLOCATION_SIZE;
1687 }
1688
1689 static void
1690 anv_get_physical_device_properties_1_2(struct anv_physical_device *pdevice,
1691 VkPhysicalDeviceVulkan12Properties *p)
1692 {
1693 assert(p->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES);
1694
1695 p->driverID = VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA_KHR;
1696 memset(p->driverName, 0, sizeof(p->driverName));
1697 snprintf(p->driverName, VK_MAX_DRIVER_NAME_SIZE_KHR,
1698 "Intel open-source Mesa driver");
1699 memset(p->driverInfo, 0, sizeof(p->driverInfo));
1700 snprintf(p->driverInfo, VK_MAX_DRIVER_INFO_SIZE_KHR,
1701 "Mesa " PACKAGE_VERSION MESA_GIT_SHA1);
1702 p->conformanceVersion = (VkConformanceVersionKHR) {
1703 .major = 1,
1704 .minor = 2,
1705 .subminor = 0,
1706 .patch = 0,
1707 };
1708
1709 p->denormBehaviorIndependence =
1710 VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL_KHR;
1711 p->roundingModeIndependence =
1712 VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE_KHR;
1713
1714    /* Broadwell does not support HF denorms and there are restrictions on
1715     * other gens. According to Kabylake's PRM:
1716 *
1717 * "math - Extended Math Function
1718 * [...]
1719 * Restriction : Half-float denorms are always retained."
1720 */
1721 p->shaderDenormFlushToZeroFloat16 = false;
1722 p->shaderDenormPreserveFloat16 = pdevice->info.gen > 8;
1723 p->shaderRoundingModeRTEFloat16 = true;
1724 p->shaderRoundingModeRTZFloat16 = true;
1725 p->shaderSignedZeroInfNanPreserveFloat16 = true;
1726
1727 p->shaderDenormFlushToZeroFloat32 = true;
1728 p->shaderDenormPreserveFloat32 = true;
1729 p->shaderRoundingModeRTEFloat32 = true;
1730 p->shaderRoundingModeRTZFloat32 = true;
1731 p->shaderSignedZeroInfNanPreserveFloat32 = true;
1732
1733 p->shaderDenormFlushToZeroFloat64 = true;
1734 p->shaderDenormPreserveFloat64 = true;
1735 p->shaderRoundingModeRTEFloat64 = true;
1736 p->shaderRoundingModeRTZFloat64 = true;
1737 p->shaderSignedZeroInfNanPreserveFloat64 = true;
1738
1739 /* It's a bit hard to exactly map our implementation to the limits
1740 * described here. The bindless surface handle in the extended
1741 * message descriptors is 20 bits and it's an index into the table of
1742 * RENDER_SURFACE_STATE structs that starts at bindless surface base
1743 * address. Given that most things consume two surface states per
1744 * view (general/sampled for textures and write-only/read-write for
1745 * images), we claim 2^19 things.
1746 *
1747 * For SSBOs, we just use A64 messages so there is no real limit
1748 * there beyond the limit on the total size of a descriptor set.
1749 */
1750 const unsigned max_bindless_views = 1 << 19;
1751 p->maxUpdateAfterBindDescriptorsInAllPools = max_bindless_views;
1752 p->shaderUniformBufferArrayNonUniformIndexingNative = false;
1753 p->shaderSampledImageArrayNonUniformIndexingNative = false;
1754 p->shaderStorageBufferArrayNonUniformIndexingNative = true;
1755 p->shaderStorageImageArrayNonUniformIndexingNative = false;
1756 p->shaderInputAttachmentArrayNonUniformIndexingNative = false;
1757 p->robustBufferAccessUpdateAfterBind = true;
1758 p->quadDivergentImplicitLod = false;
1759 p->maxPerStageDescriptorUpdateAfterBindSamplers = max_bindless_views;
1760 p->maxPerStageDescriptorUpdateAfterBindUniformBuffers = MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BUFFERS;
1761 p->maxPerStageDescriptorUpdateAfterBindStorageBuffers = UINT32_MAX;
1762 p->maxPerStageDescriptorUpdateAfterBindSampledImages = max_bindless_views;
1763 p->maxPerStageDescriptorUpdateAfterBindStorageImages = max_bindless_views;
1764 p->maxPerStageDescriptorUpdateAfterBindInputAttachments = MAX_PER_STAGE_DESCRIPTOR_INPUT_ATTACHMENTS;
1765 p->maxPerStageUpdateAfterBindResources = UINT32_MAX;
1766 p->maxDescriptorSetUpdateAfterBindSamplers = max_bindless_views;
1767 p->maxDescriptorSetUpdateAfterBindUniformBuffers = 6 * MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BUFFERS;
1768 p->maxDescriptorSetUpdateAfterBindUniformBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2;
1769 p->maxDescriptorSetUpdateAfterBindStorageBuffers = UINT32_MAX;
1770 p->maxDescriptorSetUpdateAfterBindStorageBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2;
1771 p->maxDescriptorSetUpdateAfterBindSampledImages = max_bindless_views;
1772 p->maxDescriptorSetUpdateAfterBindStorageImages = max_bindless_views;
1773 p->maxDescriptorSetUpdateAfterBindInputAttachments = MAX_DESCRIPTOR_SET_INPUT_ATTACHMENTS;
1774
1775 /* We support all of the depth resolve modes */
1776 p->supportedDepthResolveModes = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR |
1777 VK_RESOLVE_MODE_AVERAGE_BIT_KHR |
1778 VK_RESOLVE_MODE_MIN_BIT_KHR |
1779 VK_RESOLVE_MODE_MAX_BIT_KHR;
1780    /* Average doesn't make sense for stencil, so we don't support that. */
1781 p->supportedStencilResolveModes = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR;
1782 if (pdevice->info.gen >= 8) {
1783 /* The advanced stencil resolve modes currently require stencil
1784 * sampling be supported by the hardware.
1785 */
1786 p->supportedStencilResolveModes |= VK_RESOLVE_MODE_MIN_BIT_KHR |
1787 VK_RESOLVE_MODE_MAX_BIT_KHR;
1788 }
1789 p->independentResolveNone = true;
1790 p->independentResolve = true;
1791
1792 p->filterMinmaxSingleComponentFormats = pdevice->info.gen >= 9;
1793 p->filterMinmaxImageComponentMapping = pdevice->info.gen >= 9;
1794
1795 p->maxTimelineSemaphoreValueDifference = UINT64_MAX;
1796
1797 p->framebufferIntegerColorSampleCounts =
1798 isl_device_get_sample_counts(&pdevice->isl_dev);
1799 }
1800
1801 void anv_GetPhysicalDeviceProperties2(
1802 VkPhysicalDevice physicalDevice,
1803 VkPhysicalDeviceProperties2* pProperties)
1804 {
1805 ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
1806
1807 anv_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
1808
1809 VkPhysicalDeviceVulkan11Properties core_1_1 = {
1810 .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES,
1811 };
1812 anv_get_physical_device_properties_1_1(pdevice, &core_1_1);
1813
1814 VkPhysicalDeviceVulkan12Properties core_1_2 = {
1815 .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES,
1816 };
1817 anv_get_physical_device_properties_1_2(pdevice, &core_1_2);
1818
1819 #define CORE_RENAMED_PROPERTY(major, minor, ext_property, core_property) \
1820 memcpy(&properties->ext_property, &core_##major##_##minor.core_property, \
1821 sizeof(core_##major##_##minor.core_property))
1822
1823 #define CORE_PROPERTY(major, minor, property) \
1824 CORE_RENAMED_PROPERTY(major, minor, property, property)
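
/* For example, CORE_PROPERTY(1, 2, driverID) expands to a memcpy of
 * core_1_2.driverID into properties->driverID, which keeps the extension
 * structs handled below in sync with the core 1.1/1.2 structs filled above.
 */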
1825
1826 vk_foreach_struct(ext, pProperties->pNext) {
1827 switch (ext->sType) {
1828 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT: {
1829 VkPhysicalDeviceCustomBorderColorPropertiesEXT *properties =
1830 (VkPhysicalDeviceCustomBorderColorPropertiesEXT *)ext;
1831 properties->maxCustomBorderColorSamplers = MAX_CUSTOM_BORDER_COLORS;
1832 break;
1833 }
1834
1835 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES_KHR: {
1836 VkPhysicalDeviceDepthStencilResolvePropertiesKHR *properties =
1837 (VkPhysicalDeviceDepthStencilResolvePropertiesKHR *)ext;
1838 CORE_PROPERTY(1, 2, supportedDepthResolveModes);
1839 CORE_PROPERTY(1, 2, supportedStencilResolveModes);
1840 CORE_PROPERTY(1, 2, independentResolveNone);
1841 CORE_PROPERTY(1, 2, independentResolve);
1842 break;
1843 }
1844
1845 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT: {
1846 VkPhysicalDeviceDescriptorIndexingPropertiesEXT *properties =
1847 (VkPhysicalDeviceDescriptorIndexingPropertiesEXT *)ext;
1848 CORE_PROPERTY(1, 2, maxUpdateAfterBindDescriptorsInAllPools);
1849 CORE_PROPERTY(1, 2, shaderUniformBufferArrayNonUniformIndexingNative);
1850 CORE_PROPERTY(1, 2, shaderSampledImageArrayNonUniformIndexingNative);
1851 CORE_PROPERTY(1, 2, shaderStorageBufferArrayNonUniformIndexingNative);
1852 CORE_PROPERTY(1, 2, shaderStorageImageArrayNonUniformIndexingNative);
1853 CORE_PROPERTY(1, 2, shaderInputAttachmentArrayNonUniformIndexingNative);
1854 CORE_PROPERTY(1, 2, robustBufferAccessUpdateAfterBind);
1855 CORE_PROPERTY(1, 2, quadDivergentImplicitLod);
1856 CORE_PROPERTY(1, 2, maxPerStageDescriptorUpdateAfterBindSamplers);
1857 CORE_PROPERTY(1, 2, maxPerStageDescriptorUpdateAfterBindUniformBuffers);
1858 CORE_PROPERTY(1, 2, maxPerStageDescriptorUpdateAfterBindStorageBuffers);
1859 CORE_PROPERTY(1, 2, maxPerStageDescriptorUpdateAfterBindSampledImages);
1860 CORE_PROPERTY(1, 2, maxPerStageDescriptorUpdateAfterBindStorageImages);
1861 CORE_PROPERTY(1, 2, maxPerStageDescriptorUpdateAfterBindInputAttachments);
1862 CORE_PROPERTY(1, 2, maxPerStageUpdateAfterBindResources);
1863 CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindSamplers);
1864 CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindUniformBuffers);
1865 CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindUniformBuffersDynamic);
1866 CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindStorageBuffers);
1867 CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindStorageBuffersDynamic);
1868 CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindSampledImages);
1869 CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindStorageImages);
1870 CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindInputAttachments);
1871 break;
1872 }
1873
1874 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR: {
1875 VkPhysicalDeviceDriverPropertiesKHR *properties =
1876 (VkPhysicalDeviceDriverPropertiesKHR *) ext;
1877 CORE_PROPERTY(1, 2, driverID);
1878 CORE_PROPERTY(1, 2, driverName);
1879 CORE_PROPERTY(1, 2, driverInfo);
1880 CORE_PROPERTY(1, 2, conformanceVersion);
1881 break;
1882 }
1883
1884 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT: {
1885 VkPhysicalDeviceExternalMemoryHostPropertiesEXT *props =
1886 (VkPhysicalDeviceExternalMemoryHostPropertiesEXT *) ext;
1887 /* Userptr needs page aligned memory. */
1888 props->minImportedHostPointerAlignment = 4096;
1889 break;
1890 }
1891
1892 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
1893 VkPhysicalDeviceIDProperties *properties =
1894 (VkPhysicalDeviceIDProperties *)ext;
1895 CORE_PROPERTY(1, 1, deviceUUID);
1896 CORE_PROPERTY(1, 1, driverUUID);
1897 CORE_PROPERTY(1, 1, deviceLUID);
1898 CORE_PROPERTY(1, 1, deviceLUIDValid);
1899 break;
1900 }
1901
1902 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT: {
1903 VkPhysicalDeviceInlineUniformBlockPropertiesEXT *props =
1904 (VkPhysicalDeviceInlineUniformBlockPropertiesEXT *)ext;
1905 props->maxInlineUniformBlockSize = MAX_INLINE_UNIFORM_BLOCK_SIZE;
1906 props->maxPerStageDescriptorInlineUniformBlocks =
1907 MAX_INLINE_UNIFORM_BLOCK_DESCRIPTORS;
1908 props->maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks =
1909 MAX_INLINE_UNIFORM_BLOCK_DESCRIPTORS;
1910 props->maxDescriptorSetInlineUniformBlocks =
1911 MAX_INLINE_UNIFORM_BLOCK_DESCRIPTORS;
1912 props->maxDescriptorSetUpdateAfterBindInlineUniformBlocks =
1913 MAX_INLINE_UNIFORM_BLOCK_DESCRIPTORS;
1914 break;
1915 }
1916
1917 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT: {
1918 VkPhysicalDeviceLineRasterizationPropertiesEXT *props =
1919 (VkPhysicalDeviceLineRasterizationPropertiesEXT *)ext;
1920 /* In the Skylake PRM Vol. 7, subsection titled "GIQ (Diamond)
1921 * Sampling Rules - Legacy Mode", it says the following:
1922 *
1923 * "Note that the device divides a pixel into a 16x16 array of
1924 * subpixels, referenced by their upper left corners."
1925 *
1926 * This is the only known reference in the PRMs to the subpixel
1927 * precision of line rasterization and a "16x16 array of subpixels"
1928 * implies 4 subpixel precision bits. Empirical testing has shown
1929 * that 4 subpixel precision bits applies to all line rasterization
1930 * types.
1931 */
1932 props->lineSubPixelPrecisionBits = 4;
1933 break;
1934 }
1935
1936 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
1937 VkPhysicalDeviceMaintenance3Properties *properties =
1938 (VkPhysicalDeviceMaintenance3Properties *)ext;
1939 /* This value doesn't matter for us today as our per-stage
1940 * descriptors are the real limit.
1941 */
1942 CORE_PROPERTY(1, 1, maxPerSetDescriptors);
1943 CORE_PROPERTY(1, 1, maxMemoryAllocationSize);
1944 break;
1945 }
1946
1947 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
1948 VkPhysicalDeviceMultiviewProperties *properties =
1949 (VkPhysicalDeviceMultiviewProperties *)ext;
1950 CORE_PROPERTY(1, 1, maxMultiviewViewCount);
1951 CORE_PROPERTY(1, 1, maxMultiviewInstanceIndex);
1952 break;
1953 }
1954
1955 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT: {
1956 VkPhysicalDevicePCIBusInfoPropertiesEXT *properties =
1957 (VkPhysicalDevicePCIBusInfoPropertiesEXT *)ext;
1958 properties->pciDomain = pdevice->pci_info.domain;
1959 properties->pciBus = pdevice->pci_info.bus;
1960 properties->pciDevice = pdevice->pci_info.device;
1961 properties->pciFunction = pdevice->pci_info.function;
1962 break;
1963 }
1964
1965 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_PROPERTIES_KHR: {
1966 VkPhysicalDevicePerformanceQueryPropertiesKHR *properties =
1967 (VkPhysicalDevicePerformanceQueryPropertiesKHR *)ext;
1968 /* We could support this by spawning a shader to do the equation
1969 * normalization.
1970 */
1971 properties->allowCommandBufferQueryCopies = false;
1972 break;
1973 }
1974
1975 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
1976 VkPhysicalDevicePointClippingProperties *properties =
1977 (VkPhysicalDevicePointClippingProperties *) ext;
1978 CORE_PROPERTY(1, 1, pointClippingBehavior);
1979 break;
1980 }
1981
1982 #pragma GCC diagnostic push
1983 #pragma GCC diagnostic ignored "-Wswitch"
1984 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENTATION_PROPERTIES_ANDROID: {
1985 VkPhysicalDevicePresentationPropertiesANDROID *props =
1986 (VkPhysicalDevicePresentationPropertiesANDROID *)ext;
1987 props->sharedImage = VK_FALSE;
1988 break;
1989 }
1990 #pragma GCC diagnostic pop
1991
1992 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES: {
1993 VkPhysicalDeviceProtectedMemoryProperties *properties =
1994 (VkPhysicalDeviceProtectedMemoryProperties *)ext;
1995 CORE_PROPERTY(1, 1, protectedNoFault);
1996 break;
1997 }
1998
1999 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
2000 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
2001 (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
2002 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
2003 break;
2004 }
2005
2006 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT: {
2007 VkPhysicalDeviceRobustness2PropertiesEXT *properties = (void *)ext;
2008 properties->robustStorageBufferAccessSizeAlignment =
2009 ANV_SSBO_BOUNDS_CHECK_ALIGNMENT;
2010 properties->robustUniformBufferAccessSizeAlignment =
2011 ANV_UBO_ALIGNMENT;
2012 break;
2013 }
2014
2015 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT: {
2016 VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT *properties =
2017 (VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT *)ext;
2018 CORE_PROPERTY(1, 2, filterMinmaxImageComponentMapping);
2019 CORE_PROPERTY(1, 2, filterMinmaxSingleComponentFormats);
2020 break;
2021 }
2022
2023 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES: {
2024 VkPhysicalDeviceSubgroupProperties *properties = (void *)ext;
2025 CORE_PROPERTY(1, 1, subgroupSize);
2026 CORE_RENAMED_PROPERTY(1, 1, supportedStages,
2027 subgroupSupportedStages);
2028 CORE_RENAMED_PROPERTY(1, 1, supportedOperations,
2029 subgroupSupportedOperations);
2030 CORE_RENAMED_PROPERTY(1, 1, quadOperationsInAllStages,
2031 subgroupQuadOperationsInAllStages);
2032 break;
2033 }
2034
2035 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT: {
2036 VkPhysicalDeviceSubgroupSizeControlPropertiesEXT *props =
2037 (VkPhysicalDeviceSubgroupSizeControlPropertiesEXT *)ext;
2038 STATIC_ASSERT(8 <= BRW_SUBGROUP_SIZE && BRW_SUBGROUP_SIZE <= 32);
2039 props->minSubgroupSize = 8;
2040 props->maxSubgroupSize = 32;
2041 props->maxComputeWorkgroupSubgroups = pdevice->info.max_cs_threads;
2042 props->requiredSubgroupSizeStages = VK_SHADER_STAGE_COMPUTE_BIT;
2043 break;
2044 }
2045       case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES_KHR: {
2046 VkPhysicalDeviceFloatControlsPropertiesKHR *properties = (void *)ext;
2047 CORE_PROPERTY(1, 2, denormBehaviorIndependence);
2048 CORE_PROPERTY(1, 2, roundingModeIndependence);
2049 CORE_PROPERTY(1, 2, shaderDenormFlushToZeroFloat16);
2050 CORE_PROPERTY(1, 2, shaderDenormPreserveFloat16);
2051 CORE_PROPERTY(1, 2, shaderRoundingModeRTEFloat16);
2052 CORE_PROPERTY(1, 2, shaderRoundingModeRTZFloat16);
2053 CORE_PROPERTY(1, 2, shaderSignedZeroInfNanPreserveFloat16);
2054 CORE_PROPERTY(1, 2, shaderDenormFlushToZeroFloat32);
2055 CORE_PROPERTY(1, 2, shaderDenormPreserveFloat32);
2056 CORE_PROPERTY(1, 2, shaderRoundingModeRTEFloat32);
2057 CORE_PROPERTY(1, 2, shaderRoundingModeRTZFloat32);
2058 CORE_PROPERTY(1, 2, shaderSignedZeroInfNanPreserveFloat32);
2059 CORE_PROPERTY(1, 2, shaderDenormFlushToZeroFloat64);
2060 CORE_PROPERTY(1, 2, shaderDenormPreserveFloat64);
2061 CORE_PROPERTY(1, 2, shaderRoundingModeRTEFloat64);
2062 CORE_PROPERTY(1, 2, shaderRoundingModeRTZFloat64);
2063 CORE_PROPERTY(1, 2, shaderSignedZeroInfNanPreserveFloat64);
2064 break;
2065 }
2066
2067 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT: {
2068 VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT *props =
2069 (VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT *)ext;
2070
2071 /* From the SKL PRM Vol. 2d, docs for RENDER_SURFACE_STATE::Surface
2072 * Base Address:
2073 *
2074 * "For SURFTYPE_BUFFER non-rendertarget surfaces, this field
2075 * specifies the base address of the first element of the surface,
2076 * computed in software by adding the surface base address to the
2077 * byte offset of the element in the buffer. The base address must
2078 * be aligned to element size."
2079 *
2080 * The typed dataport messages require that things be texel aligned.
2081 * Otherwise, we may just load/store the wrong data or, in the worst
2082 * case, there may be hangs.
2083 */
2084 props->storageTexelBufferOffsetAlignmentBytes = 16;
2085 props->storageTexelBufferOffsetSingleTexelAlignment = true;
2086
2087 /* The sampler, however, is much more forgiving and it can handle
2088 * arbitrary byte alignment for linear and buffer surfaces. It's
2089 * hard to find a good PRM citation for this but years of empirical
2090 * experience demonstrate that this is true.
2091 */
2092 props->uniformTexelBufferOffsetAlignmentBytes = 1;
2093 props->uniformTexelBufferOffsetSingleTexelAlignment = false;
2094 break;
2095 }
2096
2097 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES_KHR: {
2098 VkPhysicalDeviceTimelineSemaphorePropertiesKHR *properties =
2099 (VkPhysicalDeviceTimelineSemaphorePropertiesKHR *) ext;
2100 CORE_PROPERTY(1, 2, maxTimelineSemaphoreValueDifference);
2101 break;
2102 }
2103
2104 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT: {
2105 VkPhysicalDeviceTransformFeedbackPropertiesEXT *props =
2106 (VkPhysicalDeviceTransformFeedbackPropertiesEXT *)ext;
2107
2108 props->maxTransformFeedbackStreams = MAX_XFB_STREAMS;
2109 props->maxTransformFeedbackBuffers = MAX_XFB_BUFFERS;
2110 props->maxTransformFeedbackBufferSize = (1ull << 32);
2111 props->maxTransformFeedbackStreamDataSize = 128 * 4;
2112 props->maxTransformFeedbackBufferDataSize = 128 * 4;
2113 props->maxTransformFeedbackBufferDataStride = 2048;
2114 props->transformFeedbackQueries = true;
2115 props->transformFeedbackStreamsLinesTriangles = false;
2116 props->transformFeedbackRasterizationStreamSelect = false;
2117 props->transformFeedbackDraw = true;
2118 break;
2119 }
2120
2121 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT: {
2122 VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *props =
2123 (VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *)ext;
2124 /* We have to restrict this a bit for multiview */
2125 props->maxVertexAttribDivisor = UINT32_MAX / 16;
2126 break;
2127 }
2128
2129 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES:
2130 anv_get_physical_device_properties_1_1(pdevice, (void *)ext);
2131 break;
2132
2133 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES:
2134 anv_get_physical_device_properties_1_2(pdevice, (void *)ext);
2135 break;
2136
2137 default:
2138 anv_debug_ignored_stype(ext->sType);
2139 break;
2140 }
2141 }
2142
2143 #undef CORE_RENAMED_PROPERTY
2144 #undef CORE_PROPERTY
2145 }
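
/* Application-side sketch (hypothetical) of the pNext chaining the loop
 * above services:
 *
 *    VkPhysicalDeviceDriverPropertiesKHR driver_props = {
 *       .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR,
 *    };
 *    VkPhysicalDeviceProperties2 props2 = {
 *       .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
 *       .pNext = &driver_props,
 *    };
 *    vkGetPhysicalDeviceProperties2(physical_device, &props2);
 */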
2146
2147 /* We support exactly one queue family. */
2148 static const VkQueueFamilyProperties
2149 anv_queue_family_properties = {
2150 .queueFlags = VK_QUEUE_GRAPHICS_BIT |
2151 VK_QUEUE_COMPUTE_BIT |
2152 VK_QUEUE_TRANSFER_BIT,
2153 .queueCount = 1,
2154 .timestampValidBits = 36, /* XXX: Real value here */
2155 .minImageTransferGranularity = { 1, 1, 1 },
2156 };
2157
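/* VK_OUTARRAY_MAKE and vk_outarray_append implement the usual Vulkan
 * two-call idiom: when the output array is NULL only the count is written
 * back; otherwise up to *pCount entries are filled in.
 */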
2158 void anv_GetPhysicalDeviceQueueFamilyProperties(
2159 VkPhysicalDevice physicalDevice,
2160 uint32_t* pCount,
2161 VkQueueFamilyProperties* pQueueFamilyProperties)
2162 {
2163 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pCount);
2164
2165 vk_outarray_append(&out, p) {
2166 *p = anv_queue_family_properties;
2167 }
2168 }
2169
2170 void anv_GetPhysicalDeviceQueueFamilyProperties2(
2171 VkPhysicalDevice physicalDevice,
2172 uint32_t* pQueueFamilyPropertyCount,
2173 VkQueueFamilyProperties2* pQueueFamilyProperties)
2174 {
2176 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
2177
2178 vk_outarray_append(&out, p) {
2179 p->queueFamilyProperties = anv_queue_family_properties;
2180
2181 vk_foreach_struct(s, p->pNext) {
2182 anv_debug_ignored_stype(s->sType);
2183 }
2184 }
2185 }
2186
2187 void anv_GetPhysicalDeviceMemoryProperties(
2188 VkPhysicalDevice physicalDevice,
2189 VkPhysicalDeviceMemoryProperties* pMemoryProperties)
2190 {
2191 ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
2192
2193 pMemoryProperties->memoryTypeCount = physical_device->memory.type_count;
2194 for (uint32_t i = 0; i < physical_device->memory.type_count; i++) {
2195 pMemoryProperties->memoryTypes[i] = (VkMemoryType) {
2196 .propertyFlags = physical_device->memory.types[i].propertyFlags,
2197 .heapIndex = physical_device->memory.types[i].heapIndex,
2198 };
2199 }
2200
2201 pMemoryProperties->memoryHeapCount = physical_device->memory.heap_count;
2202 for (uint32_t i = 0; i < physical_device->memory.heap_count; i++) {
2203 pMemoryProperties->memoryHeaps[i] = (VkMemoryHeap) {
2204 .size = physical_device->memory.heaps[i].size,
2205 .flags = physical_device->memory.heaps[i].flags,
2206 };
2207 }
2208 }
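
/* Hypothetical application-side sketch of consuming the data above,
 * e.g. finding a host-visible memory type index:
 *
 *    for (uint32_t i = 0; i < props.memoryTypeCount; i++) {
 *       if (props.memoryTypes[i].propertyFlags &
 *           VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
 *          return i;
 *    }
 */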
2209
2210 static void
2211 anv_get_memory_budget(VkPhysicalDevice physicalDevice,
2212 VkPhysicalDeviceMemoryBudgetPropertiesEXT *memoryBudget)
2213 {
2214 ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
2215 uint64_t sys_available = get_available_system_memory();
2216 assert(sys_available > 0);
2217
2218 VkDeviceSize total_heaps_size = 0;
2219 for (size_t i = 0; i < device->memory.heap_count; i++)
2220 total_heaps_size += device->memory.heaps[i].size;
2221
2222 for (size_t i = 0; i < device->memory.heap_count; i++) {
2223 VkDeviceSize heap_size = device->memory.heaps[i].size;
2224 VkDeviceSize heap_used = device->memory.heaps[i].used;
2225 VkDeviceSize heap_budget;
2226
2227 double heap_proportion = (double) heap_size / total_heaps_size;
2228 VkDeviceSize sys_available_prop = sys_available * heap_proportion;
2229
2230 /*
2231 * Let's not incite the app to starve the system: report at most 90% of
2232 * available system memory.
2233 */
2234 uint64_t heap_available = sys_available_prop * 9 / 10;
2235 heap_budget = MIN2(heap_size, heap_used + heap_available);
2236
2237 /*
2238 * Round down to the nearest MB
2239 */
2240 heap_budget &= ~((1ull << 20) - 1);
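
      /* Worked example with hypothetical numbers: a single 8 GiB heap with
       * 10 GiB of available system memory gives heap_available = 9 GiB, so
       * heap_budget = MIN2(8 GiB, heap_used + 9 GiB) = 8 GiB.
       */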
2241
2242 /*
2243 * The heapBudget value must be non-zero for array elements less than
2244 * VkPhysicalDeviceMemoryProperties::memoryHeapCount. The heapBudget
2245 * value must be less than or equal to VkMemoryHeap::size for each heap.
2246 */
2247 assert(0 < heap_budget && heap_budget <= heap_size);
2248
2249 memoryBudget->heapUsage[i] = heap_used;
2250 memoryBudget->heapBudget[i] = heap_budget;
2251 }
2252
2253 /* The heapBudget and heapUsage values must be zero for array elements
2254 * greater than or equal to VkPhysicalDeviceMemoryProperties::memoryHeapCount
2255 */
2256 for (uint32_t i = device->memory.heap_count; i < VK_MAX_MEMORY_HEAPS; i++) {
2257 memoryBudget->heapBudget[i] = 0;
2258 memoryBudget->heapUsage[i] = 0;
2259 }
2260 }
2261
2262 void anv_GetPhysicalDeviceMemoryProperties2(
2263 VkPhysicalDevice physicalDevice,
2264 VkPhysicalDeviceMemoryProperties2* pMemoryProperties)
2265 {
2266 anv_GetPhysicalDeviceMemoryProperties(physicalDevice,
2267 &pMemoryProperties->memoryProperties);
2268
2269 vk_foreach_struct(ext, pMemoryProperties->pNext) {
2270 switch (ext->sType) {
2271 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT:
2272 anv_get_memory_budget(physicalDevice, (void*)ext);
2273 break;
2274 default:
2275 anv_debug_ignored_stype(ext->sType);
2276 break;
2277 }
2278 }
2279 }
2280
2281 void
2282 anv_GetDeviceGroupPeerMemoryFeatures(
2283 VkDevice device,
2284 uint32_t heapIndex,
2285 uint32_t localDeviceIndex,
2286 uint32_t remoteDeviceIndex,
2287 VkPeerMemoryFeatureFlags* pPeerMemoryFeatures)
2288 {
2289 assert(localDeviceIndex == 0 && remoteDeviceIndex == 0);
2290 *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
2291 VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
2292 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
2293 VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
2294 }
2295
2296 PFN_vkVoidFunction anv_GetInstanceProcAddr(
2297 VkInstance _instance,
2298 const char* pName)
2299 {
2300 ANV_FROM_HANDLE(anv_instance, instance, _instance);
2301
2302    /* The Vulkan 1.0 spec for vkGetInstanceProcAddr has a table specifying
2303     * exactly when we must return a valid function pointer, when we must
2304     * return NULL, and when the behavior is left undefined.
2305     */
2306 if (pName == NULL)
2307 return NULL;
2308
2309 #define LOOKUP_ANV_ENTRYPOINT(entrypoint) \
2310 if (strcmp(pName, "vk" #entrypoint) == 0) \
2311 return (PFN_vkVoidFunction)anv_##entrypoint
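   /* For example, LOOKUP_ANV_ENTRYPOINT(CreateInstance) compares pName
    * against "vkCreateInstance" and returns anv_CreateInstance on a match.
    */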
2312
2313 LOOKUP_ANV_ENTRYPOINT(EnumerateInstanceExtensionProperties);
2314 LOOKUP_ANV_ENTRYPOINT(EnumerateInstanceLayerProperties);
2315 LOOKUP_ANV_ENTRYPOINT(EnumerateInstanceVersion);
2316 LOOKUP_ANV_ENTRYPOINT(CreateInstance);
2317
2318 /* GetInstanceProcAddr() can also be called with a NULL instance.
2319 * See https://gitlab.khronos.org/vulkan/vulkan/issues/2057
2320 */
2321 LOOKUP_ANV_ENTRYPOINT(GetInstanceProcAddr);
2322
2323 #undef LOOKUP_ANV_ENTRYPOINT
2324
2325 if (instance == NULL)
2326 return NULL;
2327
2328 int idx = anv_get_instance_entrypoint_index(pName);
2329 if (idx >= 0)
2330 return instance->dispatch.entrypoints[idx];
2331
2332 idx = anv_get_physical_device_entrypoint_index(pName);
2333 if (idx >= 0)
2334 return instance->physical_device_dispatch.entrypoints[idx];
2335
2336 idx = anv_get_device_entrypoint_index(pName);
2337 if (idx >= 0)
2338 return instance->device_dispatch.entrypoints[idx];
2339
2340 return NULL;
2341 }
2342
2343 /* With version 1+ of the loader interface, the ICD should expose
2344 * vk_icdGetInstanceProcAddr to work around certain LD_PRELOAD issues seen in apps.
2345 */
2346 PUBLIC
2347 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
2348 VkInstance instance,
2349 const char* pName);
2350
2351 PUBLIC
2352 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
2353 VkInstance instance,
2354 const char* pName)
2355 {
2356 return anv_GetInstanceProcAddr(instance, pName);
2357 }
2358
2359 PFN_vkVoidFunction anv_GetDeviceProcAddr(
2360 VkDevice _device,
2361 const char* pName)
2362 {
2363 ANV_FROM_HANDLE(anv_device, device, _device);
2364
2365 if (!device || !pName)
2366 return NULL;
2367
2368 int idx = anv_get_device_entrypoint_index(pName);
2369 if (idx < 0)
2370 return NULL;
2371
2372 return device->dispatch.entrypoints[idx];
2373 }
2374
2375 /* With version 4+ of the loader interface, the ICD should expose
2376 * vk_icdGetPhysicalDeviceProcAddr()
2377 */
2378 PUBLIC
2379 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetPhysicalDeviceProcAddr(
2380 VkInstance _instance,
2381 const char* pName);
2382
2383 PFN_vkVoidFunction vk_icdGetPhysicalDeviceProcAddr(
2384 VkInstance _instance,
2385 const char* pName)
2386 {
2387 ANV_FROM_HANDLE(anv_instance, instance, _instance);
2388
2389 if (!pName || !instance)
2390 return NULL;
2391
2392 int idx = anv_get_physical_device_entrypoint_index(pName);
2393 if (idx < 0)
2394 return NULL;
2395
2396 return instance->physical_device_dispatch.entrypoints[idx];
2397 }
2398
2400 VkResult
2401 anv_CreateDebugReportCallbackEXT(VkInstance _instance,
2402 const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
2403 const VkAllocationCallbacks* pAllocator,
2404 VkDebugReportCallbackEXT* pCallback)
2405 {
2406 ANV_FROM_HANDLE(anv_instance, instance, _instance);
2407 return vk_create_debug_report_callback(&instance->debug_report_callbacks,
2408 pCreateInfo, pAllocator, &instance->alloc,
2409 pCallback);
2410 }
2411
2412 void
2413 anv_DestroyDebugReportCallbackEXT(VkInstance _instance,
2414 VkDebugReportCallbackEXT _callback,
2415 const VkAllocationCallbacks* pAllocator)
2416 {
2417 ANV_FROM_HANDLE(anv_instance, instance, _instance);
2418 vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
2419 _callback, pAllocator, &instance->alloc);
2420 }
2421
2422 void
2423 anv_DebugReportMessageEXT(VkInstance _instance,
2424 VkDebugReportFlagsEXT flags,
2425 VkDebugReportObjectTypeEXT objectType,
2426 uint64_t object,
2427 size_t location,
2428 int32_t messageCode,
2429 const char* pLayerPrefix,
2430 const char* pMessage)
2431 {
2432 ANV_FROM_HANDLE(anv_instance, instance, _instance);
2433 vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
2434 object, location, messageCode, pLayerPrefix, pMessage);
2435 }
2436
2437 static struct anv_state
2438 anv_state_pool_emit_data(struct anv_state_pool *pool, size_t size, size_t align, const void *p)
2439 {
2440 struct anv_state state;
2441
2442 state = anv_state_pool_alloc(pool, size, align);
2443 memcpy(state.map, p, size);
2444
2445 return state;
2446 }
2447
2448 static void
2449 anv_device_init_border_colors(struct anv_device *device)
2450 {
2451 if (device->info.is_haswell) {
2452 static const struct hsw_border_color border_colors[] = {
2453 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 0.0 } },
2454 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 1.0 } },
2455 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = { .float32 = { 1.0, 1.0, 1.0, 1.0 } },
2456 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = { .uint32 = { 0, 0, 0, 0 } },
2457 [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = { .uint32 = { 0, 0, 0, 1 } },
2458 [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = { .uint32 = { 1, 1, 1, 1 } },
2459 };
2460
2461 device->border_colors =
2462 anv_state_pool_emit_data(&device->dynamic_state_pool,
2463 sizeof(border_colors), 512, border_colors);
2464 } else {
2465 static const struct gen8_border_color border_colors[] = {
2466 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 0.0 } },
2467 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 1.0 } },
2468 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = { .float32 = { 1.0, 1.0, 1.0, 1.0 } },
2469 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = { .uint32 = { 0, 0, 0, 0 } },
2470 [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = { .uint32 = { 0, 0, 0, 1 } },
2471 [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = { .uint32 = { 1, 1, 1, 1 } },
2472 };
2473
2474 device->border_colors =
2475 anv_state_pool_emit_data(&device->dynamic_state_pool,
2476 sizeof(border_colors), 64, border_colors);
2477 }
2478 }
2479
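/* Build a 4 KiB batch containing only MI_BATCH_BUFFER_END (padded with a
 * NOOP), so we always have a valid batch on hand for submissions that need
 * to do no real work.
 */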
2480 static VkResult
2481 anv_device_init_trivial_batch(struct anv_device *device)
2482 {
2483 VkResult result = anv_device_alloc_bo(device, 4096,
2484 ANV_BO_ALLOC_MAPPED,
2485 0 /* explicit_address */,
2486 &device->trivial_batch_bo);
2487 if (result != VK_SUCCESS)
2488 return result;
2489
2490 struct anv_batch batch = {
2491 .start = device->trivial_batch_bo->map,
2492 .next = device->trivial_batch_bo->map,
2493 .end = device->trivial_batch_bo->map + 4096,
2494 };
2495
2496 anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END, bbe);
2497 anv_batch_emit(&batch, GEN7_MI_NOOP, noop);
2498
2499 if (!device->info.has_llc)
2500 gen_clflush_range(batch.start, batch.next - batch.start);
2501
2502 return VK_SUCCESS;
2503 }
2504
2505 VkResult anv_EnumerateDeviceExtensionProperties(
2506 VkPhysicalDevice physicalDevice,
2507 const char* pLayerName,
2508 uint32_t* pPropertyCount,
2509 VkExtensionProperties* pProperties)
2510 {
2511 ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
2512 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
2513
2514 for (int i = 0; i < ANV_DEVICE_EXTENSION_COUNT; i++) {
2515 if (device->supported_extensions.extensions[i]) {
2516 vk_outarray_append(&out, prop) {
2517 *prop = anv_device_extensions[i];
2518 }
2519 }
2520 }
2521
2522 return vk_outarray_status(&out);
2523 }
2524
2525 static int
2526 vk_priority_to_gen(int priority)
2527 {
2528 switch (priority) {
2529 case VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT:
2530 return GEN_CONTEXT_LOW_PRIORITY;
2531 case VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT:
2532 return GEN_CONTEXT_MEDIUM_PRIORITY;
2533 case VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT:
2534 return GEN_CONTEXT_HIGH_PRIORITY;
2535 case VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT:
2536 return GEN_CONTEXT_REALTIME_PRIORITY;
2537 default:
2538 unreachable("Invalid priority");
2539 }
2540 }
2541
2542 static VkResult
2543 anv_device_init_hiz_clear_value_bo(struct anv_device *device)
2544 {
2545 VkResult result = anv_device_alloc_bo(device, 4096,
2546 ANV_BO_ALLOC_MAPPED,
2547 0 /* explicit_address */,
2548 &device->hiz_clear_bo);
2549 if (result != VK_SUCCESS)
2550 return result;
2551
2552 union isl_color_value hiz_clear = { .u32 = { 0, } };
2553 hiz_clear.f32[0] = ANV_HZ_FC_VAL;
2554
2555 memcpy(device->hiz_clear_bo->map, hiz_clear.u32, sizeof(hiz_clear.u32));
2556
2557 if (!device->info.has_llc)
2558 gen_clflush_range(device->hiz_clear_bo->map, sizeof(hiz_clear.u32));
2559
2560 return VK_SUCCESS;
2561 }
2562
2563 static bool
2564 get_bo_from_pool(struct gen_batch_decode_bo *ret,
2565 struct anv_block_pool *pool,
2566 uint64_t address)
2567 {
2568 anv_block_pool_foreach_bo(bo, pool) {
2569 uint64_t bo_address = gen_48b_address(bo->offset);
2570 if (address >= bo_address && address < (bo_address + bo->size)) {
2571 *ret = (struct gen_batch_decode_bo) {
2572 .addr = bo_address,
2573 .size = bo->size,
2574 .map = bo->map,
2575 };
2576 return true;
2577 }
2578 }
2579 return false;
2580 }
2581
2582 /* Find the BO containing the given GPU address, for the batch decoder. */
2583 static struct gen_batch_decode_bo
2584 decode_get_bo(void *v_batch, bool ppgtt, uint64_t address)
2585 {
2586 struct anv_device *device = v_batch;
2587 struct gen_batch_decode_bo ret_bo = {};
2588
2589 assert(ppgtt);
2590
2591 if (get_bo_from_pool(&ret_bo, &device->dynamic_state_pool.block_pool, address))
2592 return ret_bo;
2593 if (get_bo_from_pool(&ret_bo, &device->instruction_state_pool.block_pool, address))
2594 return ret_bo;
2595 if (get_bo_from_pool(&ret_bo, &device->binding_table_pool.block_pool, address))
2596 return ret_bo;
2597 if (get_bo_from_pool(&ret_bo, &device->surface_state_pool.block_pool, address))
2598 return ret_bo;
2599
2600 if (!device->cmd_buffer_being_decoded)
2601 return (struct gen_batch_decode_bo) { };
2602
2603 struct anv_batch_bo **bo;
2604
2605 u_vector_foreach(bo, &device->cmd_buffer_being_decoded->seen_bbos) {
2606 /* The decoder zeroes out the top 16 bits, so we need to as well */
2607 uint64_t bo_address = (*bo)->bo->offset & (~0ull >> 16);
2608
2609 if (address >= bo_address && address < bo_address + (*bo)->bo->size) {
2610 return (struct gen_batch_decode_bo) {
2611 .addr = bo_address,
2612 .size = (*bo)->bo->size,
2613 .map = (*bo)->bo->map,
2614 };
2615 }
2616 }
2617
2618 return (struct gen_batch_decode_bo) { };
2619 }
2620
2621 struct gen_aux_map_buffer {
2622 struct gen_buffer base;
2623 struct anv_state state;
2624 };
2625
2626 static struct gen_buffer *
2627 gen_aux_map_buffer_alloc(void *driver_ctx, uint32_t size)
2628 {
2629 struct gen_aux_map_buffer *buf = malloc(sizeof(struct gen_aux_map_buffer));
2630 if (!buf)
2631 return NULL;
2632
2633 struct anv_device *device = (struct anv_device*)driver_ctx;
2634 assert(device->physical->supports_48bit_addresses &&
2635 device->physical->use_softpin);
2636
2637 struct anv_state_pool *pool = &device->dynamic_state_pool;
2638 buf->state = anv_state_pool_alloc(pool, size, size);
2639
2640 buf->base.gpu = pool->block_pool.bo->offset + buf->state.offset;
2641 buf->base.gpu_end = buf->base.gpu + buf->state.alloc_size;
2642 buf->base.map = buf->state.map;
2643 buf->base.driver_bo = &buf->state;
2644 return &buf->base;
2645 }
2646
2647 static void
2648 gen_aux_map_buffer_free(void *driver_ctx, struct gen_buffer *buffer)
2649 {
2650 struct gen_aux_map_buffer *buf = (struct gen_aux_map_buffer*)buffer;
2651 struct anv_device *device = (struct anv_device*)driver_ctx;
2652 struct anv_state_pool *pool = &device->dynamic_state_pool;
2653 anv_state_pool_free(pool, buf->state);
2654 free(buf);
2655 }
2656
2657 static struct gen_mapped_pinned_buffer_alloc aux_map_allocator = {
2658 .alloc = gen_aux_map_buffer_alloc,
2659 .free = gen_aux_map_buffer_free,
2660 };
2661
2662 static VkResult
2663 check_physical_device_features(VkPhysicalDevice physicalDevice,
2664 const VkPhysicalDeviceFeatures *features)
2665 {
2666 VkPhysicalDeviceFeatures supported_features;
2667 anv_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
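   /* VkPhysicalDeviceFeatures is, by its spec-defined layout, nothing but
    * VkBool32 members, so we can compare the requested and supported
    * features generically as two parallel arrays.
    */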
2668 VkBool32 *supported_feature = (VkBool32 *)&supported_features;
2669 VkBool32 *enabled_feature = (VkBool32 *)features;
2670 unsigned num_features = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
2671 for (uint32_t i = 0; i < num_features; i++) {
2672 if (enabled_feature[i] && !supported_feature[i])
2673 return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
2674 }
2675
2676 return VK_SUCCESS;
2677 }
2678
2679 VkResult anv_CreateDevice(
2680 VkPhysicalDevice physicalDevice,
2681 const VkDeviceCreateInfo* pCreateInfo,
2682 const VkAllocationCallbacks* pAllocator,
2683 VkDevice* pDevice)
2684 {
2685 ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
2686 VkResult result;
2687 struct anv_device *device;
2688
2689 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);
2690
2691 struct anv_device_extension_table enabled_extensions = { };
2692 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
2693 int idx;
2694 for (idx = 0; idx < ANV_DEVICE_EXTENSION_COUNT; idx++) {
2695 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
2696 anv_device_extensions[idx].extensionName) == 0)
2697 break;
2698 }
2699
2700 if (idx >= ANV_DEVICE_EXTENSION_COUNT)
2701 return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
2702
2703 if (!physical_device->supported_extensions.extensions[idx])
2704 return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
2705
2706 enabled_extensions.extensions[idx] = true;
2707 }
2708
2709 /* Check enabled features */
2710 bool robust_buffer_access = false;
2711 if (pCreateInfo->pEnabledFeatures) {
2712 result = check_physical_device_features(physicalDevice,
2713 pCreateInfo->pEnabledFeatures);
2714 if (result != VK_SUCCESS)
2715 return result;
2716
2717 if (pCreateInfo->pEnabledFeatures->robustBufferAccess)
2718 robust_buffer_access = true;
2719 }
2720
2721 vk_foreach_struct_const(ext, pCreateInfo->pNext) {
2722 switch (ext->sType) {
2723 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2: {
2724 const VkPhysicalDeviceFeatures2 *features = (const void *)ext;
2725 result = check_physical_device_features(physicalDevice,
2726 &features->features);
2727 if (result != VK_SUCCESS)
2728 return result;
2729
2730 if (features->features.robustBufferAccess)
2731 robust_buffer_access = true;
2732 break;
2733 }
2734
2735 default:
2736 /* Don't warn */
2737 break;
2738 }
2739 }
2740
2741 /* Check requested queues and fail if we are requested to create any
2742 * queues with flags we don't support.
2743 */
2744 assert(pCreateInfo->queueCreateInfoCount > 0);
2745 for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
2746 if (pCreateInfo->pQueueCreateInfos[i].flags != 0)
2747 return vk_error(VK_ERROR_INITIALIZATION_FAILED);
2748 }
2749
2750 /* Check if client specified queue priority. */
2751 const VkDeviceQueueGlobalPriorityCreateInfoEXT *queue_priority =
2752 vk_find_struct_const(pCreateInfo->pQueueCreateInfos[0].pNext,
2753 DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT);
2754
2755 VkQueueGlobalPriorityEXT priority =
2756 queue_priority ? queue_priority->globalPriority :
2757 VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT;
2758
2759 device = vk_alloc2(&physical_device->instance->alloc, pAllocator,
2760 sizeof(*device), 8,
2761 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
2762 if (!device)
2763 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2764
2765 vk_device_init(&device->vk, pCreateInfo,
2766 &physical_device->instance->alloc, pAllocator);
2767
2768 if (INTEL_DEBUG & DEBUG_BATCH) {
2769 const unsigned decode_flags =
2770 GEN_BATCH_DECODE_FULL |
2771 ((INTEL_DEBUG & DEBUG_COLOR) ? GEN_BATCH_DECODE_IN_COLOR : 0) |
2772 GEN_BATCH_DECODE_OFFSETS |
2773 GEN_BATCH_DECODE_FLOATS;
2774
2775 gen_batch_decode_ctx_init(&device->decoder_ctx,
2776 &physical_device->info,
2777 stderr, decode_flags, NULL,
2778 decode_get_bo, NULL, device);
2779 }
2780
2781 device->physical = physical_device;
2782 device->no_hw = physical_device->no_hw;
2783 device->_lost = false;
2784
2785 /* XXX(chadv): Can we dup() physicalDevice->fd here? */
2786 device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
2787 if (device->fd == -1) {
2788 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
2789 goto fail_device;
2790 }
2791
2792 device->context_id = anv_gem_create_context(device);
2793 if (device->context_id == -1) {
2794 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
2795 goto fail_fd;
2796 }
2797
2798 result = anv_queue_init(device, &device->queue);
2799 if (result != VK_SUCCESS)
2800 goto fail_context_id;
2801
2802 if (physical_device->use_softpin) {
2803 if (pthread_mutex_init(&device->vma_mutex, NULL) != 0) {
2804 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
2805 goto fail_queue;
2806 }
2807
2808       /* Keep the page with address zero out of the allocator. */
2809 util_vma_heap_init(&device->vma_lo,
2810 LOW_HEAP_MIN_ADDRESS, LOW_HEAP_SIZE);
2811
2812 util_vma_heap_init(&device->vma_cva, CLIENT_VISIBLE_HEAP_MIN_ADDRESS,
2813 CLIENT_VISIBLE_HEAP_SIZE);
2814
2815 /* Leave the last 4GiB out of the high vma range, so that no state
2816 * base address + size can overflow 48 bits. For more information see
2817 * the comment about Wa32bitGeneralStateOffset in anv_allocator.c
2818 */
2819 util_vma_heap_init(&device->vma_hi, HIGH_HEAP_MIN_ADDRESS,
2820 physical_device->gtt_size - (1ull << 32) -
2821 HIGH_HEAP_MIN_ADDRESS);
2822 }
2823
2824 list_inithead(&device->memory_objects);
2825
2826 /* As per spec, the driver implementation may deny requests to acquire
2827 * a priority above the default priority (MEDIUM) if the caller does not
2828 * have sufficient privileges. In this scenario VK_ERROR_NOT_PERMITTED_EXT
2829 * is returned.
2830 */
2831 if (physical_device->has_context_priority) {
2832 int err = anv_gem_set_context_param(device->fd, device->context_id,
2833 I915_CONTEXT_PARAM_PRIORITY,
2834 vk_priority_to_gen(priority));
2835 if (err != 0 && priority > VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT) {
2836 result = vk_error(VK_ERROR_NOT_PERMITTED_EXT);
2837 goto fail_vmas;
2838 }
2839 }
2840
2841 device->info = physical_device->info;
2842 device->isl_dev = physical_device->isl_dev;
2843
2844    /* On Broadwell and later, we can use batch chaining to more efficiently
2845     * implement growing command buffers. Prior to Broadwell, the kernel
2846     * command parser gets in the way and we have to fall back to growing
2847     * the batch.
2848     */
2849 device->can_chain_batches = device->info.gen >= 8;
2850
2851 device->robust_buffer_access = robust_buffer_access;
2852 device->enabled_extensions = enabled_extensions;
2853
2854 const struct anv_instance *instance = physical_device->instance;
2855 for (unsigned i = 0; i < ARRAY_SIZE(device->dispatch.entrypoints); i++) {
2856 /* Vulkan requires that entrypoints for extensions which have not been
2857 * enabled must not be advertised.
2858 */
2859 if (!anv_device_entrypoint_is_enabled(i, instance->app_info.api_version,
2860 &instance->enabled_extensions,
2861 &device->enabled_extensions)) {
2862 device->dispatch.entrypoints[i] = NULL;
2863 } else {
2864 device->dispatch.entrypoints[i] =
2865 anv_resolve_device_entrypoint(&device->info, i);
2866 }
2867 }
2868
2869 if (pthread_mutex_init(&device->mutex, NULL) != 0) {
2870 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
2871 goto fail_queue;
2872 }
2873
2874 pthread_condattr_t condattr;
2875 if (pthread_condattr_init(&condattr) != 0) {
2876 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
2877 goto fail_mutex;
2878 }
2879 if (pthread_condattr_setclock(&condattr, CLOCK_MONOTONIC) != 0) {
2880 pthread_condattr_destroy(&condattr);
2881 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
2882 goto fail_mutex;
2883 }
2884 if (pthread_cond_init(&device->queue_submit, &condattr) != 0) {
2885 pthread_condattr_destroy(&condattr);
2886 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
2887 goto fail_mutex;
2888 }
2889 pthread_condattr_destroy(&condattr);
2890
2891 result = anv_bo_cache_init(&device->bo_cache);
2892 if (result != VK_SUCCESS)
2893 goto fail_queue_cond;
2894
2895 anv_bo_pool_init(&device->batch_bo_pool, device);
2896
2897 result = anv_state_pool_init(&device->dynamic_state_pool, device,
2898 DYNAMIC_STATE_POOL_MIN_ADDRESS, 0, 16384);
2899 if (result != VK_SUCCESS)
2900 goto fail_batch_bo_pool;
2901
2902 if (device->info.gen >= 8) {
2903 /* The border color pointer is limited to 24 bits, so we need to make
2904 * sure that any such color used at any point in the program doesn't
2905 * exceed that limit.
2906 * We achieve that by reserving all the custom border colors we support
2907 * right off the bat, so they are close to the base address.
2908 */
2909 anv_state_reserved_pool_init(&device->custom_border_colors,
2910 &device->dynamic_state_pool,
2911 sizeof(struct gen8_border_color),
2912 MAX_CUSTOM_BORDER_COLORS, 64);
2913 }
2914
2915 result = anv_state_pool_init(&device->instruction_state_pool, device,
2916 INSTRUCTION_STATE_POOL_MIN_ADDRESS, 0, 16384);
2917 if (result != VK_SUCCESS)
2918 goto fail_dynamic_state_pool;
2919
2920 result = anv_state_pool_init(&device->surface_state_pool, device,
2921 SURFACE_STATE_POOL_MIN_ADDRESS, 0, 4096);
2922 if (result != VK_SUCCESS)
2923 goto fail_instruction_state_pool;
2924
2925 if (physical_device->use_softpin) {
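      /* The binding table pool is only used with softpin. Its states are
       * addressed relative to the surface state pool's base address with a
       * negative offset (asserted below), placing binding tables just below
       * the surface states they reference in the address space.
       */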
2926 int64_t bt_pool_offset = (int64_t)BINDING_TABLE_POOL_MIN_ADDRESS -
2927 (int64_t)SURFACE_STATE_POOL_MIN_ADDRESS;
2928 assert(INT32_MIN < bt_pool_offset && bt_pool_offset < 0);
2929 result = anv_state_pool_init(&device->binding_table_pool, device,
2930 SURFACE_STATE_POOL_MIN_ADDRESS,
2931 bt_pool_offset, 4096);
2932 if (result != VK_SUCCESS)
2933 goto fail_surface_state_pool;
2934 }
2935
2936 if (device->info.has_aux_map) {
2937 device->aux_map_ctx = gen_aux_map_init(device, &aux_map_allocator,
2938 &physical_device->info);
2939 if (!device->aux_map_ctx)
2940 goto fail_binding_table_pool;
2941 }
2942
2943 result = anv_device_alloc_bo(device, 4096,
2944 ANV_BO_ALLOC_CAPTURE | ANV_BO_ALLOC_MAPPED /* flags */,
2945 0 /* explicit_address */,
2946 &device->workaround_bo);
2947 if (result != VK_SUCCESS)
2948 goto fail_surface_aux_map_pool;
2949
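   /* intel_debug_write_identifiers() stamps driver identification data at
    * the start of the workaround BO; the address we actually use for
    * workarounds starts just past that data, aligned up to 8 bytes.
    */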
2950 device->workaround_address = (struct anv_address) {
2951 .bo = device->workaround_bo,
2952 .offset = align_u32(
2953 intel_debug_write_identifiers(device->workaround_bo->map,
2954 device->workaround_bo->size,
2955 "Anv") + 8, 8),
2956 };
2957
2958 if (!device->info.has_llc) {
2959 gen_clflush_range(device->workaround_bo->map,
2960 device->workaround_address.offset);
2961 }
2962
2963 result = anv_device_init_trivial_batch(device);
2964 if (result != VK_SUCCESS)
2965 goto fail_workaround_bo;
2966
2967 /* Allocate a null surface state at surface state offset 0. This makes
2968 * NULL descriptor handling trivial because we can just memset structures
2969 * to zero and they have a valid descriptor.
2970 */
2971 device->null_surface_state =
2972 anv_state_pool_alloc(&device->surface_state_pool,
2973 device->isl_dev.ss.size,
2974 device->isl_dev.ss.align);
2975 isl_null_fill_state(&device->isl_dev, device->null_surface_state.map,
2976 isl_extent3d(1, 1, 1) /* This shouldn't matter */);
2977 assert(device->null_surface_state.offset == 0);
2978
2979 if (device->info.gen >= 10) {
2980 result = anv_device_init_hiz_clear_value_bo(device);
2981 if (result != VK_SUCCESS)
2982 goto fail_trivial_batch_bo;
2983 }
2984
2985 anv_scratch_pool_init(device, &device->scratch_pool);
2986
2987 switch (device->info.gen) {
2988 case 7:
2989 if (!device->info.is_haswell)
2990 result = gen7_init_device_state(device);
2991 else
2992 result = gen75_init_device_state(device);
2993 break;
2994 case 8:
2995 result = gen8_init_device_state(device);
2996 break;
2997 case 9:
2998 result = gen9_init_device_state(device);
2999 break;
3000 case 10:
3001 result = gen10_init_device_state(device);
3002 break;
3003 case 11:
3004 result = gen11_init_device_state(device);
3005 break;
3006 case 12:
3007 result = gen12_init_device_state(device);
3008 break;
3009 default:
3010 /* Shouldn't get here as we don't create physical devices for any other
3011 * gens. */
3012 unreachable("unhandled gen");
3013 }
3014 if (result != VK_SUCCESS)
3015 goto fail_clear_value_bo;
3016
3017 anv_pipeline_cache_init(&device->default_pipeline_cache, device,
3018 true /* cache_enabled */, false /* external_sync */);
3019
3020 anv_device_init_blorp(device);
3021
3022 anv_device_init_border_colors(device);
3023
3024 anv_device_perf_init(device);
3025
3026 *pDevice = anv_device_to_handle(device);
3027
3028 return VK_SUCCESS;
3029
3030 fail_clear_value_bo:
3031 if (device->info.gen >= 10)
3032 anv_device_release_bo(device, device->hiz_clear_bo);
3033 anv_scratch_pool_finish(device, &device->scratch_pool);
3034 fail_trivial_batch_bo:
3035 anv_device_release_bo(device, device->trivial_batch_bo);
3036 fail_workaround_bo:
3037 anv_device_release_bo(device, device->workaround_bo);
3038 fail_surface_aux_map_pool:
3039 if (device->info.has_aux_map) {
3040 gen_aux_map_finish(device->aux_map_ctx);
3041 device->aux_map_ctx = NULL;
3042 }
3043 fail_binding_table_pool:
3044 if (physical_device->use_softpin)
3045 anv_state_pool_finish(&device->binding_table_pool);
3046 fail_surface_state_pool:
3047 anv_state_pool_finish(&device->surface_state_pool);
3048 fail_instruction_state_pool:
3049 anv_state_pool_finish(&device->instruction_state_pool);
3050 fail_dynamic_state_pool:
3051 if (device->info.gen >= 8)
3052 anv_state_reserved_pool_finish(&device->custom_border_colors);
3053 anv_state_pool_finish(&device->dynamic_state_pool);
3054 fail_batch_bo_pool:
3055 anv_bo_pool_finish(&device->batch_bo_pool);
3056 anv_bo_cache_finish(&device->bo_cache);
3057 fail_queue_cond:
3058 pthread_cond_destroy(&device->queue_submit);
3059 fail_mutex:
3060 pthread_mutex_destroy(&device->mutex);
3061 fail_vmas:
3062 if (physical_device->use_softpin) {
3063 util_vma_heap_finish(&device->vma_hi);
3064 util_vma_heap_finish(&device->vma_cva);
3065 util_vma_heap_finish(&device->vma_lo);
3066 }
3067 fail_queue:
3068 anv_queue_finish(&device->queue);
3069 fail_context_id:
3070 anv_gem_destroy_context(device, device->context_id);
3071 fail_fd:
3072 close(device->fd);
3073 fail_device:
3074 vk_free(&device->vk.alloc, device);
3075
3076 return result;
3077 }
3078
3079 void anv_DestroyDevice(
3080 VkDevice _device,
3081 const VkAllocationCallbacks* pAllocator)
3082 {
3083 ANV_FROM_HANDLE(anv_device, device, _device);
3084
3085 if (!device)
3086 return;
3087
3088 anv_device_finish_blorp(device);
3089
3090 anv_pipeline_cache_finish(&device->default_pipeline_cache);
3091
3092 anv_queue_finish(&device->queue);
3093
3094 #ifdef HAVE_VALGRIND
3095 /* We only need to free these to prevent valgrind errors. The backing
3096 * BO will go away in a couple of lines so we don't actually leak.
3097 */
3098 if (device->info.gen >= 8)
3099 anv_state_reserved_pool_finish(&device->custom_border_colors);
3100 anv_state_pool_free(&device->dynamic_state_pool, device->border_colors);
3101 anv_state_pool_free(&device->dynamic_state_pool, device->slice_hash);
3102 #endif
3103
3104 anv_scratch_pool_finish(device, &device->scratch_pool);
3105
3106 anv_device_release_bo(device, device->workaround_bo);
3107 anv_device_release_bo(device, device->trivial_batch_bo);
3108 if (device->info.gen >= 10)
3109 anv_device_release_bo(device, device->hiz_clear_bo);
3110
3111 if (device->info.has_aux_map) {
3112 gen_aux_map_finish(device->aux_map_ctx);
3113 device->aux_map_ctx = NULL;
3114 }
3115
3116 if (device->physical->use_softpin)
3117 anv_state_pool_finish(&device->binding_table_pool);
3118 anv_state_pool_finish(&device->surface_state_pool);
3119 anv_state_pool_finish(&device->instruction_state_pool);
3120 anv_state_pool_finish(&device->dynamic_state_pool);
3121
3122 anv_bo_pool_finish(&device->batch_bo_pool);
3123
3124 anv_bo_cache_finish(&device->bo_cache);
3125