/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <fcntl.h>
#include "drm-uapi/drm_fourcc.h"
#include "drm-uapi/drm.h"
#include <xf86drm.h>

#include "anv_private.h"
#include "util/debug.h"
#include "util/build_id.h"
#include "util/disk_cache.h"
#include "util/mesa-sha1.h"
#include "util/os_file.h"
#include "util/u_atomic.h"
#include "util/u_string.h"
#include "util/driconf.h"
#include "git_sha1.h"
#include "vk_util.h"
#include "common/gen_aux_map.h"
#include "common/gen_defines.h"
#include "compiler/glsl_types.h"

#include "genxml/gen7_pack.h"

static const char anv_dri_options_xml[] =
DRI_CONF_BEGIN
   DRI_CONF_SECTION_PERFORMANCE
      DRI_CONF_VK_X11_OVERRIDE_MIN_IMAGE_COUNT(0)
      DRI_CONF_VK_X11_STRICT_IMAGE_COUNT("false")
   DRI_CONF_SECTION_END

   DRI_CONF_SECTION_DEBUG
      DRI_CONF_ALWAYS_FLUSH_CACHE("false")
      DRI_CONF_VK_WSI_FORCE_BGRA8_UNORM_FIRST("false")
   DRI_CONF_SECTION_END
DRI_CONF_END;
/* This is probably far too big, but it reflects the max size used for
 * messages in OpenGL's KHR_debug.
 */
#define MAX_DEBUG_MESSAGE_LENGTH 4096

/* Render engine timestamp register */
#define TIMESTAMP 0x2358

static void
compiler_debug_log(void *data, const char *fmt, ...)
{
   char str[MAX_DEBUG_MESSAGE_LENGTH];
   struct anv_device *device = (struct anv_device *)data;
   struct anv_instance *instance = device->physical->instance;

   if (list_is_empty(&instance->debug_report_callbacks.callbacks))
      return;

   va_list args;
   va_start(args, fmt);
   (void) vsnprintf(str, MAX_DEBUG_MESSAGE_LENGTH, fmt, args);
   va_end(args);

   vk_debug_report(&instance->debug_report_callbacks,
                   VK_DEBUG_REPORT_DEBUG_BIT_EXT,
                   VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                   0, 0, 0, "anv", str);
}

static void
compiler_perf_log(void *data, const char *fmt, ...)
{
   va_list args;
   va_start(args, fmt);

   if (unlikely(INTEL_DEBUG & DEBUG_PERF))
      intel_logd_v(fmt, args);

   va_end(args);
}

static uint64_t
anv_compute_heap_size(int fd, uint64_t gtt_size)
{
   /* Query the total ram from the system */
   struct sysinfo info;
   sysinfo(&info);

   uint64_t total_ram = (uint64_t)info.totalram * (uint64_t)info.mem_unit;

   /* We don't want to burn too much ram with the GPU. If the user has 4GiB
    * or less, we use at most half. If they have more than 4GiB, we use 3/4.
    */
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   /* We also want to leave some padding for things we allocate in the driver,
    * so don't go over 3/4 of the GTT either.
    */
   uint64_t available_gtt = gtt_size * 3 / 4;

   return MIN2(available_ram, available_gtt);
}
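
/* A worked example with hypothetical (illustrative, not measured) numbers:
 * on a machine with 16 GiB of RAM and a 256 GiB GTT, total_ram is above the
 * 4 GiB threshold, so available_ram = 16 GiB * 3/4 = 12 GiB, while
 * available_gtt = 256 GiB * 3/4 = 192 GiB; the advertised heap size is then
 * MIN2(12 GiB, 192 GiB) = 12 GiB.
 */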

static VkResult
anv_physical_device_init_heaps(struct anv_physical_device *device, int fd)
{
   if (anv_gem_get_context_param(fd, 0, I915_CONTEXT_PARAM_GTT_SIZE,
                                 &device->gtt_size) == -1) {
      /* If, for whatever reason, we can't actually get the GTT size from the
       * kernel (too old?) fall back to the aperture size.
       */
      anv_perf_warn(NULL, NULL,
                    "Failed to get I915_CONTEXT_PARAM_GTT_SIZE: %m");

      if (gen_get_aperture_size(fd, &device->gtt_size) == -1) {
         return vk_errorfi(device->instance, NULL,
                           VK_ERROR_INITIALIZATION_FAILED,
                           "failed to get aperture size: %m");
      }
   }

   /* We only allow 48-bit addresses with softpin because knowing the actual
    * address is required for the vertex cache flush workaround.
    */
   device->supports_48bit_addresses = (device->info.gen >= 8) &&
                                      device->has_softpin &&
                                      device->gtt_size > (4ULL << 30 /* GiB */);

   uint64_t heap_size = anv_compute_heap_size(fd, device->gtt_size);

   if (heap_size > (2ull << 30) && !device->supports_48bit_addresses) {
      /* When running with an overridden PCI ID, we may get a GTT size from
       * the kernel that is greater than 2 GiB but the execbuf check for 48bit
       * address support can still fail. Just clamp the address space size to
       * 2 GiB if we don't have 48-bit support.
       */
      intel_logw("%s:%d: The kernel reported a GTT size larger than 2 GiB but "
                 "no support for 48-bit addresses",
                 __FILE__, __LINE__);
      heap_size = 2ull << 30;
   }

   device->memory.heap_count = 1;
   device->memory.heaps[0] = (struct anv_memory_heap) {
      .size = heap_size,
      .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
   };

   uint32_t type_count = 0;
   for (uint32_t heap = 0; heap < device->memory.heap_count; heap++) {
      if (device->info.has_llc) {
         /* Big core GPUs share LLC with the CPU and thus one memory type can be
          * both cached and coherent at the same time.
          */
         device->memory.types[type_count++] = (struct anv_memory_type) {
            .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                             VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                             VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
            .heapIndex = heap,
         };
      } else {
         /* The spec requires that we expose a host-visible, coherent memory
          * type, but Atom GPUs don't share LLC. Thus we offer two memory
          * types, giving the application a choice between cached but not
          * coherent, and coherent but uncached (write-combined).
          */
         device->memory.types[type_count++] = (struct anv_memory_type) {
            .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                             VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
            .heapIndex = heap,
         };
         device->memory.types[type_count++] = (struct anv_memory_type) {
            .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                             VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
            .heapIndex = heap,
         };
      }
   }
   device->memory.type_count = type_count;

   return VK_SUCCESS;
}
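
/* A minimal sketch (application-side semantics, not driver code) of the
 * non-LLC layout above: type 0 is DEVICE_LOCAL | HOST_VISIBLE |
 * HOST_COHERENT (write-combined) and type 1 is DEVICE_LOCAL | HOST_VISIBLE |
 * HOST_CACHED. An app that maps type 1 for readbacks must bracket its
 * accesses with vkFlushMappedMemoryRanges()/vkInvalidateMappedMemoryRanges()
 * since that type is not HOST_COHERENT.
 */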

static VkResult
anv_physical_device_init_uuids(struct anv_physical_device *device)
{
   const struct build_id_note *note =
      build_id_find_nhdr_for_addr(anv_physical_device_init_uuids);
   if (!note) {
      return vk_errorfi(device->instance, NULL,
                        VK_ERROR_INITIALIZATION_FAILED,
                        "Failed to find build-id");
   }

   unsigned build_id_len = build_id_length(note);
   if (build_id_len < 20) {
      return vk_errorfi(device->instance, NULL,
                        VK_ERROR_INITIALIZATION_FAILED,
                        "build-id too short. It needs to be a SHA");
   }

   memcpy(device->driver_build_sha1, build_id_data(note), 20);

   struct mesa_sha1 sha1_ctx;
   uint8_t sha1[20];
   STATIC_ASSERT(VK_UUID_SIZE <= sizeof(sha1));

   /* The pipeline cache UUID is used for determining when a pipeline cache is
    * invalid. It needs both a driver build and the PCI ID of the device.
    */
   _mesa_sha1_init(&sha1_ctx);
   _mesa_sha1_update(&sha1_ctx, build_id_data(note), build_id_len);
   _mesa_sha1_update(&sha1_ctx, &device->info.chipset_id,
                     sizeof(device->info.chipset_id));
   _mesa_sha1_update(&sha1_ctx, &device->always_use_bindless,
                     sizeof(device->always_use_bindless));
   _mesa_sha1_update(&sha1_ctx, &device->has_a64_buffer_access,
                     sizeof(device->has_a64_buffer_access));
   _mesa_sha1_update(&sha1_ctx, &device->has_bindless_images,
                     sizeof(device->has_bindless_images));
   _mesa_sha1_update(&sha1_ctx, &device->has_bindless_samplers,
                     sizeof(device->has_bindless_samplers));
   _mesa_sha1_final(&sha1_ctx, sha1);
   memcpy(device->pipeline_cache_uuid, sha1, VK_UUID_SIZE);

   /* The driver UUID is used for determining shareability of images and
    * memory between two Vulkan instances in separate processes. People who
    * want to share memory need to also check the device UUID (below), so
    * this only needs to be the build-id.
    */
   memcpy(device->driver_uuid, build_id_data(note), VK_UUID_SIZE);

   /* The device UUID uniquely identifies the given device within the machine.
    * Since we never have more than one device, this doesn't need to be a real
    * UUID. However, on the off-chance that someone tries to use this to
    * cache pre-tiled images or something of the like, we use the PCI ID and
    * some bits of ISL info to ensure that this is safe.
    */
   _mesa_sha1_init(&sha1_ctx);
   _mesa_sha1_update(&sha1_ctx, &device->info.chipset_id,
                     sizeof(device->info.chipset_id));
   _mesa_sha1_update(&sha1_ctx, &device->isl_dev.has_bit6_swizzling,
                     sizeof(device->isl_dev.has_bit6_swizzling));
   _mesa_sha1_final(&sha1_ctx, sha1);
   memcpy(device->device_uuid, sha1, VK_UUID_SIZE);

   return VK_SUCCESS;
}

static void
anv_physical_device_init_disk_cache(struct anv_physical_device *device)
{
#ifdef ENABLE_SHADER_CACHE
   char renderer[10];
   ASSERTED int len = snprintf(renderer, sizeof(renderer), "anv_%04x",
                               device->info.chipset_id);
   assert(len == sizeof(renderer) - 2);

   char timestamp[41];
   _mesa_sha1_format(timestamp, device->driver_build_sha1);

   const uint64_t driver_flags =
      brw_get_compiler_config_value(device->compiler);
   device->disk_cache = disk_cache_create(renderer, timestamp, driver_flags);
#else
   device->disk_cache = NULL;
#endif
}
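
/* For example (hypothetical PCI ID), chipset_id 0x9bc5 yields the renderer
 * string "anv_9bc5": 8 characters plus the terminating NUL, which is why the
 * assert above checks len == sizeof(renderer) - 2.
 */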

static void
anv_physical_device_free_disk_cache(struct anv_physical_device *device)
{
#ifdef ENABLE_SHADER_CACHE
   if (device->disk_cache)
      disk_cache_destroy(device->disk_cache);
#else
   assert(device->disk_cache == NULL);
#endif
}

static uint64_t
get_available_system_memory()
{
   char *meminfo = os_read_file("/proc/meminfo", NULL);
   if (!meminfo)
      return 0;

   char *str = strstr(meminfo, "MemAvailable:");
   if (!str) {
      free(meminfo);
      return 0;
   }

   uint64_t kb_mem_available;
   /* The MemAvailable figure is printed in decimal, so it must be parsed
    * with PRIu64; PRIx64 would silently misparse the value as hex.
    */
   if (sscanf(str, "MemAvailable: %" PRIu64, &kb_mem_available) == 1) {
      free(meminfo);
      return kb_mem_available << 10;
   }

   free(meminfo);
   return 0;
}
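
/* A typical /proc/meminfo line (illustrative value) looks like:
 *
 *    MemAvailable:    8012345 kB
 *
 * The "kB" here is really KiB, so the << 10 above converts the parsed value
 * to bytes.
 */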

static VkResult
anv_physical_device_try_create(struct anv_instance *instance,
                               drmDevicePtr drm_device,
                               struct anv_physical_device **device_out)
{
   const char *primary_path = drm_device->nodes[DRM_NODE_PRIMARY];
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   VkResult result;
   int fd;
   int master_fd = -1;

   brw_process_intel_debug_variable();

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0)
      return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);

   struct gen_device_info devinfo;
   if (!gen_get_device_info_from_fd(fd, &devinfo)) {
      result = vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
      goto fail_fd;
   }

   const char *device_name = gen_get_device_name(devinfo.chipset_id);

   if (devinfo.is_haswell) {
      intel_logw("Haswell Vulkan support is incomplete");
   } else if (devinfo.gen == 7 && !devinfo.is_baytrail) {
      intel_logw("Ivy Bridge Vulkan support is incomplete");
   } else if (devinfo.gen == 7 && devinfo.is_baytrail) {
      intel_logw("Bay Trail Vulkan support is incomplete");
   } else if (devinfo.gen >= 8 && devinfo.gen <= 11) {
      /* Gen8-11 fully supported */
   } else if (devinfo.gen == 12) {
      intel_logw("Vulkan is not yet fully supported on gen12");
   } else {
      result = vk_errorfi(instance, NULL, VK_ERROR_INCOMPATIBLE_DRIVER,
                          "Vulkan not yet supported on %s", device_name);
      goto fail_fd;
   }

   struct anv_physical_device *device =
      vk_alloc(&instance->alloc, sizeof(*device), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (device == NULL) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_fd;
   }

   vk_object_base_init(NULL, &device->base, VK_OBJECT_TYPE_PHYSICAL_DEVICE);
   device->instance = instance;

   assert(strlen(path) < ARRAY_SIZE(device->path));
   snprintf(device->path, ARRAY_SIZE(device->path), "%s", path);

   device->info = devinfo;
   device->name = device_name;

   device->no_hw = device->info.no_hw;
   if (getenv("INTEL_NO_HW") != NULL)
      device->no_hw = true;

   device->pci_info.domain = drm_device->businfo.pci->domain;
   device->pci_info.bus = drm_device->businfo.pci->bus;
   device->pci_info.device = drm_device->businfo.pci->dev;
   device->pci_info.function = drm_device->businfo.pci->func;

   device->cmd_parser_version = -1;
   if (device->info.gen == 7) {
      device->cmd_parser_version =
         anv_gem_get_param(fd, I915_PARAM_CMD_PARSER_VERSION);
      if (device->cmd_parser_version == -1) {
         result = vk_errorfi(device->instance, NULL,
                             VK_ERROR_INITIALIZATION_FAILED,
                             "failed to get command parser version");
         goto fail_alloc;
      }
   }

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT)) {
      result = vk_errorfi(device->instance, NULL,
                          VK_ERROR_INITIALIZATION_FAILED,
                          "kernel missing gem wait");
      goto fail_alloc;
   }

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2)) {
      result = vk_errorfi(device->instance, NULL,
                          VK_ERROR_INITIALIZATION_FAILED,
                          "kernel missing execbuf2");
      goto fail_alloc;
   }

   if (!device->info.has_llc &&
       anv_gem_get_param(fd, I915_PARAM_MMAP_VERSION) < 1) {
      result = vk_errorfi(device->instance, NULL,
                          VK_ERROR_INITIALIZATION_FAILED,
                          "kernel missing wc mmap");
      goto fail_alloc;
   }

   device->has_softpin = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_SOFTPIN);
   device->has_exec_async = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_ASYNC);
   device->has_exec_capture = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_CAPTURE);
   device->has_exec_fence = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_FENCE);
   device->has_syncobj = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_FENCE_ARRAY);
   device->has_syncobj_wait = device->has_syncobj &&
                              anv_gem_supports_syncobj_wait(fd);
   device->has_syncobj_wait_available =
      anv_gem_get_drm_cap(fd, DRM_CAP_SYNCOBJ_TIMELINE) != 0;

   device->has_context_priority = anv_gem_has_context_priority(fd);

   result = anv_physical_device_init_heaps(device, fd);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   device->use_softpin = device->has_softpin &&
                         device->supports_48bit_addresses;

   device->has_context_isolation =
      anv_gem_get_param(fd, I915_PARAM_HAS_CONTEXT_ISOLATION);

   device->has_exec_timeline =
      anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_TIMELINE_FENCES);
   if (env_var_as_boolean("ANV_QUEUE_THREAD_DISABLE", false))
      device->has_exec_timeline = false;

   device->always_use_bindless =
      env_var_as_boolean("ANV_ALWAYS_BINDLESS", false);

   device->use_call_secondary =
      device->use_softpin &&
      !env_var_as_boolean("ANV_DISABLE_SECONDARY_CMD_BUFFER_CALLS", false);

   /* We first got the A64 messages on broadwell and we can only use them if
    * we can pass addresses directly into the shader which requires softpin.
    */
   device->has_a64_buffer_access = device->info.gen >= 8 &&
                                   device->use_softpin;

   /* We first get bindless image access on Skylake and we can only really do
    * it if we don't have any relocations so we need softpin.
    */
   device->has_bindless_images = device->info.gen >= 9 &&
                                 device->use_softpin;

   /* We've had bindless samplers since Ivy Bridge (forever in Vulkan terms)
    * because it's just a matter of setting the sampler address in the sample
    * message header. However, we've not bothered to wire it up for vec4 so
    * we leave it disabled on gen7.
    */
   device->has_bindless_samplers = device->info.gen >= 8;

   device->has_implicit_ccs = device->info.has_aux_map;

   /* Check if we can read the GPU timestamp register from the CPU */
   uint64_t u64_ignore;
   device->has_reg_timestamp = anv_gem_reg_read(fd, TIMESTAMP | I915_REG_READ_8B_WA,
                                                &u64_ignore) == 0;

   device->has_mem_available = get_available_system_memory() != 0;

   device->always_flush_cache =
      driQueryOptionb(&instance->dri_options, "always_flush_cache");

   device->has_mmap_offset =
      anv_gem_get_param(fd, I915_PARAM_MMAP_GTT_VERSION) >= 4;

   /* GENs prior to 8 do not support EU/Subslice info */
   if (device->info.gen >= 8) {
      device->subslice_total = anv_gem_get_param(fd, I915_PARAM_SUBSLICE_TOTAL);
      device->eu_total = anv_gem_get_param(fd, I915_PARAM_EU_TOTAL);

      /* Without this information, we cannot get the right Braswell brand
       * strings, and we have to use conservative numbers for GPGPU on many
       * platforms, but otherwise, things will just work.
       */
      if (device->subslice_total < 1 || device->eu_total < 1) {
         intel_logw("Kernel 4.1 required to properly query GPU properties");
      }
   } else if (device->info.gen == 7) {
      device->subslice_total = 1 << (device->info.gt - 1);
   }

   if (device->info.is_cherryview &&
       device->subslice_total > 0 && device->eu_total > 0) {
      /* Logical CS threads = EUs per subslice * num threads per EU */
      uint32_t max_cs_threads =
         device->eu_total / device->subslice_total * device->info.num_thread_per_eu;

      /* Fuse configurations may give more threads than expected, never less. */
      if (max_cs_threads > device->info.max_cs_threads)
         device->info.max_cs_threads = max_cs_threads;
   }

   device->compiler = brw_compiler_create(NULL, &device->info);
   if (device->compiler == NULL) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_alloc;
   }
   device->compiler->shader_debug_log = compiler_debug_log;
   device->compiler->shader_perf_log = compiler_perf_log;
   device->compiler->supports_pull_constants = false;
   device->compiler->constant_buffer_0_is_relative =
      device->info.gen < 8 || !device->has_context_isolation;
   device->compiler->supports_shader_constants = true;
   device->compiler->compact_params = false;

   /* Broadwell PRM says:
    *
    *    "Before Gen8, there was a historical configuration control field to
    *    swizzle address bit[6] for in X/Y tiling modes. This was set in three
    *    different places: TILECTL[1:0], ARB_MODE[5:4], and
    *    DISP_ARB_CTL[14:13].
    *
    *    For Gen8 and subsequent generations, the swizzle fields are all
    *    reserved, and the CPU's memory controller performs all address
    *    swizzling modifications."
    */
   bool swizzled =
      device->info.gen < 8 && anv_gem_get_bit6_swizzle(fd, I915_TILING_X);

   isl_device_init(&device->isl_dev, &device->info, swizzled);

   result = anv_physical_device_init_uuids(device);
   if (result != VK_SUCCESS)
      goto fail_compiler;

   anv_physical_device_init_disk_cache(device);

   if (instance->enabled_extensions.KHR_display) {
      master_fd = open(primary_path, O_RDWR | O_CLOEXEC);
      if (master_fd >= 0) {
         /* prod the device with a GETPARAM call which will fail if
          * we don't have permission to even render on this device
          */
         if (anv_gem_get_param(master_fd, I915_PARAM_CHIPSET_ID) == 0) {
            close(master_fd);
            master_fd = -1;
         }
      }
   }
   device->master_fd = master_fd;

   result = anv_init_wsi(device);
   if (result != VK_SUCCESS)
      goto fail_disk_cache;

   device->perf = anv_get_perf(&device->info, fd);

   anv_physical_device_get_supported_extensions(device,
                                                &device->supported_extensions);


   device->local_fd = fd;

   *device_out = device;

   return VK_SUCCESS;

 fail_disk_cache:
   anv_physical_device_free_disk_cache(device);
 fail_compiler:
   ralloc_free(device->compiler);
 fail_alloc:
   vk_free(&instance->alloc, device);
 fail_fd:
   close(fd);
   if (master_fd != -1)
      close(master_fd);
   return result;
}

static void
anv_physical_device_destroy(struct anv_physical_device *device)
{
   anv_finish_wsi(device);
   anv_physical_device_free_disk_cache(device);
   ralloc_free(device->compiler);
   ralloc_free(device->perf);
   close(device->local_fd);
   if (device->master_fd >= 0)
      close(device->master_fd);
   vk_object_base_finish(&device->base);
   vk_free(&device->instance->alloc, device);
}

static void *
default_alloc_func(void *pUserData, size_t size, size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static void *
default_realloc_func(void *pUserData, void *pOriginal, size_t size,
                     size_t align, VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};

VkResult anv_EnumerateInstanceExtensionProperties(
    const char*                                 pLayerName,
    uint32_t*                                   pPropertyCount,
    VkExtensionProperties*                      pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   for (int i = 0; i < ANV_INSTANCE_EXTENSION_COUNT; i++) {
      if (anv_instance_extensions_supported.extensions[i]) {
         vk_outarray_append(&out, prop) {
            *prop = anv_instance_extensions[i];
         }
      }
   }

   return vk_outarray_status(&out);
}
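
/* VK_OUTARRAY_MAKE/vk_outarray_append implement the standard Vulkan
 * two-call idiom: when pProperties is NULL only *pPropertyCount is written,
 * and when it is non-NULL at most *pPropertyCount entries are filled and
 * VK_INCOMPLETE is returned if the array was too small. A caller would
 * typically do (sketch, error handling omitted):
 *
 *    uint32_t count = 0;
 *    vkEnumerateInstanceExtensionProperties(NULL, &count, NULL);
 *    VkExtensionProperties *props = malloc(count * sizeof(*props));
 *    vkEnumerateInstanceExtensionProperties(NULL, &count, props);
 */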

VkResult anv_CreateInstance(
    const VkInstanceCreateInfo*                 pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkInstance*                                 pInstance)
{
   struct anv_instance *instance;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   struct anv_instance_extension_table enabled_extensions = {};
   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      int idx;
      for (idx = 0; idx < ANV_INSTANCE_EXTENSION_COUNT; idx++) {
         if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
                    anv_instance_extensions[idx].extensionName) == 0)
            break;
      }

      if (idx >= ANV_INSTANCE_EXTENSION_COUNT)
         return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);

      if (!anv_instance_extensions_supported.extensions[idx])
         return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);

      enabled_extensions.extensions[idx] = true;
   }

   instance = vk_alloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
                        VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(NULL, &instance->base, VK_OBJECT_TYPE_INSTANCE);

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->app_info = (struct anv_app_info) { .api_version = 0 };
   if (pCreateInfo->pApplicationInfo) {
      const VkApplicationInfo *app = pCreateInfo->pApplicationInfo;

      instance->app_info.app_name =
         vk_strdup(&instance->alloc, app->pApplicationName,
                   VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
      instance->app_info.app_version = app->applicationVersion;

      instance->app_info.engine_name =
         vk_strdup(&instance->alloc, app->pEngineName,
                   VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
      instance->app_info.engine_version = app->engineVersion;

      instance->app_info.api_version = app->apiVersion;
   }

   if (instance->app_info.api_version == 0)
      instance->app_info.api_version = VK_API_VERSION_1_0;

   instance->enabled_extensions = enabled_extensions;

   for (unsigned i = 0; i < ARRAY_SIZE(instance->dispatch.entrypoints); i++) {
      /* Vulkan requires that entrypoints for extensions which have not been
       * enabled must not be advertised.
       */
      if (!anv_instance_entrypoint_is_enabled(i, instance->app_info.api_version,
                                              &instance->enabled_extensions)) {
         instance->dispatch.entrypoints[i] = NULL;
      } else {
         instance->dispatch.entrypoints[i] =
            anv_instance_dispatch_table.entrypoints[i];
      }
   }

   for (unsigned i = 0; i < ARRAY_SIZE(instance->physical_device_dispatch.entrypoints); i++) {
      /* Vulkan requires that entrypoints for extensions which have not been
       * enabled must not be advertised.
       */
      if (!anv_physical_device_entrypoint_is_enabled(i, instance->app_info.api_version,
                                                     &instance->enabled_extensions)) {
         instance->physical_device_dispatch.entrypoints[i] = NULL;
      } else {
         instance->physical_device_dispatch.entrypoints[i] =
            anv_physical_device_dispatch_table.entrypoints[i];
      }
   }

   for (unsigned i = 0; i < ARRAY_SIZE(instance->device_dispatch.entrypoints); i++) {
      /* Vulkan requires that entrypoints for extensions which have not been
       * enabled must not be advertised.
       */
      if (!anv_device_entrypoint_is_enabled(i, instance->app_info.api_version,
                                            &instance->enabled_extensions, NULL)) {
         instance->device_dispatch.entrypoints[i] = NULL;
      } else {
         instance->device_dispatch.entrypoints[i] =
            anv_device_dispatch_table.entrypoints[i];
      }
   }

   instance->physical_devices_enumerated = false;
   list_inithead(&instance->physical_devices);

   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
   if (result != VK_SUCCESS) {
      vk_free2(&default_alloc, pAllocator, instance);
      return vk_error(result);
   }

   instance->pipeline_cache_enabled =
      env_var_as_boolean("ANV_ENABLE_PIPELINE_CACHE", true);

   glsl_type_singleton_init_or_ref();

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   driParseOptionInfo(&instance->available_dri_options, anv_dri_options_xml);
   driParseConfigFiles(&instance->dri_options, &instance->available_dri_options,
                       0, "anv", NULL,
                       instance->app_info.app_name,
                       instance->app_info.app_version,
                       instance->app_info.engine_name,
                       instance->app_info.engine_version);

   *pInstance = anv_instance_to_handle(instance);

   return VK_SUCCESS;
}

void anv_DestroyInstance(
    VkInstance                                  _instance,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);

   if (!instance)
      return;

   list_for_each_entry_safe(struct anv_physical_device, pdevice,
                            &instance->physical_devices, link)
      anv_physical_device_destroy(pdevice);

   vk_free(&instance->alloc, (char *)instance->app_info.app_name);
   vk_free(&instance->alloc, (char *)instance->app_info.engine_name);

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   glsl_type_singleton_decref();

   driDestroyOptionCache(&instance->dri_options);
   driDestroyOptionInfo(&instance->available_dri_options);

   vk_object_base_finish(&instance->base);
   vk_free(&instance->alloc, instance);
}

static VkResult
anv_enumerate_physical_devices(struct anv_instance *instance)
{
   if (instance->physical_devices_enumerated)
      return VK_SUCCESS;

   instance->physical_devices_enumerated = true;

   /* TODO: Check for more devices? */
   drmDevicePtr devices[8];
   int max_devices;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
   if (max_devices < 1)
      return VK_SUCCESS;

   VkResult result = VK_SUCCESS;
   for (unsigned i = 0; i < (unsigned)max_devices; i++) {
      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
          devices[i]->bustype == DRM_BUS_PCI &&
          devices[i]->deviceinfo.pci->vendor_id == 0x8086) {

         struct anv_physical_device *pdevice;
         result = anv_physical_device_try_create(instance, devices[i],
                                                 &pdevice);
         /* Incompatible DRM device, skip. */
         if (result == VK_ERROR_INCOMPATIBLE_DRIVER) {
            result = VK_SUCCESS;
            continue;
         }

         /* Error creating the physical device, report the error. */
         if (result != VK_SUCCESS)
            break;

         list_addtail(&pdevice->link, &instance->physical_devices);
      }
   }
   drmFreeDevices(devices, max_devices);

   /* If we successfully enumerated any devices, call it success */
   return result;
}

VkResult anv_EnumeratePhysicalDevices(
    VkInstance                                  _instance,
    uint32_t*                                   pPhysicalDeviceCount,
    VkPhysicalDevice*                           pPhysicalDevices)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);

   VkResult result = anv_enumerate_physical_devices(instance);
   if (result != VK_SUCCESS)
      return result;

   list_for_each_entry(struct anv_physical_device, pdevice,
                       &instance->physical_devices, link) {
      vk_outarray_append(&out, i) {
         *i = anv_physical_device_to_handle(pdevice);
      }
   }

   return vk_outarray_status(&out);
}

VkResult anv_EnumeratePhysicalDeviceGroups(
    VkInstance                                  _instance,
    uint32_t*                                   pPhysicalDeviceGroupCount,
    VkPhysicalDeviceGroupProperties*            pPhysicalDeviceGroupProperties)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
                    pPhysicalDeviceGroupCount);

   VkResult result = anv_enumerate_physical_devices(instance);
   if (result != VK_SUCCESS)
      return result;

   list_for_each_entry(struct anv_physical_device, pdevice,
                       &instance->physical_devices, link) {
      vk_outarray_append(&out, p) {
         p->physicalDeviceCount = 1;
         memset(p->physicalDevices, 0, sizeof(p->physicalDevices));
         p->physicalDevices[0] = anv_physical_device_to_handle(pdevice);
         p->subsetAllocation = false;

         vk_foreach_struct(ext, p->pNext)
            anv_debug_ignored_stype(ext->sType);
      }
   }

   return vk_outarray_status(&out);
}

void anv_GetPhysicalDeviceFeatures(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceFeatures*                   pFeatures)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);

   *pFeatures = (VkPhysicalDeviceFeatures) {
      .robustBufferAccess = true,
      .fullDrawIndexUint32 = true,
      .imageCubeArray = true,
      .independentBlend = true,
      .geometryShader = true,
      .tessellationShader = true,
      .sampleRateShading = true,
      .dualSrcBlend = true,
      .logicOp = true,
      .multiDrawIndirect = true,
      .drawIndirectFirstInstance = true,
      .depthClamp = true,
      .depthBiasClamp = true,
      .fillModeNonSolid = true,
      .depthBounds = pdevice->info.gen >= 12,
      .wideLines = true,
      .largePoints = true,
      .alphaToOne = true,
      .multiViewport = true,
      .samplerAnisotropy = true,
      .textureCompressionETC2 = pdevice->info.gen >= 8 ||
                                pdevice->info.is_baytrail,
      .textureCompressionASTC_LDR = pdevice->info.gen >= 9, /* FINISHME CHV */
      .textureCompressionBC = true,
      .occlusionQueryPrecise = true,
      .pipelineStatisticsQuery = true,
      .fragmentStoresAndAtomics = true,
      .shaderTessellationAndGeometryPointSize = true,
      .shaderImageGatherExtended = true,
      .shaderStorageImageExtendedFormats = true,
      .shaderStorageImageMultisample = false,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = true,
      .shaderUniformBufferArrayDynamicIndexing = true,
      .shaderSampledImageArrayDynamicIndexing = true,
      .shaderStorageBufferArrayDynamicIndexing = true,
      .shaderStorageImageArrayDynamicIndexing = true,
      .shaderClipDistance = true,
      .shaderCullDistance = true,
      .shaderFloat64 = pdevice->info.gen >= 8 &&
                       pdevice->info.has_64bit_float,
      .shaderInt64 = pdevice->info.gen >= 8 &&
                     pdevice->info.has_64bit_int,
      .shaderInt16 = pdevice->info.gen >= 8,
      .shaderResourceMinLod = pdevice->info.gen >= 9,
      .variableMultisampleRate = true,
      .inheritedQueries = true,
   };

   /* We can't do image stores in vec4 shaders */
   pFeatures->vertexPipelineStoresAndAtomics =
      pdevice->compiler->scalar_stage[MESA_SHADER_VERTEX] &&
      pdevice->compiler->scalar_stage[MESA_SHADER_GEOMETRY];

   struct anv_app_info *app_info = &pdevice->instance->app_info;

   /* The new DOOM and Wolfenstein games require depthBounds without
    * checking for it. They seem to run fine without it so just claim it's
    * there and accept the consequences.
    */
   if (app_info->engine_name && strcmp(app_info->engine_name, "idTech") == 0)
      pFeatures->depthBounds = true;
}

static void
anv_get_physical_device_features_1_1(struct anv_physical_device *pdevice,
                                     VkPhysicalDeviceVulkan11Features *f)
{
   assert(f->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES);

   f->storageBuffer16BitAccess = pdevice->info.gen >= 8;
   f->uniformAndStorageBuffer16BitAccess = pdevice->info.gen >= 8;
   f->storagePushConstant16 = pdevice->info.gen >= 8;
   f->storageInputOutput16 = false;
   f->multiview = true;
   f->multiviewGeometryShader = true;
   f->multiviewTessellationShader = true;
   f->variablePointersStorageBuffer = true;
   f->variablePointers = true;
   f->protectedMemory = false;
   f->samplerYcbcrConversion = true;
   f->shaderDrawParameters = true;
}

static void
anv_get_physical_device_features_1_2(struct anv_physical_device *pdevice,
                                     VkPhysicalDeviceVulkan12Features *f)
{
   assert(f->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES);

   f->samplerMirrorClampToEdge = true;
   f->drawIndirectCount = true;
   f->storageBuffer8BitAccess = pdevice->info.gen >= 8;
   f->uniformAndStorageBuffer8BitAccess = pdevice->info.gen >= 8;
   f->storagePushConstant8 = pdevice->info.gen >= 8;
   f->shaderBufferInt64Atomics = pdevice->info.gen >= 9 &&
                                 pdevice->use_softpin;
   f->shaderSharedInt64Atomics = false;
   f->shaderFloat16 = pdevice->info.gen >= 8;
   f->shaderInt8 = pdevice->info.gen >= 8;

   bool descIndexing = pdevice->has_a64_buffer_access &&
                       pdevice->has_bindless_images;
   f->descriptorIndexing = descIndexing;
   f->shaderInputAttachmentArrayDynamicIndexing = false;
   f->shaderUniformTexelBufferArrayDynamicIndexing = descIndexing;
   f->shaderStorageTexelBufferArrayDynamicIndexing = descIndexing;
   f->shaderUniformBufferArrayNonUniformIndexing = false;
   f->shaderSampledImageArrayNonUniformIndexing = descIndexing;
   f->shaderStorageBufferArrayNonUniformIndexing = descIndexing;
   f->shaderStorageImageArrayNonUniformIndexing = descIndexing;
   f->shaderInputAttachmentArrayNonUniformIndexing = false;
   f->shaderUniformTexelBufferArrayNonUniformIndexing = descIndexing;
   f->shaderStorageTexelBufferArrayNonUniformIndexing = descIndexing;
   f->descriptorBindingUniformBufferUpdateAfterBind = false;
   f->descriptorBindingSampledImageUpdateAfterBind = descIndexing;
   f->descriptorBindingStorageImageUpdateAfterBind = descIndexing;
   f->descriptorBindingStorageBufferUpdateAfterBind = descIndexing;
   f->descriptorBindingUniformTexelBufferUpdateAfterBind = descIndexing;
   f->descriptorBindingStorageTexelBufferUpdateAfterBind = descIndexing;
   f->descriptorBindingUpdateUnusedWhilePending = descIndexing;
   f->descriptorBindingPartiallyBound = descIndexing;
   f->descriptorBindingVariableDescriptorCount = false;
   f->runtimeDescriptorArray = descIndexing;

   f->samplerFilterMinmax = pdevice->info.gen >= 9;
   f->scalarBlockLayout = true;
   f->imagelessFramebuffer = true;
   f->uniformBufferStandardLayout = true;
   f->shaderSubgroupExtendedTypes = true;
   f->separateDepthStencilLayouts = true;
   f->hostQueryReset = true;
   f->timelineSemaphore = true;
   f->bufferDeviceAddress = pdevice->has_a64_buffer_access;
   f->bufferDeviceAddressCaptureReplay = pdevice->has_a64_buffer_access;
   f->bufferDeviceAddressMultiDevice = false;
   f->vulkanMemoryModel = true;
   f->vulkanMemoryModelDeviceScope = true;
   f->vulkanMemoryModelAvailabilityVisibilityChains = true;
   f->shaderOutputViewportIndex = true;
   f->shaderOutputLayer = true;
   f->subgroupBroadcastDynamicId = true;
}

void anv_GetPhysicalDeviceFeatures2(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceFeatures2*                  pFeatures)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
   anv_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);

   VkPhysicalDeviceVulkan11Features core_1_1 = {
      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES,
   };
   anv_get_physical_device_features_1_1(pdevice, &core_1_1);

   VkPhysicalDeviceVulkan12Features core_1_2 = {
      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES,
   };
   anv_get_physical_device_features_1_2(pdevice, &core_1_2);

#define CORE_FEATURE(major, minor, feature) \
   features->feature = core_##major##_##minor.feature
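
/* For example, CORE_FEATURE(1, 2, hostQueryReset) expands to
 *
 *    features->hostQueryReset = core_1_2.hostQueryReset;
 *
 * so each extension struct below is filled from the corresponding core
 * feature struct.
 */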


   vk_foreach_struct(ext, pFeatures->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT: {
         VkPhysicalDevice4444FormatsFeaturesEXT *features =
            (VkPhysicalDevice4444FormatsFeaturesEXT *)ext;
         features->formatA4R4G4B4 = true;
         features->formatA4B4G4R4 = false;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR: {
         VkPhysicalDevice8BitStorageFeaturesKHR *features =
            (VkPhysicalDevice8BitStorageFeaturesKHR *)ext;
         CORE_FEATURE(1, 2, storageBuffer8BitAccess);
         CORE_FEATURE(1, 2, uniformAndStorageBuffer8BitAccess);
         CORE_FEATURE(1, 2, storagePushConstant8);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
         VkPhysicalDevice16BitStorageFeatures *features =
            (VkPhysicalDevice16BitStorageFeatures *)ext;
         CORE_FEATURE(1, 1, storageBuffer16BitAccess);
         CORE_FEATURE(1, 1, uniformAndStorageBuffer16BitAccess);
         CORE_FEATURE(1, 1, storagePushConstant16);
         CORE_FEATURE(1, 1, storageInputOutput16);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT: {
         VkPhysicalDeviceBufferDeviceAddressFeaturesEXT *features = (void *)ext;
         features->bufferDeviceAddress = pdevice->has_a64_buffer_access;
         features->bufferDeviceAddressCaptureReplay = false;
         features->bufferDeviceAddressMultiDevice = false;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_KHR: {
         VkPhysicalDeviceBufferDeviceAddressFeaturesKHR *features = (void *)ext;
         CORE_FEATURE(1, 2, bufferDeviceAddress);
         CORE_FEATURE(1, 2, bufferDeviceAddressCaptureReplay);
         CORE_FEATURE(1, 2, bufferDeviceAddressMultiDevice);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV: {
         VkPhysicalDeviceComputeShaderDerivativesFeaturesNV *features =
            (VkPhysicalDeviceComputeShaderDerivativesFeaturesNV *)ext;
         features->computeDerivativeGroupQuads = true;
         features->computeDerivativeGroupLinear = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
         VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
            (VkPhysicalDeviceConditionalRenderingFeaturesEXT*)ext;
         features->conditionalRendering = pdevice->info.gen >= 8 ||
                                          pdevice->info.is_haswell;
         features->inheritedConditionalRendering = pdevice->info.gen >= 8 ||
                                                   pdevice->info.is_haswell;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT: {
         VkPhysicalDeviceCustomBorderColorFeaturesEXT *features =
            (VkPhysicalDeviceCustomBorderColorFeaturesEXT *)ext;
         features->customBorderColors = pdevice->info.gen >= 8;
         features->customBorderColorWithoutFormat = pdevice->info.gen >= 8;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT: {
         VkPhysicalDeviceDepthClipEnableFeaturesEXT *features =
            (VkPhysicalDeviceDepthClipEnableFeaturesEXT *)ext;
         features->depthClipEnable = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR: {
         VkPhysicalDeviceFloat16Int8FeaturesKHR *features = (void *)ext;
         CORE_FEATURE(1, 2, shaderFloat16);
         CORE_FEATURE(1, 2, shaderInt8);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT: {
         VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT *features =
            (VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT *)ext;
         features->fragmentShaderSampleInterlock = pdevice->info.gen >= 9;
         features->fragmentShaderPixelInterlock = pdevice->info.gen >= 9;
         features->fragmentShaderShadingRateInterlock = false;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT: {
         VkPhysicalDeviceHostQueryResetFeaturesEXT *features =
            (VkPhysicalDeviceHostQueryResetFeaturesEXT *)ext;
         CORE_FEATURE(1, 2, hostQueryReset);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
         VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
            (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *)ext;
         CORE_FEATURE(1, 2, shaderInputAttachmentArrayDynamicIndexing);
         CORE_FEATURE(1, 2, shaderUniformTexelBufferArrayDynamicIndexing);
         CORE_FEATURE(1, 2, shaderStorageTexelBufferArrayDynamicIndexing);
         CORE_FEATURE(1, 2, shaderUniformBufferArrayNonUniformIndexing);
         CORE_FEATURE(1, 2, shaderSampledImageArrayNonUniformIndexing);
         CORE_FEATURE(1, 2, shaderStorageBufferArrayNonUniformIndexing);
         CORE_FEATURE(1, 2, shaderStorageImageArrayNonUniformIndexing);
         CORE_FEATURE(1, 2, shaderInputAttachmentArrayNonUniformIndexing);
         CORE_FEATURE(1, 2, shaderUniformTexelBufferArrayNonUniformIndexing);
         CORE_FEATURE(1, 2, shaderStorageTexelBufferArrayNonUniformIndexing);
         CORE_FEATURE(1, 2, descriptorBindingUniformBufferUpdateAfterBind);
         CORE_FEATURE(1, 2, descriptorBindingSampledImageUpdateAfterBind);
         CORE_FEATURE(1, 2, descriptorBindingStorageImageUpdateAfterBind);
         CORE_FEATURE(1, 2, descriptorBindingStorageBufferUpdateAfterBind);
         CORE_FEATURE(1, 2, descriptorBindingUniformTexelBufferUpdateAfterBind);
         CORE_FEATURE(1, 2, descriptorBindingStorageTexelBufferUpdateAfterBind);
         CORE_FEATURE(1, 2, descriptorBindingUpdateUnusedWhilePending);
         CORE_FEATURE(1, 2, descriptorBindingPartiallyBound);
         CORE_FEATURE(1, 2, descriptorBindingVariableDescriptorCount);
         CORE_FEATURE(1, 2, runtimeDescriptorArray);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT: {
         VkPhysicalDeviceImageRobustnessFeaturesEXT *features =
            (VkPhysicalDeviceImageRobustnessFeaturesEXT *)ext;
         features->robustImageAccess = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT: {
         VkPhysicalDeviceIndexTypeUint8FeaturesEXT *features =
            (VkPhysicalDeviceIndexTypeUint8FeaturesEXT *)ext;
         features->indexTypeUint8 = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT: {
         VkPhysicalDeviceInlineUniformBlockFeaturesEXT *features =
            (VkPhysicalDeviceInlineUniformBlockFeaturesEXT *)ext;
         features->inlineUniformBlock = true;
         features->descriptorBindingInlineUniformBlockUpdateAfterBind = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT: {
         VkPhysicalDeviceLineRasterizationFeaturesEXT *features =
            (VkPhysicalDeviceLineRasterizationFeaturesEXT *)ext;
         features->rectangularLines = true;
         features->bresenhamLines = true;
         /* Support for Smooth lines with MSAA was removed on gen11. From the
          * BSpec section "Multisample ModesState" table for "AA Line Support
          * Requirements":
          *
          *    GEN10:BUG:######## NUM_MULTISAMPLES == 1
          *
          * Fortunately, this isn't a case most people care about.
          */
         features->smoothLines = pdevice->info.gen < 10;
         features->stippledRectangularLines = false;
         features->stippledBresenhamLines = true;
         features->stippledSmoothLines = false;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
         VkPhysicalDeviceMultiviewFeatures *features =
            (VkPhysicalDeviceMultiviewFeatures *)ext;
         CORE_FEATURE(1, 1, multiview);
         CORE_FEATURE(1, 1, multiviewGeometryShader);
         CORE_FEATURE(1, 1, multiviewTessellationShader);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES_KHR: {
         VkPhysicalDeviceImagelessFramebufferFeaturesKHR *features =
            (VkPhysicalDeviceImagelessFramebufferFeaturesKHR *)ext;
         CORE_FEATURE(1, 2, imagelessFramebuffer);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR: {
         VkPhysicalDevicePerformanceQueryFeaturesKHR *feature =
            (VkPhysicalDevicePerformanceQueryFeaturesKHR *)ext;
         feature->performanceCounterQueryPools = true;
         /* HW only supports a single configuration at a time. */
         feature->performanceCounterMultipleQueryPools = false;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT: {
         VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT *features =
            (VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT *)ext;
         features->pipelineCreationCacheControl = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR: {
         VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR *features =
            (VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR *)ext;
         features->pipelineExecutableInfo = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT: {
         VkPhysicalDevicePrivateDataFeaturesEXT *features = (void *)ext;
         features->privateData = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
         VkPhysicalDeviceProtectedMemoryFeatures *features = (void *)ext;
         CORE_FEATURE(1, 1, protectedMemory);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT: {
         VkPhysicalDeviceRobustness2FeaturesEXT *features = (void *)ext;
         features->robustBufferAccess2 = true;
         features->robustImageAccess2 = true;
         features->nullDescriptor = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
         VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
            (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
         CORE_FEATURE(1, 1, samplerYcbcrConversion);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES_EXT: {
         VkPhysicalDeviceScalarBlockLayoutFeaturesEXT *features =
            (VkPhysicalDeviceScalarBlockLayoutFeaturesEXT *)ext;
         CORE_FEATURE(1, 2, scalarBlockLayout);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES_KHR: {
         VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR *features =
            (VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR *)ext;
         CORE_FEATURE(1, 2, separateDepthStencilLayouts);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT: {
         VkPhysicalDeviceShaderAtomicFloatFeaturesEXT *features = (void *)ext;
         features->shaderBufferFloat32Atomics = true;
         features->shaderBufferFloat32AtomicAdd = false;
         features->shaderBufferFloat64Atomics = false;
         features->shaderBufferFloat64AtomicAdd = false;
         features->shaderSharedFloat32Atomics = true;
         features->shaderSharedFloat32AtomicAdd = false;
         features->shaderSharedFloat64Atomics = false;
         features->shaderSharedFloat64AtomicAdd = false;
         features->shaderImageFloat32Atomics = true;
         features->shaderImageFloat32AtomicAdd = false;
         features->sparseImageFloat32Atomics = false;
         features->sparseImageFloat32AtomicAdd = false;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR: {
         VkPhysicalDeviceShaderAtomicInt64FeaturesKHR *features = (void *)ext;
         CORE_FEATURE(1, 2, shaderBufferInt64Atomics);
         CORE_FEATURE(1, 2, shaderSharedInt64Atomics);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT: {
         VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT *features = (void *)ext;
         features->shaderDemoteToHelperInvocation = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR: {
         VkPhysicalDeviceShaderClockFeaturesKHR *features =
            (VkPhysicalDeviceShaderClockFeaturesKHR *)ext;
         features->shaderSubgroupClock = true;
         features->shaderDeviceClock = false;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: {
         VkPhysicalDeviceShaderDrawParametersFeatures *features = (void *)ext;
         CORE_FEATURE(1, 1, shaderDrawParameters);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL: {
         VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL *features =
            (VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL *)ext;
         features->shaderIntegerFunctions2 = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES_KHR: {
         VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR *features =
            (VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR *)ext;
         CORE_FEATURE(1, 2, shaderSubgroupExtendedTypes);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT: {
         VkPhysicalDeviceSubgroupSizeControlFeaturesEXT *features =
            (VkPhysicalDeviceSubgroupSizeControlFeaturesEXT *)ext;
         features->subgroupSizeControl = true;
         features->computeFullSubgroups = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT: {
         VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT *features =
            (VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT *)ext;
         features->texelBufferAlignment = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES_KHR: {
         VkPhysicalDeviceTimelineSemaphoreFeaturesKHR *features =
            (VkPhysicalDeviceTimelineSemaphoreFeaturesKHR *) ext;
         CORE_FEATURE(1, 2, timelineSemaphore);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
         VkPhysicalDeviceVariablePointersFeatures *features = (void *)ext;
         CORE_FEATURE(1, 1, variablePointersStorageBuffer);
         CORE_FEATURE(1, 1, variablePointers);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT: {
         VkPhysicalDeviceTransformFeedbackFeaturesEXT *features =
            (VkPhysicalDeviceTransformFeedbackFeaturesEXT *)ext;
         features->transformFeedback = true;
         features->geometryStreams = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES_KHR: {
         VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR *features =
            (VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR *)ext;
         CORE_FEATURE(1, 2, uniformBufferStandardLayout);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT: {
         VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *features =
            (VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *)ext;
         features->vertexAttributeInstanceRateDivisor = true;
         features->vertexAttributeInstanceRateZeroDivisor = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES:
         anv_get_physical_device_features_1_1(pdevice, (void *)ext);
         break;

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES:
         anv_get_physical_device_features_1_2(pdevice, (void *)ext);
         break;

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES_KHR: {
         VkPhysicalDeviceVulkanMemoryModelFeaturesKHR *features = (void *)ext;
         CORE_FEATURE(1, 2, vulkanMemoryModel);
         CORE_FEATURE(1, 2, vulkanMemoryModelDeviceScope);
         CORE_FEATURE(1, 2, vulkanMemoryModelAvailabilityVisibilityChains);
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT: {
         VkPhysicalDeviceYcbcrImageArraysFeaturesEXT *features =
            (VkPhysicalDeviceYcbcrImageArraysFeaturesEXT *)ext;
         features->ycbcrImageArrays = true;
         break;
      }

      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT: {
         VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *features =
            (VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *)ext;
         features->extendedDynamicState = true;
         break;
      }

      default:
         anv_debug_ignored_stype(ext->sType);
         break;
      }
   }

#undef CORE_FEATURE
}

#define MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BUFFERS 64

#define MAX_PER_STAGE_DESCRIPTOR_INPUT_ATTACHMENTS 64
#define MAX_DESCRIPTOR_SET_INPUT_ATTACHMENTS 256

#define MAX_CUSTOM_BORDER_COLORS 4096

void anv_GetPhysicalDeviceProperties(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceProperties*                 pProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
   const struct gen_device_info *devinfo = &pdevice->info;

   /* See assertions made when programming the buffer surface state. */
   const uint32_t max_raw_buffer_sz = devinfo->gen >= 7 ?
                                      (1ul << 30) : (1ul << 27);

   const uint32_t max_ssbos = pdevice->has_a64_buffer_access ? UINT16_MAX : 64;
   const uint32_t max_textures =
      pdevice->has_bindless_images ? UINT16_MAX : 128;
   const uint32_t max_samplers =
      pdevice->has_bindless_samplers ? UINT16_MAX :
      (devinfo->gen >= 8 || devinfo->is_haswell) ? 128 : 16;
   const uint32_t max_images =
      pdevice->has_bindless_images ? UINT16_MAX : MAX_IMAGES;

   /* If we can use bindless for everything, claim a high per-stage limit,
    * otherwise use the binding table size, minus the slots reserved for
    * render targets and one slot for the descriptor buffer.
    */
   const uint32_t max_per_stage =
      pdevice->has_bindless_images && pdevice->has_a64_buffer_access
      ? UINT32_MAX : MAX_BINDING_TABLE_SIZE - MAX_RTS - 1;

   /* Limit max_threads to 64 for the GPGPU_WALKER command */
   const uint32_t max_workgroup_size = 32 * MIN2(64, devinfo->max_cs_threads);
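   /* For example (illustrative device, not a measured value): with
    * max_cs_threads = 56, the limit advertised below is 32 * MIN2(64, 56) =
    * 1792 invocations per workgroup, where 32 is the widest (SIMD32)
    * dispatch per compute thread.
    */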

   VkSampleCountFlags sample_counts =
      isl_device_get_sample_counts(&pdevice->isl_dev);


   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 11),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 11),
      .maxTexelBufferElements = 128 * 1024 * 1024,
      .maxUniformBufferRange = (1ul << 27),
      .maxStorageBufferRange = max_raw_buffer_sz,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .maxSamplerAllocationCount = 64 * 1024,
      .bufferImageGranularity = 64, /* A cache line */
      .sparseAddressSpaceSize = 0,
      .maxBoundDescriptorSets = MAX_SETS,
      .maxPerStageDescriptorSamplers = max_samplers,
      .maxPerStageDescriptorUniformBuffers = MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BUFFERS,
      .maxPerStageDescriptorStorageBuffers = max_ssbos,
      .maxPerStageDescriptorSampledImages = max_textures,
      .maxPerStageDescriptorStorageImages = max_images,
      .maxPerStageDescriptorInputAttachments = MAX_PER_STAGE_DESCRIPTOR_INPUT_ATTACHMENTS,
      .maxPerStageResources = max_per_stage,
      .maxDescriptorSetSamplers = 6 * max_samplers, /* number of stages * maxPerStageDescriptorSamplers */
      .maxDescriptorSetUniformBuffers = 6 * MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BUFFERS, /* number of stages * maxPerStageDescriptorUniformBuffers */
      .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2,
      .maxDescriptorSetStorageBuffers = 6 * max_ssbos, /* number of stages * maxPerStageDescriptorStorageBuffers */
      .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2,
      .maxDescriptorSetSampledImages = 6 * max_textures, /* number of stages * maxPerStageDescriptorSampledImages */
      .maxDescriptorSetStorageImages = 6 * max_images, /* number of stages * maxPerStageDescriptorStorageImages */
      .maxDescriptorSetInputAttachments = MAX_DESCRIPTOR_SET_INPUT_ATTACHMENTS,
      .maxVertexInputAttributes = MAX_VBS,
      .maxVertexInputBindings = MAX_VBS,
      .maxVertexInputAttributeOffset = 2047,
      .maxVertexInputBindingStride = 2048,
      .maxVertexOutputComponents = 128,
      .maxTessellationGenerationLevel = 64,
      .maxTessellationPatchSize = 32,
      .maxTessellationControlPerVertexInputComponents = 128,
      .maxTessellationControlPerVertexOutputComponents = 128,
      .maxTessellationControlPerPatchOutputComponents = 128,
      .maxTessellationControlTotalOutputComponents = 2048,
      .maxTessellationEvaluationInputComponents = 128,
      .maxTessellationEvaluationOutputComponents = 128,
      .maxGeometryShaderInvocations = 32,
      .maxGeometryInputComponents = 64,
      .maxGeometryOutputComponents = 128,
      .maxGeometryOutputVertices = 256,
      .maxGeometryTotalOutputComponents = 1024,
      .maxFragmentInputComponents = 116, /* 128 components - (PSIZ, CLIP_DIST0, CLIP_DIST1) */
      .maxFragmentOutputAttachments = 8,
      .maxFragmentDualSrcAttachments = 1,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 64 * 1024,
      .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations = max_workgroup_size,
      .maxComputeWorkGroupSize = {
         max_workgroup_size,
         max_workgroup_size,
         max_workgroup_size,
      },
      .subPixelPrecisionBits = 8,
      .subTexelPrecisionBits = 8,
      .mipmapPrecisionBits = 8,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectCount = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { INT16_MIN, INT16_MAX },
      .viewportSubPixelBits = 13, /* We take a float? */
      .minMemoryMapAlignment = 4096, /* A page */
      /* The dataport requires texel alignment so we need to assume a worst
       * case of R32G32B32A32 which is 16 bytes.
       */
      .minTexelBufferOffsetAlignment = 16,
      .minUniformBufferOffsetAlignment = ANV_UBO_ALIGNMENT,
      .minStorageBufferOffsetAlignment = 4,
      .minTexelOffset = -8,
      .maxTexelOffset = 7,
      .minTexelGatherOffset = -32,
      .maxTexelGatherOffset = 31,
1615 .minInterpolationOffset = -0.5,
1616 .maxInterpolationOffset = 0.4375,
1617 .subPixelInterpolationOffsetBits = 4,
1618 .maxFramebufferWidth = (1 << 14),
1619 .maxFramebufferHeight = (1 << 14),
1620 .maxFramebufferLayers = (1 << 11),
1621 .framebufferColorSampleCounts = sample_counts,
1622 .framebufferDepthSampleCounts = sample_counts,
1623 .framebufferStencilSampleCounts = sample_counts,
1624 .framebufferNoAttachmentsSampleCounts = sample_counts,
1625 .maxColorAttachments = MAX_RTS,
1626 .sampledImageColorSampleCounts = sample_counts,
1627 .sampledImageIntegerSampleCounts = sample_counts,
1628 .sampledImageDepthSampleCounts = sample_counts,
1629 .sampledImageStencilSampleCounts = sample_counts,
1630 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
1631 .maxSampleMaskWords = 1,
1632 .timestampComputeAndGraphics = true,
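/* Nanoseconds per timestamp tick; e.g. a hypothetical 12 MHz
* timestamp_frequency would yield 1e9 / 12e6 = ~83.3 ns per tick.
*/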
1633 .timestampPeriod = 1000000000.0 / devinfo->timestamp_frequency,
1634 .maxClipDistances = 8,
1635 .maxCullDistances = 8,
1636 .maxCombinedClipAndCullDistances = 8,
1637 .discreteQueuePriorities = 2,
1638 .pointSizeRange = { 0.125, 255.875 },
1639 .lineWidthRange = {
1640 0.0,
1641 (devinfo->gen >= 9 || devinfo->is_cherryview) ?
1642 2047.9921875 : 7.9921875,
1643 },
1644 .pointSizeGranularity = (1.0 / 8.0),
1645 .lineWidthGranularity = (1.0 / 128.0),
1646 .strictLines = false,
1647 .standardSampleLocations = true,
1648 .optimalBufferCopyOffsetAlignment = 128,
1649 .optimalBufferCopyRowPitchAlignment = 128,
1650 .nonCoherentAtomSize = 64,
1651 };
1652
1653 *pProperties = (VkPhysicalDeviceProperties) {
1654 .apiVersion = anv_physical_device_api_version(pdevice),
1655 .driverVersion = vk_get_driver_version(),
1656 .vendorID = 0x8086,
1657 .deviceID = pdevice->info.chipset_id,
1658 .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
1659 .limits = limits,
1660 .sparseProperties = {0}, /* Broadwell doesn't do sparse. */
1661 };
1662
1663 snprintf(pProperties->deviceName, sizeof(pProperties->deviceName),
1664 "%s", pdevice->name);
1665 memcpy(pProperties->pipelineCacheUUID,
1666 pdevice->pipeline_cache_uuid, VK_UUID_SIZE);
1667 }
1668
1669 static void
1670 anv_get_physical_device_properties_1_1(struct anv_physical_device *pdevice,
1671 VkPhysicalDeviceVulkan11Properties *p)
1672 {
1673 assert(p->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES);
1674
1675 memcpy(p->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
1676 memcpy(p->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
1677 memset(p->deviceLUID, 0, VK_LUID_SIZE);
1678 p->deviceNodeMask = 0;
1679 p->deviceLUIDValid = false;
1680
1681 p->subgroupSize = BRW_SUBGROUP_SIZE;
1682 VkShaderStageFlags scalar_stages = 0;
1683 for (unsigned stage = 0; stage < MESA_SHADER_STAGES; stage++) {
1684 if (pdevice->compiler->scalar_stage[stage])
1685 scalar_stages |= mesa_to_vk_shader_stage(stage);
1686 }
1687 p->subgroupSupportedStages = scalar_stages;
1688 p->subgroupSupportedOperations = VK_SUBGROUP_FEATURE_BASIC_BIT |
1689 VK_SUBGROUP_FEATURE_VOTE_BIT |
1690 VK_SUBGROUP_FEATURE_BALLOT_BIT |
1691 VK_SUBGROUP_FEATURE_SHUFFLE_BIT |
1692 VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT |
1693 VK_SUBGROUP_FEATURE_QUAD_BIT;
1694 if (pdevice->info.gen >= 8) {
1695 /* TODO: There's no technical reason why these can't be made to
1696 * work on gen7 but they don't at the moment so it's better to
1697 * leave the feature disabled than enabled and broken.
1698 */
1699 p->subgroupSupportedOperations |= VK_SUBGROUP_FEATURE_ARITHMETIC_BIT |
1700 VK_SUBGROUP_FEATURE_CLUSTERED_BIT;
1701 }
1702 p->subgroupQuadOperationsInAllStages = pdevice->info.gen >= 8;
1703
1704 p->pointClippingBehavior = VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY;
1705 p->maxMultiviewViewCount = 16;
1706 p->maxMultiviewInstanceIndex = UINT32_MAX / 16;
1707 p->protectedNoFault = false;
1708 /* This value doesn't matter for us today as our per-stage descriptors are
1709 * the real limit.
1710 */
1711 p->maxPerSetDescriptors = 1024;
1712 p->maxMemoryAllocationSize = MAX_MEMORY_ALLOCATION_SIZE;
1713 }
1714
1715 static void
1716 anv_get_physical_device_properties_1_2(struct anv_physical_device *pdevice,
1717 VkPhysicalDeviceVulkan12Properties *p)
1718 {
1719 assert(p->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES);
1720
1721 p->driverID = VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA_KHR;
1722 memset(p->driverName, 0, sizeof(p->driverName));
1723 snprintf(p->driverName, VK_MAX_DRIVER_NAME_SIZE_KHR,
1724 "Intel open-source Mesa driver");
1725 memset(p->driverInfo, 0, sizeof(p->driverInfo));
1726 snprintf(p->driverInfo, VK_MAX_DRIVER_INFO_SIZE_KHR,
1727 "Mesa " PACKAGE_VERSION MESA_GIT_SHA1);
1728 p->conformanceVersion = (VkConformanceVersionKHR) {
1729 .major = 1,
1730 .minor = 2,
1731 .subminor = 0,
1732 .patch = 0,
1733 };
1734
1735 p->denormBehaviorIndependence =
1736 VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL_KHR;
1737 p->roundingModeIndependence =
1738 VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE_KHR;
1739
1740 /* Broadwell does not support HF denorms and there are restrictions on
1741 * other gens. According to Kabylake's PRM:
1742 *
1743 * "math - Extended Math Function
1744 * [...]
1745 * Restriction : Half-float denorms are always retained."
1746 */
1747 p->shaderDenormFlushToZeroFloat16 = false;
1748 p->shaderDenormPreserveFloat16 = pdevice->info.gen > 8;
1749 p->shaderRoundingModeRTEFloat16 = true;
1750 p->shaderRoundingModeRTZFloat16 = true;
1751 p->shaderSignedZeroInfNanPreserveFloat16 = true;
1752
1753 p->shaderDenormFlushToZeroFloat32 = true;
1754 p->shaderDenormPreserveFloat32 = true;
1755 p->shaderRoundingModeRTEFloat32 = true;
1756 p->shaderRoundingModeRTZFloat32 = true;
1757 p->shaderSignedZeroInfNanPreserveFloat32 = true;
1758
1759 p->shaderDenormFlushToZeroFloat64 = true;
1760 p->shaderDenormPreserveFloat64 = true;
1761 p->shaderRoundingModeRTEFloat64 = true;
1762 p->shaderRoundingModeRTZFloat64 = true;
1763 p->shaderSignedZeroInfNanPreserveFloat64 = true;
1764
1765 /* It's a bit hard to exactly map our implementation to the limits
1766 * described here. The bindless surface handle in the extended
1767 * message descriptors is 20 bits and it's an index into the table of
1768 * RENDER_SURFACE_STATE structs that starts at bindless surface base
1769 * address. Given that most things consume two surface states per
1770 * view (general/sampled for textures and write-only/read-write for
1771 * images), we claim 2^19 things.
1772 *
1773 * For SSBOs, we just use A64 messages so there is no real limit
1774 * there beyond the limit on the total size of a descriptor set.
1775 */
1776 const unsigned max_bindless_views = 1 << 19;
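/* i.e. (1 << 20) surface state handles / 2 states per view = 1 << 19 views. */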
1777 p->maxUpdateAfterBindDescriptorsInAllPools = max_bindless_views;
1778 p->shaderUniformBufferArrayNonUniformIndexingNative = false;
1779 p->shaderSampledImageArrayNonUniformIndexingNative = false;
1780 p->shaderStorageBufferArrayNonUniformIndexingNative = true;
1781 p->shaderStorageImageArrayNonUniformIndexingNative = false;
1782 p->shaderInputAttachmentArrayNonUniformIndexingNative = false;
1783 p->robustBufferAccessUpdateAfterBind = true;
1784 p->quadDivergentImplicitLod = false;
1785 p->maxPerStageDescriptorUpdateAfterBindSamplers = max_bindless_views;
1786 p->maxPerStageDescriptorUpdateAfterBindUniformBuffers = MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BUFFERS;
1787 p->maxPerStageDescriptorUpdateAfterBindStorageBuffers = UINT32_MAX;
1788 p->maxPerStageDescriptorUpdateAfterBindSampledImages = max_bindless_views;
1789 p->maxPerStageDescriptorUpdateAfterBindStorageImages = max_bindless_views;
1790 p->maxPerStageDescriptorUpdateAfterBindInputAttachments = MAX_PER_STAGE_DESCRIPTOR_INPUT_ATTACHMENTS;
1791 p->maxPerStageUpdateAfterBindResources = UINT32_MAX;
1792 p->maxDescriptorSetUpdateAfterBindSamplers = max_bindless_views;
1793 p->maxDescriptorSetUpdateAfterBindUniformBuffers = 6 * MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BUFFERS;
1794 p->maxDescriptorSetUpdateAfterBindUniformBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2;
1795 p->maxDescriptorSetUpdateAfterBindStorageBuffers = UINT32_MAX;
1796 p->maxDescriptorSetUpdateAfterBindStorageBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2;
1797 p->maxDescriptorSetUpdateAfterBindSampledImages = max_bindless_views;
1798 p->maxDescriptorSetUpdateAfterBindStorageImages = max_bindless_views;
1799 p->maxDescriptorSetUpdateAfterBindInputAttachments = MAX_DESCRIPTOR_SET_INPUT_ATTACHMENTS;
1800
1801 /* We support all of the depth resolve modes */
1802 p->supportedDepthResolveModes = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR |
1803 VK_RESOLVE_MODE_AVERAGE_BIT_KHR |
1804 VK_RESOLVE_MODE_MIN_BIT_KHR |
1805 VK_RESOLVE_MODE_MAX_BIT_KHR;
1806 /* Average doesn't make sense for stencil so we don't support that */
1807 p->supportedStencilResolveModes = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR;
1808 if (pdevice->info.gen >= 8) {
1809 /* The advanced stencil resolve modes currently require stencil
1810 * sampling be supported by the hardware.
1811 */
1812 p->supportedStencilResolveModes |= VK_RESOLVE_MODE_MIN_BIT_KHR |
1813 VK_RESOLVE_MODE_MAX_BIT_KHR;
1814 }
1815 p->independentResolveNone = true;
1816 p->independentResolve = true;
1817
1818 p->filterMinmaxSingleComponentFormats = pdevice->info.gen >= 9;
1819 p->filterMinmaxImageComponentMapping = pdevice->info.gen >= 9;
1820
1821 p->maxTimelineSemaphoreValueDifference = UINT64_MAX;
1822
1823 p->framebufferIntegerColorSampleCounts =
1824 isl_device_get_sample_counts(&pdevice->isl_dev);
1825 }
1826
1827 void anv_GetPhysicalDeviceProperties2(
1828 VkPhysicalDevice physicalDevice,
1829 VkPhysicalDeviceProperties2* pProperties)
1830 {
1831 ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
1832
1833 anv_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
1834
1835 VkPhysicalDeviceVulkan11Properties core_1_1 = {
1836 .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES,
1837 };
1838 anv_get_physical_device_properties_1_1(pdevice, &core_1_1);
1839
1840 VkPhysicalDeviceVulkan12Properties core_1_2 = {
1841 .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES,
1842 };
1843 anv_get_physical_device_properties_1_2(pdevice, &core_1_2);
1844
1845 #define CORE_RENAMED_PROPERTY(major, minor, ext_property, core_property) \
1846 memcpy(&properties->ext_property, &core_##major##_##minor.core_property, \
1847 sizeof(core_##major##_##minor.core_property))
1848
1849 #define CORE_PROPERTY(major, minor, property) \
1850 CORE_RENAMED_PROPERTY(major, minor, property, property)
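/* For example, CORE_PROPERTY(1, 2, driverID) expands to a memcpy of
 * core_1_2.driverID into properties->driverID.
 */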
1851
1852 vk_foreach_struct(ext, pProperties->pNext) {
1853 switch (ext->sType) {
1854 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT: {
1855 VkPhysicalDeviceCustomBorderColorPropertiesEXT *properties =
1856 (VkPhysicalDeviceCustomBorderColorPropertiesEXT *)ext;
1857 properties->maxCustomBorderColorSamplers = MAX_CUSTOM_BORDER_COLORS;
1858 break;
1859 }
1860
1861 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES_KHR: {
1862 VkPhysicalDeviceDepthStencilResolvePropertiesKHR *properties =
1863 (VkPhysicalDeviceDepthStencilResolvePropertiesKHR *)ext;
1864 CORE_PROPERTY(1, 2, supportedDepthResolveModes);
1865 CORE_PROPERTY(1, 2, supportedStencilResolveModes);
1866 CORE_PROPERTY(1, 2, independentResolveNone);
1867 CORE_PROPERTY(1, 2, independentResolve);
1868 break;
1869 }
1870
1871 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT: {
1872 VkPhysicalDeviceDescriptorIndexingPropertiesEXT *properties =
1873 (VkPhysicalDeviceDescriptorIndexingPropertiesEXT *)ext;
1874 CORE_PROPERTY(1, 2, maxUpdateAfterBindDescriptorsInAllPools);
1875 CORE_PROPERTY(1, 2, shaderUniformBufferArrayNonUniformIndexingNative);
1876 CORE_PROPERTY(1, 2, shaderSampledImageArrayNonUniformIndexingNative);
1877 CORE_PROPERTY(1, 2, shaderStorageBufferArrayNonUniformIndexingNative);
1878 CORE_PROPERTY(1, 2, shaderStorageImageArrayNonUniformIndexingNative);
1879 CORE_PROPERTY(1, 2, shaderInputAttachmentArrayNonUniformIndexingNative);
1880 CORE_PROPERTY(1, 2, robustBufferAccessUpdateAfterBind);
1881 CORE_PROPERTY(1, 2, quadDivergentImplicitLod);
1882 CORE_PROPERTY(1, 2, maxPerStageDescriptorUpdateAfterBindSamplers);
1883 CORE_PROPERTY(1, 2, maxPerStageDescriptorUpdateAfterBindUniformBuffers);
1884 CORE_PROPERTY(1, 2, maxPerStageDescriptorUpdateAfterBindStorageBuffers);
1885 CORE_PROPERTY(1, 2, maxPerStageDescriptorUpdateAfterBindSampledImages);
1886 CORE_PROPERTY(1, 2, maxPerStageDescriptorUpdateAfterBindStorageImages);
1887 CORE_PROPERTY(1, 2, maxPerStageDescriptorUpdateAfterBindInputAttachments);
1888 CORE_PROPERTY(1, 2, maxPerStageUpdateAfterBindResources);
1889 CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindSamplers);
1890 CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindUniformBuffers);
1891 CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindUniformBuffersDynamic);
1892 CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindStorageBuffers);
1893 CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindStorageBuffersDynamic);
1894 CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindSampledImages);
1895 CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindStorageImages);
1896 CORE_PROPERTY(1, 2, maxDescriptorSetUpdateAfterBindInputAttachments);
1897 break;
1898 }
1899
1900 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR: {
1901 VkPhysicalDeviceDriverPropertiesKHR *properties =
1902 (VkPhysicalDeviceDriverPropertiesKHR *) ext;
1903 CORE_PROPERTY(1, 2, driverID);
1904 CORE_PROPERTY(1, 2, driverName);
1905 CORE_PROPERTY(1, 2, driverInfo);
1906 CORE_PROPERTY(1, 2, conformanceVersion);
1907 break;
1908 }
1909
1910 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT: {
1911 VkPhysicalDeviceExternalMemoryHostPropertiesEXT *props =
1912 (VkPhysicalDeviceExternalMemoryHostPropertiesEXT *) ext;
1913 /* Userptr needs page aligned memory. */
1914 props->minImportedHostPointerAlignment = 4096;
1915 break;
1916 }
1917
1918 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
1919 VkPhysicalDeviceIDProperties *properties =
1920 (VkPhysicalDeviceIDProperties *)ext;
1921 CORE_PROPERTY(1, 1, deviceUUID);
1922 CORE_PROPERTY(1, 1, driverUUID);
1923 CORE_PROPERTY(1, 1, deviceLUID);
1924 CORE_PROPERTY(1, 1, deviceLUIDValid);
1925 break;
1926 }
1927
1928 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT: {
1929 VkPhysicalDeviceInlineUniformBlockPropertiesEXT *props =
1930 (VkPhysicalDeviceInlineUniformBlockPropertiesEXT *)ext;
1931 props->maxInlineUniformBlockSize = MAX_INLINE_UNIFORM_BLOCK_SIZE;
1932 props->maxPerStageDescriptorInlineUniformBlocks =
1933 MAX_INLINE_UNIFORM_BLOCK_DESCRIPTORS;
1934 props->maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks =
1935 MAX_INLINE_UNIFORM_BLOCK_DESCRIPTORS;
1936 props->maxDescriptorSetInlineUniformBlocks =
1937 MAX_INLINE_UNIFORM_BLOCK_DESCRIPTORS;
1938 props->maxDescriptorSetUpdateAfterBindInlineUniformBlocks =
1939 MAX_INLINE_UNIFORM_BLOCK_DESCRIPTORS;
1940 break;
1941 }
1942
1943 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT: {
1944 VkPhysicalDeviceLineRasterizationPropertiesEXT *props =
1945 (VkPhysicalDeviceLineRasterizationPropertiesEXT *)ext;
1946 /* In the Skylake PRM Vol. 7, subsection titled "GIQ (Diamond)
1947 * Sampling Rules - Legacy Mode", it says the following:
1948 *
1949 * "Note that the device divides a pixel into a 16x16 array of
1950 * subpixels, referenced by their upper left corners."
1951 *
1952 * This is the only known reference in the PRMs to the subpixel
1953 * precision of line rasterization and a "16x16 array of subpixels"
1954 * implies 4 subpixel precision bits. Empirical testing has shown
1955 * that 4 subpixel precision bits applies to all line rasterization
1956 * types.
1957 */
1958 props->lineSubPixelPrecisionBits = 4;
1959 break;
1960 }
1961
1962 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
1963 VkPhysicalDeviceMaintenance3Properties *properties =
1964 (VkPhysicalDeviceMaintenance3Properties *)ext;
1965 /* This value doesn't matter for us today as our per-stage
1966 * descriptors are the real limit.
1967 */
1968 CORE_PROPERTY(1, 1, maxPerSetDescriptors);
1969 CORE_PROPERTY(1, 1, maxMemoryAllocationSize);
1970 break;
1971 }
1972
1973 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
1974 VkPhysicalDeviceMultiviewProperties *properties =
1975 (VkPhysicalDeviceMultiviewProperties *)ext;
1976 CORE_PROPERTY(1, 1, maxMultiviewViewCount);
1977 CORE_PROPERTY(1, 1, maxMultiviewInstanceIndex);
1978 break;
1979 }
1980
1981 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT: {
1982 VkPhysicalDevicePCIBusInfoPropertiesEXT *properties =
1983 (VkPhysicalDevicePCIBusInfoPropertiesEXT *)ext;
1984 properties->pciDomain = pdevice->pci_info.domain;
1985 properties->pciBus = pdevice->pci_info.bus;
1986 properties->pciDevice = pdevice->pci_info.device;
1987 properties->pciFunction = pdevice->pci_info.function;
1988 break;
1989 }
1990
1991 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_PROPERTIES_KHR: {
1992 VkPhysicalDevicePerformanceQueryPropertiesKHR *properties =
1993 (VkPhysicalDevicePerformanceQueryPropertiesKHR *)ext;
1994 /* We could support this by spawning a shader to do the equation
1995 * normalization.
1996 */
1997 properties->allowCommandBufferQueryCopies = false;
1998 break;
1999 }
2000
2001 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
2002 VkPhysicalDevicePointClippingProperties *properties =
2003 (VkPhysicalDevicePointClippingProperties *) ext;
2004 CORE_PROPERTY(1, 1, pointClippingBehavior);
2005 break;
2006 }
2007
2008 #pragma GCC diagnostic push
2009 #pragma GCC diagnostic ignored "-Wswitch"
2010 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENTATION_PROPERTIES_ANDROID: {
2011 VkPhysicalDevicePresentationPropertiesANDROID *props =
2012 (VkPhysicalDevicePresentationPropertiesANDROID *)ext;
2013 props->sharedImage = VK_FALSE;
2014 break;
2015 }
2016 #pragma GCC diagnostic pop
2017
2018 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES: {
2019 VkPhysicalDeviceProtectedMemoryProperties *properties =
2020 (VkPhysicalDeviceProtectedMemoryProperties *)ext;
2021 CORE_PROPERTY(1, 1, protectedNoFault);
2022 break;
2023 }
2024
2025 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
2026 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
2027 (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
2028 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
2029 break;
2030 }
2031
2032 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT: {
2033 VkPhysicalDeviceRobustness2PropertiesEXT *properties = (void *)ext;
2034 properties->robustStorageBufferAccessSizeAlignment =
2035 ANV_SSBO_BOUNDS_CHECK_ALIGNMENT;
2036 properties->robustUniformBufferAccessSizeAlignment =
2037 ANV_UBO_ALIGNMENT;
2038 break;
2039 }
2040
2041 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT: {
2042 VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT *properties =
2043 (VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT *)ext;
2044 CORE_PROPERTY(1, 2, filterMinmaxImageComponentMapping);
2045 CORE_PROPERTY(1, 2, filterMinmaxSingleComponentFormats);
2046 break;
2047 }
2048
2049 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES: {
2050 VkPhysicalDeviceSubgroupProperties *properties = (void *)ext;
2051 CORE_PROPERTY(1, 1, subgroupSize);
2052 CORE_RENAMED_PROPERTY(1, 1, supportedStages,
2053 subgroupSupportedStages);
2054 CORE_RENAMED_PROPERTY(1, 1, supportedOperations,
2055 subgroupSupportedOperations);
2056 CORE_RENAMED_PROPERTY(1, 1, quadOperationsInAllStages,
2057 subgroupQuadOperationsInAllStages);
2058 break;
2059 }
2060
2061 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT: {
2062 VkPhysicalDeviceSubgroupSizeControlPropertiesEXT *props =
2063 (VkPhysicalDeviceSubgroupSizeControlPropertiesEXT *)ext;
2064 STATIC_ASSERT(8 <= BRW_SUBGROUP_SIZE && BRW_SUBGROUP_SIZE <= 32);
2065 props->minSubgroupSize = 8;
2066 props->maxSubgroupSize = 32;
2067 props->maxComputeWorkgroupSubgroups = pdevice->info.max_cs_threads;
2068 props->requiredSubgroupSizeStages = VK_SHADER_STAGE_COMPUTE_BIT;
2069 break;
2070 }
2071 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES_KHR : {
2072 VkPhysicalDeviceFloatControlsPropertiesKHR *properties = (void *)ext;
2073 CORE_PROPERTY(1, 2, denormBehaviorIndependence);
2074 CORE_PROPERTY(1, 2, roundingModeIndependence);
2075 CORE_PROPERTY(1, 2, shaderDenormFlushToZeroFloat16);
2076 CORE_PROPERTY(1, 2, shaderDenormPreserveFloat16);
2077 CORE_PROPERTY(1, 2, shaderRoundingModeRTEFloat16);
2078 CORE_PROPERTY(1, 2, shaderRoundingModeRTZFloat16);
2079 CORE_PROPERTY(1, 2, shaderSignedZeroInfNanPreserveFloat16);
2080 CORE_PROPERTY(1, 2, shaderDenormFlushToZeroFloat32);
2081 CORE_PROPERTY(1, 2, shaderDenormPreserveFloat32);
2082 CORE_PROPERTY(1, 2, shaderRoundingModeRTEFloat32);
2083 CORE_PROPERTY(1, 2, shaderRoundingModeRTZFloat32);
2084 CORE_PROPERTY(1, 2, shaderSignedZeroInfNanPreserveFloat32);
2085 CORE_PROPERTY(1, 2, shaderDenormFlushToZeroFloat64);
2086 CORE_PROPERTY(1, 2, shaderDenormPreserveFloat64);
2087 CORE_PROPERTY(1, 2, shaderRoundingModeRTEFloat64);
2088 CORE_PROPERTY(1, 2, shaderRoundingModeRTZFloat64);
2089 CORE_PROPERTY(1, 2, shaderSignedZeroInfNanPreserveFloat64);
2090 break;
2091 }
2092
2093 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT: {
2094 VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT *props =
2095 (VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT *)ext;
2096
2097 /* From the SKL PRM Vol. 2d, docs for RENDER_SURFACE_STATE::Surface
2098 * Base Address:
2099 *
2100 * "For SURFTYPE_BUFFER non-rendertarget surfaces, this field
2101 * specifies the base address of the first element of the surface,
2102 * computed in software by adding the surface base address to the
2103 * byte offset of the element in the buffer. The base address must
2104 * be aligned to element size."
2105 *
2106 * The typed dataport messages require that things be texel aligned.
2107 * Otherwise, we may just load/store the wrong data or, in the worst
2108 * case, there may be hangs.
2109 */
2110 props->storageTexelBufferOffsetAlignmentBytes = 16;
2111 props->storageTexelBufferOffsetSingleTexelAlignment = true;
2112
2113 /* The sampler, however, is much more forgiving and it can handle
2114 * arbitrary byte alignment for linear and buffer surfaces. It's
2115 * hard to find a good PRM citation for this but years of empirical
2116 * experience demonstrate that this is true.
2117 */
2118 props->uniformTexelBufferOffsetAlignmentBytes = 1;
2119 props->uniformTexelBufferOffsetSingleTexelAlignment = false;
2120 break;
2121 }
2122
2123 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES_KHR: {
2124 VkPhysicalDeviceTimelineSemaphorePropertiesKHR *properties =
2125 (VkPhysicalDeviceTimelineSemaphorePropertiesKHR *) ext;
2126 CORE_PROPERTY(1, 2, maxTimelineSemaphoreValueDifference);
2127 break;
2128 }
2129
2130 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT: {
2131 VkPhysicalDeviceTransformFeedbackPropertiesEXT *props =
2132 (VkPhysicalDeviceTransformFeedbackPropertiesEXT *)ext;
2133
2134 props->maxTransformFeedbackStreams = MAX_XFB_STREAMS;
2135 props->maxTransformFeedbackBuffers = MAX_XFB_BUFFERS;
2136 props->maxTransformFeedbackBufferSize = (1ull << 32);
2137 props->maxTransformFeedbackStreamDataSize = 128 * 4;
2138 props->maxTransformFeedbackBufferDataSize = 128 * 4;
2139 props->maxTransformFeedbackBufferDataStride = 2048;
2140 props->transformFeedbackQueries = true;
2141 props->transformFeedbackStreamsLinesTriangles = false;
2142 props->transformFeedbackRasterizationStreamSelect = false;
2143 props->transformFeedbackDraw = true;
2144 break;
2145 }
2146
2147 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT: {
2148 VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *props =
2149 (VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *)ext;
2150 /* We have to restrict this a bit for multiview */
2151 props->maxVertexAttribDivisor = UINT32_MAX / 16;
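/* The / 16 mirrors maxMultiviewViewCount: presumably this keeps a divisor
 * scaled by up to 16 views from overflowing 32 bits.
 */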
2152 break;
2153 }
2154
2155 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES:
2156 anv_get_physical_device_properties_1_1(pdevice, (void *)ext);
2157 break;
2158
2159 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES:
2160 anv_get_physical_device_properties_1_2(pdevice, (void *)ext);
2161 break;
2162
2163 default:
2164 anv_debug_ignored_stype(ext->sType);
2165 break;
2166 }
2167 }
2168
2169 #undef CORE_RENAMED_PROPERTY
2170 #undef CORE_PROPERTY
2171 }
2172
2173 /* We support exactly one queue family. */
2174 static const VkQueueFamilyProperties
2175 anv_queue_family_properties = {
2176 .queueFlags = VK_QUEUE_GRAPHICS_BIT |
2177 VK_QUEUE_COMPUTE_BIT |
2178 VK_QUEUE_TRANSFER_BIT,
2179 .queueCount = 1,
2180 .timestampValidBits = 36, /* XXX: Real value here */
2181 .minImageTransferGranularity = { 1, 1, 1 },
2182 };
2183
2184 void anv_GetPhysicalDeviceQueueFamilyProperties(
2185 VkPhysicalDevice physicalDevice,
2186 uint32_t* pCount,
2187 VkQueueFamilyProperties* pQueueFamilyProperties)
2188 {
2189 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pCount);
2190
2191 vk_outarray_append(&out, p) {
2192 *p = anv_queue_family_properties;
2193 }
2194 }
2195
2196 void anv_GetPhysicalDeviceQueueFamilyProperties2(
2197 VkPhysicalDevice physicalDevice,
2198 uint32_t* pQueueFamilyPropertyCount,
2199 VkQueueFamilyProperties2* pQueueFamilyProperties)
2200 {
2201
2202 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
2203
2204 vk_outarray_append(&out, p) {
2205 p->queueFamilyProperties = anv_queue_family_properties;
2206
2207 vk_foreach_struct(s, p->pNext) {
2208 anv_debug_ignored_stype(s->sType);
2209 }
2210 }
2211 }
2212
2213 void anv_GetPhysicalDeviceMemoryProperties(
2214 VkPhysicalDevice physicalDevice,
2215 VkPhysicalDeviceMemoryProperties* pMemoryProperties)
2216 {
2217 ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
2218
2219 pMemoryProperties->memoryTypeCount = physical_device->memory.type_count;
2220 for (uint32_t i = 0; i < physical_device->memory.type_count; i++) {
2221 pMemoryProperties->memoryTypes[i] = (VkMemoryType) {
2222 .propertyFlags = physical_device->memory.types[i].propertyFlags,
2223 .heapIndex = physical_device->memory.types[i].heapIndex,
2224 };
2225 }
2226
2227 pMemoryProperties->memoryHeapCount = physical_device->memory.heap_count;
2228 for (uint32_t i = 0; i < physical_device->memory.heap_count; i++) {
2229 pMemoryProperties->memoryHeaps[i] = (VkMemoryHeap) {
2230 .size = physical_device->memory.heaps[i].size,
2231 .flags = physical_device->memory.heaps[i].flags,
2232 };
2233 }
2234 }
2235
2236 static void
2237 anv_get_memory_budget(VkPhysicalDevice physicalDevice,
2238 VkPhysicalDeviceMemoryBudgetPropertiesEXT *memoryBudget)
2239 {
2240 ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
2241 uint64_t sys_available = get_available_system_memory();
2242 assert(sys_available > 0);
2243
2244 VkDeviceSize total_heaps_size = 0;
2245 for (size_t i = 0; i < device->memory.heap_count; i++)
2246 total_heaps_size += device->memory.heaps[i].size;
2247
2248 for (size_t i = 0; i < device->memory.heap_count; i++) {
2249 VkDeviceSize heap_size = device->memory.heaps[i].size;
2250 VkDeviceSize heap_used = device->memory.heaps[i].used;
2251 VkDeviceSize heap_budget;
2252
2253 double heap_proportion = (double) heap_size / total_heaps_size;
2254 VkDeviceSize sys_available_prop = sys_available * heap_proportion;
2255
2256 /*
2257 * Let's not incite the app to starve the system: report at most 90% of
2258 * available system memory.
2259 */
2260 uint64_t heap_available = sys_available_prop * 9 / 10;
2261 heap_budget = MIN2(heap_size, heap_used + heap_available);
2262
2263 /*
2264 * Round down to the nearest MB
2265 */
2266 heap_budget &= ~((1ull << 20) - 1);
2267
2268 /*
2269 * The heapBudget value must be non-zero for array elements less than
2270 * VkPhysicalDeviceMemoryProperties::memoryHeapCount. The heapBudget
2271 * value must be less than or equal to VkMemoryHeap::size for each heap.
2272 */
2273 assert(0 < heap_budget && heap_budget <= heap_size);
2274
2275 memoryBudget->heapUsage[i] = heap_used;
2276 memoryBudget->heapBudget[i] = heap_budget;
2277 }
2278
2279 /* The heapBudget and heapUsage values must be zero for array elements
2280 * greater than or equal to VkPhysicalDeviceMemoryProperties::memoryHeapCount
2281 */
2282 for (uint32_t i = device->memory.heap_count; i < VK_MAX_MEMORY_HEAPS; i++) {
2283 memoryBudget->heapBudget[i] = 0;
2284 memoryBudget->heapUsage[i] = 0;
2285 }
2286 }
2287
2288 void anv_GetPhysicalDeviceMemoryProperties2(
2289 VkPhysicalDevice physicalDevice,
2290 VkPhysicalDeviceMemoryProperties2* pMemoryProperties)
2291 {
2292 anv_GetPhysicalDeviceMemoryProperties(physicalDevice,
2293 &pMemoryProperties->memoryProperties);
2294
2295 vk_foreach_struct(ext, pMemoryProperties->pNext) {
2296 switch (ext->sType) {
2297 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT:
2298 anv_get_memory_budget(physicalDevice, (void*)ext);
2299 break;
2300 default:
2301 anv_debug_ignored_stype(ext->sType);
2302 break;
2303 }
2304 }
2305 }
2306
2307 void
2308 anv_GetDeviceGroupPeerMemoryFeatures(
2309 VkDevice device,
2310 uint32_t heapIndex,
2311 uint32_t localDeviceIndex,
2312 uint32_t remoteDeviceIndex,
2313 VkPeerMemoryFeatureFlags* pPeerMemoryFeatures)
2314 {
2315 assert(localDeviceIndex == 0 && remoteDeviceIndex == 0);
2316 *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
2317 VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
2318 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
2319 VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
2320 }
2321
2322 PFN_vkVoidFunction anv_GetInstanceProcAddr(
2323 VkInstance _instance,
2324 const char* pName)
2325 {
2326 ANV_FROM_HANDLE(anv_instance, instance, _instance);
2327
2328 /* The Vulkan 1.0 spec for vkGetInstanceProcAddr has a table specifying
2329 * exactly when we must return a valid function pointer, when to return
2330 * NULL, and when the behavior is left undefined. See that table for details.
2331 */
2332 if (pName == NULL)
2333 return NULL;
2334
2335 #define LOOKUP_ANV_ENTRYPOINT(entrypoint) \
2336 if (strcmp(pName, "vk" #entrypoint) == 0) \
2337 return (PFN_vkVoidFunction)anv_##entrypoint
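/* e.g. LOOKUP_ANV_ENTRYPOINT(CreateInstance) compares pName against
 * "vkCreateInstance" and returns anv_CreateInstance on a match.
 */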
2338
2339 LOOKUP_ANV_ENTRYPOINT(EnumerateInstanceExtensionProperties);
2340 LOOKUP_ANV_ENTRYPOINT(EnumerateInstanceLayerProperties);
2341 LOOKUP_ANV_ENTRYPOINT(EnumerateInstanceVersion);
2342 LOOKUP_ANV_ENTRYPOINT(CreateInstance);
2343
2344 /* GetInstanceProcAddr() can also be called with a NULL instance.
2345 * See https://gitlab.khronos.org/vulkan/vulkan/issues/2057
2346 */
2347 LOOKUP_ANV_ENTRYPOINT(GetInstanceProcAddr);
2348
2349 #undef LOOKUP_ANV_ENTRYPOINT
2350
2351 if (instance == NULL)
2352 return NULL;
2353
2354 int idx = anv_get_instance_entrypoint_index(pName);
2355 if (idx >= 0)
2356 return instance->dispatch.entrypoints[idx];
2357
2358 idx = anv_get_physical_device_entrypoint_index(pName);
2359 if (idx >= 0)
2360 return instance->physical_device_dispatch.entrypoints[idx];
2361
2362 idx = anv_get_device_entrypoint_index(pName);
2363 if (idx >= 0)
2364 return instance->device_dispatch.entrypoints[idx];
2365
2366 return NULL;
2367 }
2368
2369 /* With version 1+ of the loader interface the ICD should expose
2370 * vk_icdGetInstanceProcAddr to work around certain LD_PRELOAD issues seen in apps.
2371 */
2372 PUBLIC
2373 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
2374 VkInstance instance,
2375 const char* pName);
2376
2377 PUBLIC
2378 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
2379 VkInstance instance,
2380 const char* pName)
2381 {
2382 return anv_GetInstanceProcAddr(instance, pName);
2383 }
2384
2385 PFN_vkVoidFunction anv_GetDeviceProcAddr(
2386 VkDevice _device,
2387 const char* pName)
2388 {
2389 ANV_FROM_HANDLE(anv_device, device, _device);
2390
2391 if (!device || !pName)
2392 return NULL;
2393
2394 int idx = anv_get_device_entrypoint_index(pName);
2395 if (idx < 0)
2396 return NULL;
2397
2398 return device->dispatch.entrypoints[idx];
2399 }
2400
2401 /* With version 4+ of the loader interface the ICD should expose
2402 * vk_icdGetPhysicalDeviceProcAddr()
2403 */
2404 PUBLIC
2405 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetPhysicalDeviceProcAddr(
2406 VkInstance _instance,
2407 const char* pName);
2408
2409 PFN_vkVoidFunction vk_icdGetPhysicalDeviceProcAddr(
2410 VkInstance _instance,
2411 const char* pName)
2412 {
2413 ANV_FROM_HANDLE(anv_instance, instance, _instance);
2414
2415 if (!pName || !instance)
2416 return NULL;
2417
2418 int idx = anv_get_physical_device_entrypoint_index(pName);
2419 if (idx < 0)
2420 return NULL;
2421
2422 return instance->physical_device_dispatch.entrypoints[idx];
2423 }
2424
2425
2426 VkResult
2427 anv_CreateDebugReportCallbackEXT(VkInstance _instance,
2428 const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
2429 const VkAllocationCallbacks* pAllocator,
2430 VkDebugReportCallbackEXT* pCallback)
2431 {
2432 ANV_FROM_HANDLE(anv_instance, instance, _instance);
2433 return vk_create_debug_report_callback(&instance->debug_report_callbacks,
2434 pCreateInfo, pAllocator, &instance->alloc,
2435 pCallback);
2436 }
2437
2438 void
2439 anv_DestroyDebugReportCallbackEXT(VkInstance _instance,
2440 VkDebugReportCallbackEXT _callback,
2441 const VkAllocationCallbacks* pAllocator)
2442 {
2443 ANV_FROM_HANDLE(anv_instance, instance, _instance);
2444 vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
2445 _callback, pAllocator, &instance->alloc);
2446 }
2447
2448 void
2449 anv_DebugReportMessageEXT(VkInstance _instance,
2450 VkDebugReportFlagsEXT flags,
2451 VkDebugReportObjectTypeEXT objectType,
2452 uint64_t object,
2453 size_t location,
2454 int32_t messageCode,
2455 const char* pLayerPrefix,
2456 const char* pMessage)
2457 {
2458 ANV_FROM_HANDLE(anv_instance, instance, _instance);
2459 vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
2460 object, location, messageCode, pLayerPrefix, pMessage);
2461 }
2462
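/* Allocate `size` bytes from `pool` at `align` alignment and copy the
 * contents of `p` into the new state.
 */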
2463 static struct anv_state
2464 anv_state_pool_emit_data(struct anv_state_pool *pool, size_t size, size_t align, const void *p)
2465 {
2466 struct anv_state state;
2467
2468 state = anv_state_pool_alloc(pool, size, align);
2469 memcpy(state.map, p, size);
2470
2471 return state;
2472 }
2473
2474 static void
2475 anv_device_init_border_colors(struct anv_device *device)
2476 {
2477 if (device->info.is_haswell) {
2478 static const struct hsw_border_color border_colors[] = {
2479 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 0.0 } },
2480 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 1.0 } },
2481 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = { .float32 = { 1.0, 1.0, 1.0, 1.0 } },
2482 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = { .uint32 = { 0, 0, 0, 0 } },
2483 [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = { .uint32 = { 0, 0, 0, 1 } },
2484 [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = { .uint32 = { 1, 1, 1, 1 } },
2485 };
2486
2487 device->border_colors =
2488 anv_state_pool_emit_data(&device->dynamic_state_pool,
2489 sizeof(border_colors), 512, border_colors);
2490 } else {
2491 static const struct gen8_border_color border_colors[] = {
2492 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 0.0 } },
2493 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 1.0 } },
2494 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = { .float32 = { 1.0, 1.0, 1.0, 1.0 } },
2495 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = { .uint32 = { 0, 0, 0, 0 } },
2496 [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = { .uint32 = { 0, 0, 0, 1 } },
2497 [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = { .uint32 = { 1, 1, 1, 1 } },
2498 };
2499
2500 device->border_colors =
2501 anv_state_pool_emit_data(&device->dynamic_state_pool,
2502 sizeof(border_colors), 64, border_colors);
2503 }
2504 }
2505
2506 static VkResult
2507 anv_device_init_trivial_batch(struct anv_device *device)
2508 {
2509 VkResult result = anv_device_alloc_bo(device, 4096,
2510 ANV_BO_ALLOC_MAPPED,
2511 0 /* explicit_address */,
2512 &device->trivial_batch_bo);
2513 if (result != VK_SUCCESS)
2514 return result;
2515
2516 struct anv_batch batch = {
2517 .start = device->trivial_batch_bo->map,
2518 .next = device->trivial_batch_bo->map,
2519 .end = device->trivial_batch_bo->map + 4096,
2520 };
2521
2522 anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END, bbe);
2523 anv_batch_emit(&batch, GEN7_MI_NOOP, noop);
2524
2525 if (!device->info.has_llc)
2526 gen_clflush_range(batch.start, batch.next - batch.start);
2527
2528 return VK_SUCCESS;
2529 }
2530
2531 VkResult anv_EnumerateDeviceExtensionProperties(
2532 VkPhysicalDevice physicalDevice,
2533 const char* pLayerName,
2534 uint32_t* pPropertyCount,
2535 VkExtensionProperties* pProperties)
2536 {
2537 ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
2538 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
2539
2540 for (int i = 0; i < ANV_DEVICE_EXTENSION_COUNT; i++) {
2541 if (device->supported_extensions.extensions[i]) {
2542 vk_outarray_append(&out, prop) {
2543 *prop = anv_device_extensions[i];
2544 }
2545 }
2546 }
2547
2548 return vk_outarray_status(&out);
2549 }
2550
2551 static int
2552 vk_priority_to_gen(int priority)
2553 {
2554 switch (priority) {
2555 case VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT:
2556 return GEN_CONTEXT_LOW_PRIORITY;
2557 case VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT:
2558 return GEN_CONTEXT_MEDIUM_PRIORITY;
2559 case VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT:
2560 return GEN_CONTEXT_HIGH_PRIORITY;
2561 case VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT:
2562 return GEN_CONTEXT_REALTIME_PRIORITY;
2563 default:
2564 unreachable("Invalid priority");
2565 }
2566 }
2567
2568 static VkResult
2569 anv_device_init_hiz_clear_value_bo(struct anv_device *device)
2570 {
2571 VkResult result = anv_device_alloc_bo(device, 4096,
2572 ANV_BO_ALLOC_MAPPED,
2573 0 /* explicit_address */,
2574 &device->hiz_clear_bo);
2575 if (result != VK_SUCCESS)
2576 return result;
2577
2578 union isl_color_value hiz_clear = { .u32 = { 0, } };
2579 hiz_clear.f32[0] = ANV_HZ_FC_VAL;
2580
2581 memcpy(device->hiz_clear_bo->map, hiz_clear.u32, sizeof(hiz_clear.u32));
2582
2583 if (!device->info.has_llc)
2584 gen_clflush_range(device->hiz_clear_bo->map, sizeof(hiz_clear.u32));
2585
2586 return VK_SUCCESS;
2587 }
2588
2589 static bool
2590 get_bo_from_pool(struct gen_batch_decode_bo *ret,
2591 struct anv_block_pool *pool,
2592 uint64_t address)
2593 {
2594 anv_block_pool_foreach_bo(bo, pool) {
2595 uint64_t bo_address = gen_48b_address(bo->offset);
2596 if (address >= bo_address && address < (bo_address + bo->size)) {
2597 *ret = (struct gen_batch_decode_bo) {
2598 .addr = bo_address,
2599 .size = bo->size,
2600 .map = bo->map,
2601 };
2602 return true;
2603 }
2604 }
2605 return false;
2606 }
2607
2608 /* Find the BO containing the given GPU address, for batch decoding. */
2609 static struct gen_batch_decode_bo
2610 decode_get_bo(void *v_batch, bool ppgtt, uint64_t address)
2611 {
2612 struct anv_device *device = v_batch;
2613 struct gen_batch_decode_bo ret_bo = {};
2614
2615 assert(ppgtt);
2616
2617 if (get_bo_from_pool(&ret_bo, &device->dynamic_state_pool.block_pool, address))
2618 return ret_bo;
2619 if (get_bo_from_pool(&ret_bo, &device->instruction_state_pool.block_pool, address))
2620 return ret_bo;
2621 if (get_bo_from_pool(&ret_bo, &device->binding_table_pool.block_pool, address))
2622 return ret_bo;
2623 if (get_bo_from_pool(&ret_bo, &device->surface_state_pool.block_pool, address))
2624 return ret_bo;
2625
2626 if (!device->cmd_buffer_being_decoded)
2627 return (struct gen_batch_decode_bo) { };
2628
2629 struct anv_batch_bo **bo;
2630
2631 u_vector_foreach(bo, &device->cmd_buffer_being_decoded->seen_bbos) {
2632 /* The decoder zeroes out the top 16 bits, so we need to as well */
2633 uint64_t bo_address = (*bo)->bo->offset & (~0ull >> 16);
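/* (~0ull >> 16) == 0x0000ffffffffffff, i.e. keep only the low 48 bits,
 * matching the gen_48b_address() masking used in get_bo_from_pool().
 */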
2634
2635 if (address >= bo_address && address < bo_address + (*bo)->bo->size) {
2636 return (struct gen_batch_decode_bo) {
2637 .addr = bo_address,
2638 .size = (*bo)->bo->size,
2639 .map = (*bo)->bo->map,
2640 };
2641 }
2642 }
2643
2644 return (struct gen_batch_decode_bo) { };
2645 }
2646
2647 struct gen_aux_map_buffer {
2648 struct gen_buffer base;
2649 struct anv_state state;
2650 };
2651
2652 static struct gen_buffer *
2653 gen_aux_map_buffer_alloc(void *driver_ctx, uint32_t size)
2654 {
2655 struct gen_aux_map_buffer *buf = malloc(sizeof(struct gen_aux_map_buffer));
2656 if (!buf)
2657 return NULL;
2658
2659 struct anv_device *device = (struct anv_device*)driver_ctx;
2660 assert(device->physical->supports_48bit_addresses &&
2661 device->physical->use_softpin);
2662
2663 struct anv_state_pool *pool = &device->dynamic_state_pool;
2664 buf->state = anv_state_pool_alloc(pool, size, size);
2665
2666 buf->base.gpu = pool->block_pool.bo->offset + buf->state.offset;
2667 buf->base.gpu_end = buf->base.gpu + buf->state.alloc_size;
2668 buf->base.map = buf->state.map;
2669 buf->base.driver_bo = &buf->state;
2670 return &buf->base;
2671 }
2672
2673 static void
2674 gen_aux_map_buffer_free(void *driver_ctx, struct gen_buffer *buffer)
2675 {
2676 struct gen_aux_map_buffer *buf = (struct gen_aux_map_buffer*)buffer;
2677 struct anv_device *device = (struct anv_device*)driver_ctx;
2678 struct anv_state_pool *pool = &device->dynamic_state_pool;
2679 anv_state_pool_free(pool, buf->state);
2680 free(buf);
2681 }
2682
2683 static struct gen_mapped_pinned_buffer_alloc aux_map_allocator = {
2684 .alloc = gen_aux_map_buffer_alloc,
2685 .free = gen_aux_map_buffer_free,
2686 };
2687
2688 static VkResult
2689 check_physical_device_features(VkPhysicalDevice physicalDevice,
2690 const VkPhysicalDeviceFeatures *features)
2691 {
2692 VkPhysicalDeviceFeatures supported_features;
2693 anv_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
2694 VkBool32 *supported_feature = (VkBool32 *)&supported_features;
2695 VkBool32 *enabled_feature = (VkBool32 *)features;
2696 unsigned num_features = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
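/* VkPhysicalDeviceFeatures is a struct of nothing but VkBool32 members,
 * so both structs can safely be walked as flat arrays of booleans.
 */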
2697 for (uint32_t i = 0; i < num_features; i++) {
2698 if (enabled_feature[i] && !supported_feature[i])
2699 return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
2700 }
2701
2702 return VK_SUCCESS;
2703 }
2704
2705 VkResult anv_CreateDevice(
2706 VkPhysicalDevice physicalDevice,
2707 const VkDeviceCreateInfo* pCreateInfo,
2708 const VkAllocationCallbacks* pAllocator,
2709 VkDevice* pDevice)
2710 {
2711 ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
2712 VkResult result;
2713 struct anv_device *device;
2714
2715 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);
2716
2717 struct anv_device_extension_table enabled_extensions = { };
2718 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
2719 int idx;
2720 for (idx = 0; idx < ANV_DEVICE_EXTENSION_COUNT; idx++) {
2721 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
2722 anv_device_extensions[idx].extensionName) == 0)
2723 break;
2724 }
2725
2726 if (idx >= ANV_DEVICE_EXTENSION_COUNT)
2727 return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
2728
2729 if (!physical_device->supported_extensions.extensions[idx])
2730 return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
2731
2732 enabled_extensions.extensions[idx] = true;
2733 }
2734
2735 /* Check enabled features */
2736 bool robust_buffer_access = false;
2737 if (pCreateInfo->pEnabledFeatures) {
2738 result = check_physical_device_features(physicalDevice,
2739 pCreateInfo->pEnabledFeatures);
2740 if (result != VK_SUCCESS)
2741 return result;
2742
2743 if (pCreateInfo->pEnabledFeatures->robustBufferAccess)
2744 robust_buffer_access = true;
2745 }
2746
2747 vk_foreach_struct_const(ext, pCreateInfo->pNext) {
2748 switch (ext->sType) {
2749 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2: {
2750 const VkPhysicalDeviceFeatures2 *features = (const void *)ext;
2751 result = check_physical_device_features(physicalDevice,
2752 &features->features);
2753 if (result != VK_SUCCESS)
2754 return result;
2755
2756 if (features->features.robustBufferAccess)
2757 robust_buffer_access = true;
2758 break;
2759 }
2760
2761 default:
2762 /* Don't warn */
2763 break;
2764 }
2765 }
2766
2767 /* Check requested queues and fail if we are requested to create any
2768 * queues with flags we don't support.
2769 */
2770 assert(pCreateInfo->queueCreateInfoCount > 0);
2771 for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
2772 if (pCreateInfo->pQueueCreateInfos[i].flags != 0)
2773 return vk_error(VK_ERROR_INITIALIZATION_FAILED);
2774 }
2775
2776 /* Check if client specified queue priority. */
2777 const VkDeviceQueueGlobalPriorityCreateInfoEXT *queue_priority =
2778 vk_find_struct_const(pCreateInfo->pQueueCreateInfos[0].pNext,
2779 DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT);
2780
2781 VkQueueGlobalPriorityEXT priority =
2782 queue_priority ? queue_priority->globalPriority :
2783 VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT;
2784
2785 device = vk_alloc2(&physical_device->instance->alloc, pAllocator,
2786 sizeof(*device), 8,
2787 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
2788 if (!device)
2789 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2790
2791 vk_device_init(&device->vk, pCreateInfo,
2792 &physical_device->instance->alloc, pAllocator);
2793
2794 if (INTEL_DEBUG & DEBUG_BATCH) {
2795 const unsigned decode_flags =
2796 GEN_BATCH_DECODE_FULL |
2797 ((INTEL_DEBUG & DEBUG_COLOR) ? GEN_BATCH_DECODE_IN_COLOR : 0) |
2798 GEN_BATCH_DECODE_OFFSETS |
2799 GEN_BATCH_DECODE_FLOATS;
2800
2801 gen_batch_decode_ctx_init(&device->decoder_ctx,
2802 &physical_device->info,
2803 stderr, decode_flags, NULL,
2804 decode_get_bo, NULL, device);
2805 }
2806
2807 device->physical = physical_device;
2808 device->no_hw = physical_device->no_hw;
2809 device->_lost = false;
2810
2811 /* XXX(chadv): Can we dup() physicalDevice->fd here? */
2812 device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
2813 if (device->fd == -1) {
2814 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
2815 goto fail_device;
2816 }
2817
2818 device->context_id = anv_gem_create_context(device);
2819 if (device->context_id == -1) {
2820 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
2821 goto fail_fd;
2822 }
2823
2824 result = anv_queue_init(device, &device->queue);
2825 if (result != VK_SUCCESS)
2826 goto fail_context_id;
2827
2828 if (physical_device->use_softpin) {
2829 if (pthread_mutex_init(&device->vma_mutex, NULL) != 0) {
2830 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
2831 goto fail_queue;
2832 }
2833
2834 /* keep the page with address zero out of the allocator */
2835 util_vma_heap_init(&device->vma_lo,
2836 LOW_HEAP_MIN_ADDRESS, LOW_HEAP_SIZE);
2837
2838 util_vma_heap_init(&device->vma_cva, CLIENT_VISIBLE_HEAP_MIN_ADDRESS,
2839 CLIENT_VISIBLE_HEAP_SIZE);
2840
2841 /* Leave the last 4GiB out of the high vma range, so that no state
2842 * base address + size can overflow 48 bits. For more information see
2843 * the comment about Wa32bitGeneralStateOffset in anv_allocator.c
2844 */
2845 util_vma_heap_init(&device->vma_hi, HIGH_HEAP_MIN_ADDRESS,
2846 physical_device->gtt_size - (1ull << 32) -
2847 HIGH_HEAP_MIN_ADDRESS);
2848 }
2849
2850 list_inithead(&device->memory_objects);
2851
2852 /* As per spec, the driver implementation may deny requests to acquire
2853 * a priority above the default priority (MEDIUM) if the caller does not
2854 * have sufficient privileges. In this scenario VK_ERROR_NOT_PERMITTED_EXT
2855 * is returned.
2856 */
2857 if (physical_device->has_context_priority) {
2858 int err = anv_gem_set_context_param(device->fd, device->context_id,
2859 I915_CONTEXT_PARAM_PRIORITY,
2860 vk_priority_to_gen(priority));
2861 if (err != 0 && priority > VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT) {
2862 result = vk_error(VK_ERROR_NOT_PERMITTED_EXT);
2863 goto fail_vmas;
2864 }
2865 }
2866
2867 device->info = physical_device->info;
2868 device->isl_dev = physical_device->isl_dev;
2869
2870 /* On Broadwell and later, we can use batch chaining to more efficiently
2871 * implement growing command buffers. Prior to Broadwell, the kernel
2872 * command parser gets in the way and we have to fall back to growing
2873 * the batch.
2874 */
2875 device->can_chain_batches = device->info.gen >= 8;
2876
2877 device->robust_buffer_access = robust_buffer_access;
2878 device->enabled_extensions = enabled_extensions;
2879
2880 const struct anv_instance *instance = physical_device->instance;
2881 for (unsigned i = 0; i < ARRAY_SIZE(device->dispatch.entrypoints); i++) {
2882 /* Vulkan requires that entrypoints for extensions which have not
2883 * been enabled are not advertised.
2884 */
2885 if (!anv_device_entrypoint_is_enabled(i, instance->app_info.api_version,
2886 &instance->enabled_extensions,
2887 &device->enabled_extensions)) {
2888 device->dispatch.entrypoints[i] = NULL;
2889 } else {
2890 device->dispatch.entrypoints[i] =
2891 anv_resolve_device_entrypoint(&device->info, i);
2892 }
2893 }
2894
2895 if (pthread_mutex_init(&device->mutex, NULL) != 0) {
2896 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
2897 goto fail_queue;
2898 }
2899
2900 pthread_condattr_t condattr;
2901 if (pthread_condattr_init(&condattr) != 0) {
2902 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
2903 goto fail_mutex;
2904 }
2905 if (pthread_condattr_setclock(&condattr, CLOCK_MONOTONIC) != 0) {
2906 pthread_condattr_destroy(&condattr);
2907 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
2908 goto fail_mutex;
2909 }
2910 if (pthread_cond_init(&device->queue_submit, &condattr) != 0) {
2911 pthread_condattr_destroy(&condattr);
2912 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
2913 goto fail_mutex;
2914 }
2915 pthread_condattr_destroy(&condattr);
2916
2917 result = anv_bo_cache_init(&device->bo_cache);
2918 if (result != VK_SUCCESS)
2919 goto fail_queue_cond;
2920
2921 anv_bo_pool_init(&device->batch_bo_pool, device);
2922
2923 result = anv_state_pool_init(&device->dynamic_state_pool, device,
2924 DYNAMIC_STATE_POOL_MIN_ADDRESS, 0, 16384);
2925 if (result != VK_SUCCESS)
2926 goto fail_batch_bo_pool;
2927
2928 if (device->info.gen >= 8) {
2929 /* The border color pointer is limited to 24 bits, so we need to make
2930 * sure that any such color used at any point in the program doesn't
2931 * exceed that limit.
2932 * We achieve that by reserving all the custom border colors we support
2933 * right off the bat, so they are close to the base address.
2934 */
2935 anv_state_reserved_pool_init(&device->custom_border_colors,
2936 &device->dynamic_state_pool,
2937 MAX_CUSTOM_BORDER_COLORS,
2938 sizeof(struct gen8_border_color), 64);
2939 }
2940
2941 result = anv_state_pool_init(&device->instruction_state_pool, device,
2942 INSTRUCTION_STATE_POOL_MIN_ADDRESS, 0, 16384);
2943 if (result != VK_SUCCESS)
2944 goto fail_dynamic_state_pool;
2945
2946 result = anv_state_pool_init(&device->surface_state_pool, device,
2947 SURFACE_STATE_POOL_MIN_ADDRESS, 0, 4096);
2948 if (result != VK_SUCCESS)
2949 goto fail_instruction_state_pool;
2950
2951 if (physical_device->use_softpin) {
2952 int64_t bt_pool_offset = (int64_t)BINDING_TABLE_POOL_MIN_ADDRESS -
2953 (int64_t)SURFACE_STATE_POOL_MIN_ADDRESS;
2954 assert(INT32_MIN < bt_pool_offset && bt_pool_offset < 0);
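/* Presumably the binding table pool is placed at a fixed negative offset
 * below the surface state pool so binding table entries can be expressed
 * relative to surface state base; the assert guards that this offset fits
 * in a signed 32-bit value.
 */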
2955 result = anv_state_pool_init(&device->binding_table_pool, device,
2956 SURFACE_STATE_POOL_MIN_ADDRESS,
2957 bt_pool_offset, 4096);
2958 if (result != VK_SUCCESS)
2959 goto fail_surface_state_pool;
2960 }
2961
2962 if (device->info.has_aux_map) {
2963 device->aux_map_ctx = gen_aux_map_init(device, &aux_map_allocator,
2964 &physical_device->info);
2965 if (!device->aux_map_ctx) {
result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2966 goto fail_binding_table_pool;
}
2967 }
2968
2969 result = anv_device_alloc_bo(device, 4096,
2970 ANV_BO_ALLOC_CAPTURE | ANV_BO_ALLOC_MAPPED /* flags */,
2971 0 /* explicit_address */,
2972 &device->workaround_bo);
2973 if (result != VK_SUCCESS)
2974 goto fail_surface_aux_map_pool;
2975
2976 device->workaround_address = (struct anv_address) {
2977 .bo = device->workaround_bo,
2978 .offset = align_u32(
2979 intel_debug_write_identifiers(device->workaround_bo->map,
2980 device->workaround_bo->size,
2981 "Anv") + 8, 8),
2982 };
2983
2984 device->debug_frame_desc =
2985 intel_debug_get_identifier_block(device->workaround_bo->map,
2986 device->workaround_bo->size,
2987 GEN_DEBUG_BLOCK_TYPE_FRAME);
2988
2989 result = anv_device_init_trivial_batch(device);
2990 if (result != VK_SUCCESS)
2991 goto fail_workaround_bo;
2992
2993 /* Allocate a null surface state at surface state offset 0. This makes
2994 * NULL descriptor handling trivial because we can just memset structures
2995 * to zero and they have a valid descriptor.
2996 */
2997 device->null_surface_state =
2998 anv_state_pool_alloc(&device->surface_state_pool,
2999 device->isl_dev.ss.size,
3000 device->isl_dev.ss.align);
3001 isl_null_fill_state(&device->isl_dev, device->null_surface_state.map,
3002 isl_extent3d(1, 1, 1) /* This shouldn't matter */);
3003 assert(device->null_surface_state.offset == 0);
3004
3005 if (device->info.gen >= 10) {
3006 result = anv_device_init_hiz_clear_value_bo(device);
3007 if (result != VK_SUCCESS)
3008 goto fail_trivial_batch_bo;
3009 }
3010
3011 anv_scratch_pool_init(device, &device->scratch_pool);
3012
3013 switch (device->info.gen) {
3014 case 7:
3015 if (!device->info.is_haswell)
3016 result = gen7_init_device_state(device);
3017 else
3018 result = gen75_init_device_state(device);
3019 break;
3020 case 8:
3021 result = gen8_init_device_state(device);
3022 break;
3023 case 9:
3024 result = gen9_init_device_state(device);
3025 break;
3026 case 10:
3027 result = gen10_init_device_state(device);
3028 break;
3029 case 11:
3030 result = gen11_init_device_state(device);
3031 break;
3032 case 12:
3033 result = gen12_init_device_state(device);
3034 break;
3035 default:
3036 /* Shouldn't get here as we don't create physical devices for any other
3037 * gens. */
3038 unreachable("unhandled gen");
3039 }
3040 if (result != VK_SUCCESS)
3041 goto fail_clear_value_bo;
3042
3043 anv_pipeline_cache_init(&device->default_pipeline_cache, device,
3044 true /* cache_enabled */, false /* external_sync */);
3045
3046 anv_device_init_blorp(device);
3047
3048 anv_device_init_border_colors(device);
3049
3050 anv_device_perf_init(device);
3051
3052 *pDevice = anv_device_to_handle(device);
3053
3054 return VK_SUCCESS;
3055
3056 fail_clear_value_bo:
3057 if (device->info.gen >= 10)
3058 anv_device_release_bo(device, device->hiz_clear_bo);
3059 anv_scratch_pool_finish(device, &device->scratch_pool);
3060 fail_trivial_batch_bo:
3061 anv_device_release_bo(device, device->trivial_batch_bo);
3062 fail_workaround_bo:
3063 anv_device_release_bo(device, device->workaround_bo);
3064 fail_surface_aux_map_pool:
3065 if (device->info.has_aux_map) {
3066 gen_aux_map_finish(device->aux_map_ctx);
3067 device->aux_map_ctx = NULL;
3068 }
3069 fail_binding_table_pool:
3070 if (physical_device->use_softpin)
3071 anv_state_pool_finish(&device->binding_table_pool);
3072 fail_surface_state_pool:
3073 anv_state_pool_finish(&device->surface_state_pool);
3074 fail_instruction_state_pool:
3075 anv_state_pool_finish(&device->instruction_state_pool);
3076 fail_dynamic_state_pool:
3077 if (device->info.gen >= 8)
3078 anv_state_reserved_pool_finish(&device->custom_border_colors);
3079 anv_state_pool_finish(&device->dynamic_state_pool);
3080 fail_batch_bo_pool:
3081 anv_bo_pool_finish(&device->batch_bo_pool);
3082 anv_bo_cache_finish(&device->bo_cache);
3083 fail_queue_cond:
3084 pthread_cond_destroy(&device->queue_submit);
3085 fail_mutex:
3086 pthread_mutex_destroy(&device->mutex);
3087 fail_vmas:
3088 if (physical_device->use_softpin) {
3089 util_vma_heap_finish(&device->vma_hi);
3090 util_vma_heap_finish(&device->vma_cva);
3091 util_vma_heap_finish(&device->vma_lo);
3092 }
3093 fail_queue:
3094 anv_queue_finish(&device->queue);
3095 fail_context_id:
3096 anv_gem_destroy_context(device, device->context_id);
3097 fail_fd:
3098 close(device->fd);
3099 fail_device:
3100 vk_free(&device->vk.alloc, device);
3101
3102 return result;
3103 }
3104
3105 void anv_DestroyDevice(
3106 VkDevice _device,
3107 const VkAllocationCallbacks* pAllocator)
3108 {
3109 ANV_FROM_HANDLE(anv_device, device, _device);
3110
3111 if (!device)
3112 return;
3113
3114 anv_device_finish_blorp(device);
3115
3116 anv_pipeline_cache_finish(&device->default_pipeline_cache);
3117
3118 anv_queue_finish(&device->queue);
3119
3120 #ifdef HAVE_VALGRIND
3121 /* We only need to free these to prevent valgrind errors. The backing
3122 * BO will go away in a couple of lines so we don't actually leak.
3123 */
3124 if (device->info.gen >= 8)
3125 anv_state_reserved_pool_finish(&device->custom_border_colors);
3126 anv_state_pool_free(&device->dynamic_state_pool, device->border_colors);
3127 anv_state_pool_free(&device->dynamic_state_pool, device->slice_hash);
3128 #endif
3129