/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "drm-uapi/i915_drm.h"

#ifdef HAVE_VALGRIND
#define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
#else
#define VG(x) ((void)0)
#endif

#include "common/gen_clflush.h"
#include "common/gen_decoder.h"
#include "common/gen_gem.h"
#include "dev/gen_device_info.h"
#include "blorp/blorp.h"
#include "compiler/brw_compiler.h"
#include "util/bitset.h"
#include "util/macros.h"
#include "util/hash_table.h"
#include "util/list.h"
#include "util/sparse_array.h"
#include "util/u_atomic.h"
#include "util/u_vector.h"
#include "util/u_math.h"

#include "util/xmlconfig.h"

#include "vk_debug_report.h"
/* Pre-declarations needed for WSI entrypoints */
typedef struct xcb_connection_t xcb_connection_t;
typedef uint32_t xcb_visualid_t;
typedef uint32_t xcb_window_t;

struct anv_buffer_view;
struct anv_image_view;

struct gen_aux_map_context;
struct gen_perf_config;

#include <vulkan/vulkan.h>
#include <vulkan/vulkan_intel.h>
#include <vulkan/vk_icd.h>

#include "anv_android.h"
#include "anv_entrypoints.h"
#include "anv_extensions.h"

#include "dev/gen_debug.h"
#include "common/intel_log.h"
#include "wsi_common.h"

#define NSEC_PER_SEC 1000000000ull
/* anv Virtual Memory Layout
 * =========================
 *
 * When the anv driver is determining the virtual graphics addresses of memory
 * objects itself using the softpin mechanism, the following memory ranges
 * will be used.
 *
 * Three special considerations to notice:
 *
 * (1) the dynamic state pool is located within the same 4 GiB as the low
 * heap. This is to work around a VF cache issue described in a comment in
 * anv_physical_device_init_heaps.
 *
 * (2) the binding table pool is located at lower addresses than the surface
 * state pool, within a 4 GiB range. This allows surface state base addresses
 * to cover both binding tables (16 bit offsets) and surface states (32 bit
 * offsets).
 *
 * (3) the last 4 GiB of the address space is withheld from the high
 * heap. Various hardware units will read past the end of an object for
 * various reasons. This healthy margin prevents reads from wrapping around
 * 48-bit addresses.
 */
#define LOW_HEAP_MIN_ADDRESS               0x000000001000ULL /* 4 KiB */
#define LOW_HEAP_MAX_ADDRESS               0x0000bfffffffULL
#define DYNAMIC_STATE_POOL_MIN_ADDRESS     0x0000c0000000ULL /* 3 GiB */
#define DYNAMIC_STATE_POOL_MAX_ADDRESS     0x0000ffffffffULL
#define BINDING_TABLE_POOL_MIN_ADDRESS     0x000100000000ULL /* 4 GiB */
#define BINDING_TABLE_POOL_MAX_ADDRESS     0x00013fffffffULL
#define SURFACE_STATE_POOL_MIN_ADDRESS     0x000140000000ULL /* 5 GiB */
#define SURFACE_STATE_POOL_MAX_ADDRESS     0x00017fffffffULL
#define INSTRUCTION_STATE_POOL_MIN_ADDRESS 0x000180000000ULL /* 6 GiB */
#define INSTRUCTION_STATE_POOL_MAX_ADDRESS 0x0001bfffffffULL
#define HIGH_HEAP_MIN_ADDRESS              0x0001c0000000ULL /* 7 GiB */
#define LOW_HEAP_SIZE               \
   (LOW_HEAP_MAX_ADDRESS - LOW_HEAP_MIN_ADDRESS + 1)
#define DYNAMIC_STATE_POOL_SIZE     \
   (DYNAMIC_STATE_POOL_MAX_ADDRESS - DYNAMIC_STATE_POOL_MIN_ADDRESS + 1)
#define BINDING_TABLE_POOL_SIZE     \
   (BINDING_TABLE_POOL_MAX_ADDRESS - BINDING_TABLE_POOL_MIN_ADDRESS + 1)
#define SURFACE_STATE_POOL_SIZE     \
   (SURFACE_STATE_POOL_MAX_ADDRESS - SURFACE_STATE_POOL_MIN_ADDRESS + 1)
#define INSTRUCTION_STATE_POOL_SIZE \
   (INSTRUCTION_STATE_POOL_MAX_ADDRESS - INSTRUCTION_STATE_POOL_MIN_ADDRESS + 1)
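
/* Illustrative sanity check of the layout above.  anv_check_vma_layout() is
 * a hypothetical helper added for documentation purposes only, not part of
 * the driver; it just shows that each state pool spans exactly 1 GiB and
 * that the dynamic state pool stays below the 4 GiB boundary required by
 * consideration (1).
 */
static inline void
anv_check_vma_layout(void)
{
   STATIC_ASSERT(DYNAMIC_STATE_POOL_SIZE == (1ull << 30));     /* 1 GiB */
   STATIC_ASSERT(BINDING_TABLE_POOL_SIZE == (1ull << 30));     /* 1 GiB */
   STATIC_ASSERT(SURFACE_STATE_POOL_SIZE == (1ull << 30));     /* 1 GiB */
   STATIC_ASSERT(INSTRUCTION_STATE_POOL_SIZE == (1ull << 30)); /* 1 GiB */
   STATIC_ASSERT(DYNAMIC_STATE_POOL_MAX_ADDRESS < (1ull << 32));
}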
/* Allowing different clear colors requires us to perform a depth resolve at
 * the end of certain render passes. This is because while slow clears store
 * the clear color in the HiZ buffer, fast clears (without a resolve) don't.
 * See the PRMs for examples describing when additional resolves would be
 * necessary. To enable fast clears without requiring extra resolves, we set
 * the clear value to a globally-defined one. We could allow different values
 * if the user doesn't expect coherent data during or after a render pass
 * (VK_ATTACHMENT_STORE_OP_DONT_CARE), but such users (aside from the CTS)
 * don't seem to exist yet. In almost all Vulkan applications tested thus far,
 * 1.0f seems to be the only value used. The only application that doesn't set
 * this value does so through the usage of a seemingly uninitialized clear
 * value.
 */
#define ANV_HZ_FC_VAL 1.0f
#define MAX_XFB_BUFFERS  4
#define MAX_XFB_STREAMS  4
#define MAX_VIEWPORTS   16
#define MAX_SCISSORS    16
#define MAX_PUSH_CONSTANTS_SIZE 128
#define MAX_DYNAMIC_BUFFERS 16
#define MAX_IMAGES 64
#define MAX_PUSH_DESCRIPTORS 32 /* Minimum requirement */
#define MAX_INLINE_UNIFORM_BLOCK_SIZE 4096
#define MAX_INLINE_UNIFORM_BLOCK_DESCRIPTORS 32
/* From the Skylake PRM Vol. 7 "Binding Table Surface State Model":
 *
 *    "The surface state model is used when a Binding Table Index (specified
 *    in the message descriptor) of less than 240 is specified. In this model,
 *    the Binding Table Index is used to index into the binding table, and the
 *    binding table entry contains a pointer to the SURFACE_STATE."
 *
 * Binding table values above 240 are used for various things in the hardware
 * such as stateless, stateless with incoherent cache, SLM, and bindless.
 */
#define MAX_BINDING_TABLE_SIZE 240
/* The kernel relocation API has a limitation of a 32-bit delta value
 * applied to the address before it is written which, in spite of it being
 * unsigned, is treated as signed.  Because of the way that this maps to
 * the Vulkan API, we cannot handle an offset into a buffer that does not
 * fit into signed 32 bits.  The only mechanism we have for dealing with
 * this at the moment is to limit all VkDeviceMemory objects to a maximum
 * of 2GB each.  The Vulkan spec allows us to do this:
 *
 *    "Some platforms may have a limit on the maximum size of a single
 *    allocation. For example, certain systems may fail to create
 *    allocations with a size greater than or equal to 4GB. Such a limit is
 *    implementation-dependent, and if such a failure occurs then the error
 *    VK_ERROR_OUT_OF_DEVICE_MEMORY should be returned."
 *
 * We don't use vk_error here because it's not an error so much as an
 * indication to the application that the allocation is too large.
 */
#define MAX_MEMORY_ALLOCATION_SIZE (1ull << 31)
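
/* Illustrative only: the kind of early-out that consumes the limit above.
 * anv_AllocateMemory() does something along these lines; this is a sketch,
 * not a copy of the driver code:
 *
 *    if (pAllocateInfo->allocationSize > MAX_MEMORY_ALLOCATION_SIZE)
 *       return VK_ERROR_OUT_OF_DEVICE_MEMORY;   (deliberately not vk_error())
 */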
#define ANV_SVGS_VB_INDEX    MAX_VBS
#define ANV_DRAWID_VB_INDEX (MAX_VBS + 1)

/* We reserve this MI ALU register for the purpose of handling predication.
 * Other code which uses the MI ALU should leave it alone.
 */
#define ANV_PREDICATE_RESULT_REG 0x2678 /* MI_ALU_REG15 */

/* For gen12 we set the streamout buffers using 4 separate commands
 * (3DSTATE_SO_BUFFER_INDEX_*) instead of 3DSTATE_SO_BUFFER. However the layout
 * of the 3DSTATE_SO_BUFFER_INDEX_* commands is identical to that of
 * 3DSTATE_SO_BUFFER apart from the SOBufferIndex field, so for now we use the
 * 3DSTATE_SO_BUFFER command, but change the 3DCommandSubOpcode.
 * SO_BUFFER_INDEX_0_CMD is actually the 3DCommandSubOpcode for
 * 3DSTATE_SO_BUFFER_INDEX_0.
 */
#define SO_BUFFER_INDEX_0_CMD 0x60
#define anv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
static inline uint32_t
align_down_npot_u32(uint32_t v, uint32_t a)
{
   return v - (v % a);
}
static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}
static inline uint64_t
align_u64(uint64_t v, uint64_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}
static inline int32_t
align_i32(int32_t v, int32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}
/** Alignment must be a power of 2. */
static inline bool
anv_is_aligned(uintmax_t n, uintmax_t a)
{
   assert(a == (a & -a));
   return (n & (a - 1)) == 0;
}
static inline uint32_t
anv_minify(uint32_t n, uint32_t levels)
{
   if (unlikely(n == 0))
      return 0;
   else
      return MAX2(n >> levels, 1);
}
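
/* Illustrative only (not part of the original header): how the helpers above
 * behave for a few concrete inputs.
 *
 *    align_u32(13, 8)           == 16    (rounds up to the next multiple of 8)
 *    align_down_npot_u32(13, 6) == 12    (works for non-power-of-two alignments)
 *    anv_is_aligned(4096, 64)   == true
 *    anv_minify(37, 2)          == 9     (37 >> 2, clamped to at least 1)
 *    anv_minify(1, 5)           == 1     (mip sizes never reach 0)
 */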
static inline float
anv_clamp_f(float f, float min, float max)
{
   if (f < min)
      return min;
   else if (f > max)
      return max;
   else
      return f;
}
static inline bool
anv_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
{
   if (*inout_mask & clear_mask) {
      *inout_mask &= ~clear_mask;
      return true;
   } else {
      return false;
   }
}
static inline union isl_color_value
vk_to_isl_color(VkClearColorValue color)
{
   return (union isl_color_value) {
      .u32 = {
         color.uint32[0],
         color.uint32[1],
         color.uint32[2],
         color.uint32[3],
      },
   };
}
#define for_each_bit(b, dword)                          \
   for (uint32_t __dword = (dword);                     \
        (b) = __builtin_ffs(__dword) - 1, __dword;      \
        __dword &= ~(1 << (b)))

#define typed_memcpy(dest, src, count) ({ \
   STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
   memcpy((dest), (src), (count) * sizeof(*(src))); \
})
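
/* Illustrative only: typical uses of the two helpers above (the variable
 * names are made up for the example).
 *
 *    uint32_t b;
 *    for_each_bit(b, dirty_mask)
 *       flush_state(b);                  (b visits each set bit, LSB first)
 *
 *    VkViewport copy[MAX_VIEWPORTS];
 *    typed_memcpy(copy, pViewports, viewportCount);
 *                                        (element sizes checked at compile time)
 */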
/* Mapping from anv object to VkDebugReportObjectTypeEXT. New types need
 * to be added here in order to utilize mapping in debug/error/perf macros.
 */
#define REPORT_OBJECT_TYPE(o) \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), struct anv_instance*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), struct anv_physical_device*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), struct anv_device*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), const struct anv_device*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), struct anv_queue*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), struct anv_semaphore*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), struct anv_cmd_buffer*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), struct anv_fence*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), struct anv_device_memory*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), struct anv_buffer*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), struct anv_image*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), const struct anv_image*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), struct anv_event*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT, \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), struct anv_query_pool*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), struct anv_buffer_view*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT, \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), struct anv_image_view*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), struct anv_shader_module*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), struct anv_pipeline_cache*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT, \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), struct anv_pipeline_layout*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), struct anv_render_pass*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), struct anv_pipeline*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), struct anv_descriptor_set_layout*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), struct anv_sampler*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT, \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), struct anv_descriptor_pool*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), struct anv_descriptor_set*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), struct anv_framebuffer*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT, \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), struct anv_cmd_pool*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), struct anv_surface*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT, \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), struct wsi_swapchain*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), struct vk_debug_callback*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT, \
   __builtin_choose_expr ( \
   __builtin_types_compatible_p (__typeof (o), void*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, \
   /* The void expression results in a compile-time error \
      when assigning the result to something. */ \
   (void)0)))))))))))))))))))))))))))))))
/* Whenever we generate an error, pass it through this function. Useful for
 * debugging, where we can break on it. Only call at error site, not when
 * propagating errors. Might be useful to plug in a stack trace here.
 */
VkResult __vk_errorv(struct anv_instance *instance, const void *object,
                     VkDebugReportObjectTypeEXT type, VkResult error,
                     const char *file, int line, const char *format,
                     va_list args);

VkResult __vk_errorf(struct anv_instance *instance, const void *object,
                     VkDebugReportObjectTypeEXT type, VkResult error,
                     const char *file, int line, const char *format, ...)
   anv_printflike(7, 8);

#ifdef DEBUG
#define vk_error(error) __vk_errorf(NULL, NULL,\
                                    VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,\
                                    error, __FILE__, __LINE__, NULL)
#define vk_errorv(instance, obj, error, format, args)\
    __vk_errorv(instance, obj, REPORT_OBJECT_TYPE(obj), error,\
                __FILE__, __LINE__, format, args)
#define vk_errorf(instance, obj, error, format, ...)\
    __vk_errorf(instance, obj, REPORT_OBJECT_TYPE(obj), error,\
                __FILE__, __LINE__, format, ## __VA_ARGS__)
#else
#define vk_error(error) error
#define vk_errorf(instance, obj, error, format, ...) error
#endif
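
/* Illustrative only: how the error helpers above are typically used from a
 * driver entrypoint (the surrounding condition is made up for the example):
 *
 *    if (result != VK_SUCCESS)
 *       return vk_errorf(device->instance, device, result,
 *                        "gem execbuf failed: %m");
 *
 * REPORT_OBJECT_TYPE() picks the debug-report object type from the static
 * type of the second argument, so the caller never spells it out.
 */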
/**
 * Warn on ignored extension structs.
 *
 * The Vulkan spec requires us to ignore unsupported or unknown structs in
 * a pNext chain. In debug mode, emitting warnings for ignored structs may
 * help us discover structs that we should not have ignored.
 *
 * From the Vulkan 1.0.38 spec:
 *
 *    Any component of the implementation (the loader, any enabled layers,
 *    and drivers) must skip over, without processing (other than reading the
 *    sType and pNext members) any chained structures with sType values not
 *    defined by extensions supported by that component.
 */
#define anv_debug_ignored_stype(sType) \
   intel_logd("%s: ignored VkStructureType %u\n", __func__, (sType))
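
/* Illustrative only: the typical pNext walk that ends up calling the macro
 * above, using Mesa's vk_foreach_struct helper (the case label is generic):
 *
 *    vk_foreach_struct(ext, pCreateInfo->pNext) {
 *       switch (ext->sType) {
 *       case VK_STRUCTURE_TYPE_..._FEATURES:
 *          ... handle the struct ...
 *          break;
 *       default:
 *          anv_debug_ignored_stype(ext->sType);
 *          break;
 *       }
 *    }
 */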
void __anv_perf_warn(struct anv_instance *instance, const void *object,
                     VkDebugReportObjectTypeEXT type, const char *file,
                     int line, const char *format, ...)
   anv_printflike(6, 7);
void anv_loge(const char *format, ...) anv_printflike(1, 2);
void anv_loge_v(const char *format, va_list va);
/**
 * Print a FINISHME message, including its source location.
 */
#define anv_finishme(format, ...) \
   do { \
      static bool reported = false; \
      if (!reported) { \
         intel_logw("%s:%d: FINISHME: " format, __FILE__, __LINE__, \
                    ##__VA_ARGS__); \
         reported = true; \
      } \
   } while (0)

/**
 * Print a perf warning message.  Set INTEL_DEBUG=perf to see these.
 */
#define anv_perf_warn(instance, obj, format, ...) \
   do { \
      static bool reported = false; \
      if (!reported && unlikely(INTEL_DEBUG & DEBUG_PERF)) { \
         __anv_perf_warn(instance, obj, REPORT_OBJECT_TYPE(obj), __FILE__, __LINE__,\
                         format, ##__VA_ARGS__); \
         reported = true; \
      } \
   } while (0)
/* A non-fatal assert.  Useful for debugging. */
#ifdef DEBUG
#define anv_assert(x) ({ \
   if (unlikely(!(x))) \
      intel_loge("%s:%d ASSERT: %s", __FILE__, __LINE__, #x); \
})
#else
#define anv_assert(x)
#endif
/* A multi-pointer allocator
 *
 * When copying data structures from the user (such as a render pass), it's
 * common to need to allocate data for a bunch of different things.  Instead
 * of doing several allocations and having to handle all of the error checking
 * that entails, it can be easier to do a single allocation.  This struct
 * helps facilitate that.  The intended usage looks like this:
 *
 *    ANV_MULTIALLOC(ma)
 *    anv_multialloc_add(&ma, &main_ptr, 1);
 *    anv_multialloc_add(&ma, &substruct1, substruct1Count);
 *    anv_multialloc_add(&ma, &substruct2, substruct2Count);
 *
 *    if (!anv_multialloc_alloc(&ma, pAllocator, VK_ALLOCATION_SCOPE_FOO))
 *       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 */
struct anv_multialloc {
   size_t size;
   size_t align;

   uint32_t ptr_count;
   void **ptrs[8];
};

#define ANV_MULTIALLOC_INIT \
   ((struct anv_multialloc) { 0, })

#define ANV_MULTIALLOC(_name) \
   struct anv_multialloc _name = ANV_MULTIALLOC_INIT
__attribute__((always_inline))
static inline void
_anv_multialloc_add(struct anv_multialloc *ma,
                    void **ptr, size_t size, size_t align)
{
   size_t offset = align_u64(ma->size, align);
   ma->size = offset + size;
   ma->align = MAX2(ma->align, align);

   /* Store the offset in the pointer. */
   *ptr = (void *)(uintptr_t)offset;

   assert(ma->ptr_count < ARRAY_SIZE(ma->ptrs));
   ma->ptrs[ma->ptr_count++] = ptr;
}

#define anv_multialloc_add_size(_ma, _ptr, _size) \
   _anv_multialloc_add((_ma), (void **)(_ptr), (_size), __alignof__(**(_ptr)))

#define anv_multialloc_add(_ma, _ptr, _count) \
   anv_multialloc_add_size(_ma, _ptr, (_count) * sizeof(**(_ptr)));
__attribute__((always_inline))
static inline void *
anv_multialloc_alloc(struct anv_multialloc *ma,
                     const VkAllocationCallbacks *alloc,
                     VkSystemAllocationScope scope)
{
   void *ptr = vk_alloc(alloc, ma->size, ma->align, scope);
   if (!ptr)
      return NULL;

   /* Fill out each of the pointers with their final value.
    *
    *   for (uint32_t i = 0; i < ma->ptr_count; i++)
    *      *ma->ptrs[i] = ptr + (uintptr_t)*ma->ptrs[i];
    *
    * Unfortunately, even though ma->ptr_count is basically guaranteed to be a
    * constant, GCC is incapable of figuring this out and unrolling the loop
    * so we have to give it a little help.
    */
   STATIC_ASSERT(ARRAY_SIZE(ma->ptrs) == 8);
#define _ANV_MULTIALLOC_UPDATE_POINTER(_i) \
   if ((_i) < ma->ptr_count) \
      *ma->ptrs[_i] = ptr + (uintptr_t)*ma->ptrs[_i]
   _ANV_MULTIALLOC_UPDATE_POINTER(0);
   _ANV_MULTIALLOC_UPDATE_POINTER(1);
   _ANV_MULTIALLOC_UPDATE_POINTER(2);
   _ANV_MULTIALLOC_UPDATE_POINTER(3);
   _ANV_MULTIALLOC_UPDATE_POINTER(4);
   _ANV_MULTIALLOC_UPDATE_POINTER(5);
   _ANV_MULTIALLOC_UPDATE_POINTER(6);
   _ANV_MULTIALLOC_UPDATE_POINTER(7);
#undef _ANV_MULTIALLOC_UPDATE_POINTER

   return ptr;
}
__attribute__((always_inline))
static inline void *
anv_multialloc_alloc2(struct anv_multialloc *ma,
                      const VkAllocationCallbacks *parent_alloc,
                      const VkAllocationCallbacks *alloc,
                      VkSystemAllocationScope scope)
{
   return anv_multialloc_alloc(ma, alloc ? alloc : parent_alloc, scope);
}
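
/* Illustrative only: a slightly fuller sketch of the multialloc pattern than
 * the one in the comment above.  The struct and variable names are made up
 * for the example; real users look like anv_CreateRenderPass().
 *
 *    struct example_object *obj;
 *    struct example_child *children;
 *
 *    ANV_MULTIALLOC(ma);
 *    anv_multialloc_add(&ma, &obj, 1);
 *    anv_multialloc_add(&ma, &children, child_count);
 *
 *    if (!anv_multialloc_alloc(&ma, pAllocator,
 *                              VK_SYSTEM_ALLOCATION_SCOPE_OBJECT))
 *       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 *
 *    obj->children = children;
 *
 * Both pointers land inside one allocation, so a single
 * vk_free(pAllocator, obj) releases everything.
 */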
struct anv_bo {
   /* Index into the current validation list.  This is used by the
    * validation list building algorithm to track which buffers are already
    * in the validation list so that we can ensure uniqueness.
    */
   uint32_t index;

   /* Index for use with util_sparse_array_free_list */
   uint32_t free_index;

   /* Last known offset.  This value is provided by the kernel when we
    * execbuf and is used as the presumed offset for the next bunch of
    * relocations.
    */
   uint64_t offset;

   /* Map for internally mapped BOs.
    *
    * If ANV_BO_WRAPPER is set in flags, map points to the wrapped BO.
    */
   void *map;

   /** Flags to pass to the kernel through drm_i915_exec_object2::flags */
   uint32_t flags;

   /** True if this BO may be shared with other processes */
   bool is_external:1;

   /** True if this BO is a wrapper
    *
    * When set to true, none of the fields in this BO are meaningful except
    * for anv_bo::is_wrapper and anv_bo::map which points to the actual BO.
    * See also anv_bo_unwrap().  Wrapper BOs are not allowed when use_softpin
    * is set in the physical device.
    */
   bool is_wrapper:1;

   /** See also ANV_BO_ALLOC_FIXED_ADDRESS */
   bool has_fixed_address:1;

   /** True if this BO wraps a host pointer */
   bool from_host_ptr:1;
};

static inline struct anv_bo *
anv_bo_unwrap(struct anv_bo *bo)
{
   while (bo->is_wrapper)
      bo = bo->map;
   return bo;
}
/* Represents a lock-free linked list of "free" things.  This is used by
 * both the block pool and the state pools.  Unfortunately, in order to
 * solve the ABA problem, we can't use a single uint32_t head.
 */
union anv_free_list {
   struct {
      uint32_t offset;

      /* A simple count that is incremented every time the head changes. */
      uint32_t count;
   };
   /* Make sure it's aligned to 64 bits. This will make atomic operations
    * faster on 32 bit platforms.
    */
   uint64_t u64 __attribute__ ((aligned (8)));
};

#define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { UINT32_MAX, 0 } })

struct anv_block_state {
   union {
      struct {
         uint32_t next;
         uint32_t end;
      };
      /* Make sure it's aligned to 64 bits. This will make atomic operations
       * faster on 32 bit platforms.
       */
      uint64_t u64 __attribute__ ((aligned (8)));
   };
};
#define anv_block_pool_foreach_bo(bo, pool) \
   for (struct anv_bo **_pp_bo = (pool)->bos, *bo; \
        _pp_bo != &(pool)->bos[(pool)->nbos] && (bo = *_pp_bo, true); \
        _pp_bo++)

#define ANV_MAX_BLOCK_POOL_BOS 20
struct anv_block_pool {
   struct anv_device *device;

   /* Wrapper BO for use in relocation lists.  This BO is simply a wrapper
    * around the actual BO so that we grow the pool after the wrapper BO has
    * been put in a relocation list.  This is only used in the non-softpin
    * case.
    */
   struct anv_bo wrapper_bo;

   struct anv_bo *bos[ANV_MAX_BLOCK_POOL_BOS];
   uint32_t nbos;

   /* The address where the start of the pool is pinned. The various bos that
    * are created as the pool grows will have addresses in the range
    * [start_address, start_address + BLOCK_POOL_MEMFD_SIZE).
    */
   uint64_t start_address;

   /* The offset from the start of the bo to the "center" of the block
    * pool.  Pointers to allocated blocks are given by
    * bo.map + center_bo_offset + offsets.
    */
   uint32_t center_bo_offset;

   /* Current memory map of the block pool.  This pointer may or may not
    * point to the actual beginning of the block pool memory.  If
    * anv_block_pool_alloc_back has ever been called, then this pointer
    * will point to the "center" position of the buffer and all offsets
    * (negative or positive) given out by the block pool alloc functions
    * will be valid relative to this pointer.
    *
    * In particular, map == bo.map + center_offset
    *
    * DO NOT access this pointer directly. Use anv_block_pool_map() instead,
    * since it will handle the softpin case as well, where this points to NULL.
    */
   void *map;

   /**
    * Array of mmaps and gem handles owned by the block pool, reclaimed when
    * the block pool is destroyed.
    */
   struct u_vector mmap_cleanups;

   struct anv_block_state state;

   struct anv_block_state back_state;
};
/* Block pools are backed by a fixed-size 1GB memfd */
#define BLOCK_POOL_MEMFD_SIZE (1ul << 30)

/* The center of the block pool is also the middle of the memfd.  This may
 * change in the future if we decide differently for some reason.
 */
#define BLOCK_POOL_MEMFD_CENTER (BLOCK_POOL_MEMFD_SIZE / 2)

static inline uint32_t
anv_block_pool_size(struct anv_block_pool *pool)
{
   return pool->state.end + pool->back_state.end;
}
#define ANV_STATE_NULL ((struct anv_state) { .alloc_size = 0 })

struct anv_fixed_size_state_pool {
   union anv_free_list free_list;
   struct anv_block_state block;
};
#define ANV_MIN_STATE_SIZE_LOG2 6
#define ANV_MAX_STATE_SIZE_LOG2 21

#define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2 + 1)
struct anv_free_entry {
   struct anv_state state;
};

struct anv_state_table {
   struct anv_device *device;
   struct anv_free_entry *map;
   struct anv_block_state state;
   struct u_vector cleanups;
};

struct anv_state_pool {
   struct anv_block_pool block_pool;

   struct anv_state_table table;

   /* The size of blocks which will be allocated from the block pool */
   uint32_t block_size;

   /** Free list for "back" allocations */
   union anv_free_list back_alloc_free_list;

   struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];
};

struct anv_state_stream_block;

struct anv_state_stream {
   struct anv_state_pool *state_pool;

   /* The size of blocks to allocate from the state pool */
   uint32_t block_size;

   /* Current block we're allocating from */
   struct anv_state block;

   /* Offset into the current block at which to allocate the next state */
   uint32_t next;

   /* List of all blocks allocated from this pool */
   struct anv_state_stream_block *block_list;
};
/* The block_pool functions exported for testing only.  The block pool should
 * only be used via a state pool (see below).
 */
VkResult anv_block_pool_init(struct anv_block_pool *pool,
                             struct anv_device *device,
                             uint64_t start_address,
                             uint32_t initial_size);
void anv_block_pool_finish(struct anv_block_pool *pool);
int32_t anv_block_pool_alloc(struct anv_block_pool *pool,
                             uint32_t block_size, uint32_t *padding);
int32_t anv_block_pool_alloc_back(struct anv_block_pool *pool,
                                  uint32_t block_size);
void* anv_block_pool_map(struct anv_block_pool *pool, int32_t offset);
VkResult anv_state_pool_init(struct anv_state_pool *pool,
                             struct anv_device *device,
                             uint64_t start_address,
                             uint32_t block_size);
void anv_state_pool_finish(struct anv_state_pool *pool);
struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,
                                      uint32_t state_size, uint32_t alignment);
struct anv_state anv_state_pool_alloc_back(struct anv_state_pool *pool);
void anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state);
void anv_state_stream_init(struct anv_state_stream *stream,
                           struct anv_state_pool *state_pool,
                           uint32_t block_size);
void anv_state_stream_finish(struct anv_state_stream *stream);
struct anv_state anv_state_stream_alloc(struct anv_state_stream *stream,
                                        uint32_t size, uint32_t alignment);
VkResult anv_state_table_init(struct anv_state_table *table,
                              struct anv_device *device,
                              uint32_t initial_entries);
void anv_state_table_finish(struct anv_state_table *table);
VkResult anv_state_table_add(struct anv_state_table *table, uint32_t *idx,
                             uint32_t count);
void anv_free_list_push(union anv_free_list *list,
                        struct anv_state_table *table,
                        uint32_t idx, uint32_t count);
struct anv_state* anv_free_list_pop(union anv_free_list *list,
                                    struct anv_state_table *table);

static inline struct anv_state *
anv_state_table_get(struct anv_state_table *table, uint32_t idx)
{
   return &table->map[idx].state;
}
/**
 * Implements a pool of re-usable BOs.  The interface is identical to that
 * of block_pool except that each block is its own BO.
 */
struct anv_bo_pool {
   struct anv_device *device;

   struct util_sparse_array_free_list free_list[16];
};

void anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device,
                      uint64_t bo_flags);
void anv_bo_pool_finish(struct anv_bo_pool *pool);
VkResult anv_bo_pool_alloc(struct anv_bo_pool *pool, uint32_t size,
                           struct anv_bo **bo_out);
void anv_bo_pool_free(struct anv_bo_pool *pool, struct anv_bo *bo);
struct anv_scratch_pool {
   /* Indexed by Per-Thread Scratch Space number (the hardware value) and stage */
   struct anv_bo *bos[16][MESA_SHADER_STAGES];
};

void anv_scratch_pool_init(struct anv_device *device,
                           struct anv_scratch_pool *pool);
void anv_scratch_pool_finish(struct anv_device *device,
                             struct anv_scratch_pool *pool);
struct anv_bo *anv_scratch_pool_alloc(struct anv_device *device,
                                      struct anv_scratch_pool *pool,
                                      gl_shader_stage stage,
                                      unsigned per_thread_scratch);
/** Implements a BO cache that ensures a 1-1 mapping of GEM BOs to anv_bos */
struct anv_bo_cache {
   struct util_sparse_array bo_map;
   pthread_mutex_t mutex;
};

VkResult anv_bo_cache_init(struct anv_bo_cache *cache);
void anv_bo_cache_finish(struct anv_bo_cache *cache);
struct anv_memory_type {
   /* Standard bits passed on to the client */
   VkMemoryPropertyFlags propertyFlags;

   /* Driver-internal book-keeping */
   VkBufferUsageFlags valid_buffer_usage;
};

struct anv_memory_heap {
   /* Standard bits passed on to the client */
   VkMemoryHeapFlags flags;

   /* Driver-internal book-keeping */
   bool supports_48bit_addresses;
};
struct anv_physical_device {
   VK_LOADER_DATA _loader_data;

   struct anv_instance *instance;

   struct gen_device_info info;
   /** Amount of "GPU memory" we want to advertise
    *
    * Clearly, this value is bogus since Intel is a UMA architecture. On
    * gen7 platforms, we are limited by GTT size unless we want to implement
    * fine-grained tracking and GTT splitting. On Broadwell and above we are
    * practically unlimited. However, we will never report more than 3/4 of
    * the total system ram to try and avoid running out of RAM.
    */
   bool supports_48bit_addresses;
   struct brw_compiler *compiler;
   struct isl_device isl_dev;
   struct gen_perf_config *perf;
   int cmd_parser_version;
   bool has_exec_capture;
   bool has_syncobj_wait;
   bool has_context_priority;
   bool has_context_isolation;
   bool has_mem_available;
   bool always_use_bindless;

   /** True if we can access buffers using A64 messages */
   bool has_a64_buffer_access;
   /** True if we can use bindless access for images */
   bool has_bindless_images;
   /** True if we can use bindless access for samplers */
   bool has_bindless_samplers;

   struct anv_device_extension_table supported_extensions;
   struct anv_physical_device_dispatch_table dispatch;

   uint32_t subslice_total;

   struct anv_memory_type types[VK_MAX_MEMORY_TYPES];
   struct anv_memory_heap heaps[VK_MAX_MEMORY_HEAPS];

   uint8_t driver_build_sha1[20];
   uint8_t pipeline_cache_uuid[VK_UUID_SIZE];
   uint8_t driver_uuid[VK_UUID_SIZE];
   uint8_t device_uuid[VK_UUID_SIZE];

   struct disk_cache *disk_cache;

   struct wsi_device wsi_device;
};
struct anv_app_info {
   const char *app_name;
   uint32_t app_version;
   const char *engine_name;
   uint32_t engine_version;
   uint32_t api_version;
};
struct anv_instance {
   VK_LOADER_DATA _loader_data;

   VkAllocationCallbacks alloc;

   struct anv_app_info app_info;

   struct anv_instance_extension_table enabled_extensions;
   struct anv_instance_dispatch_table dispatch;
   struct anv_device_dispatch_table device_dispatch;

   int physicalDeviceCount;
   struct anv_physical_device physicalDevice;

   bool pipeline_cache_enabled;

   struct vk_debug_report_instance debug_report_callbacks;

   struct driOptionCache dri_options;
   struct driOptionCache available_dri_options;
};
VkResult anv_init_wsi(struct anv_physical_device *physical_device);
void anv_finish_wsi(struct anv_physical_device *physical_device);

uint32_t anv_physical_device_api_version(struct anv_physical_device *dev);
bool anv_physical_device_extension_supported(struct anv_physical_device *dev,
                                             const char *name);
struct anv_queue {
   VK_LOADER_DATA _loader_data;

   struct anv_device *device;

   VkDeviceQueueCreateFlags flags;
};
struct anv_pipeline_cache {
   struct anv_device *device;
   pthread_mutex_t mutex;

   struct hash_table *nir_cache;

   struct hash_table *cache;
};

struct nir_xfb_info;
struct anv_pipeline_bind_map;
void anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
                             struct anv_device *device,
                             bool cache_enabled);
void anv_pipeline_cache_finish(struct anv_pipeline_cache *cache);

struct anv_shader_bin *
anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                          const void *key, uint32_t key_size);
struct anv_shader_bin *
anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                 const void *key_data, uint32_t key_size,
                                 const void *kernel_data, uint32_t kernel_size,
                                 const void *constant_data,
                                 uint32_t constant_data_size,
                                 const struct brw_stage_prog_data *prog_data,
                                 uint32_t prog_data_size,
                                 const struct brw_compile_stats *stats,
                                 const struct nir_xfb_info *xfb_info,
                                 const struct anv_pipeline_bind_map *bind_map);

struct anv_shader_bin *
anv_device_search_for_kernel(struct anv_device *device,
                             struct anv_pipeline_cache *cache,
                             const void *key_data, uint32_t key_size,
                             bool *user_cache_bit);

struct anv_shader_bin *
anv_device_upload_kernel(struct anv_device *device,
                         struct anv_pipeline_cache *cache,
                         const void *key_data, uint32_t key_size,
                         const void *kernel_data, uint32_t kernel_size,
                         const void *constant_data,
                         uint32_t constant_data_size,
                         const struct brw_stage_prog_data *prog_data,
                         uint32_t prog_data_size,
                         const struct brw_compile_stats *stats,
                         const struct nir_xfb_info *xfb_info,
                         const struct anv_pipeline_bind_map *bind_map);
struct nir_shader_compiler_options;

struct nir_shader *
anv_device_search_for_nir(struct anv_device *device,
                          struct anv_pipeline_cache *cache,
                          const struct nir_shader_compiler_options *nir_options,
                          unsigned char sha1_key[20]);

void
anv_device_upload_nir(struct anv_device *device,
                      struct anv_pipeline_cache *cache,
                      const struct nir_shader *nir,
                      unsigned char sha1_key[20]);
struct anv_device {
   VK_LOADER_DATA _loader_data;

   VkAllocationCallbacks alloc;

   struct anv_instance *instance;
   uint32_t chipset_id;

   struct gen_device_info info;
   struct isl_device isl_dev;

   bool can_chain_batches;
   bool robust_buffer_access;
   struct anv_device_extension_table enabled_extensions;
   struct anv_device_dispatch_table dispatch;

   pthread_mutex_t vma_mutex;
   struct util_vma_heap vma_lo;
   struct util_vma_heap vma_hi;
   uint64_t vma_lo_available;
   uint64_t vma_hi_available;

   /** List of all anv_device_memory objects */
   struct list_head memory_objects;

   struct anv_bo_pool batch_bo_pool;

   struct anv_bo_cache bo_cache;

   struct anv_state_pool dynamic_state_pool;
   struct anv_state_pool instruction_state_pool;
   struct anv_state_pool binding_table_pool;
   struct anv_state_pool surface_state_pool;

   struct anv_bo *workaround_bo;
   struct anv_bo *trivial_batch_bo;
   struct anv_bo *hiz_clear_bo;

   struct anv_pipeline_cache default_pipeline_cache;
   struct blorp_context blorp;

   struct anv_state border_colors;

   struct anv_state slice_hash;

   struct anv_queue queue;

   struct anv_scratch_pool scratch_pool;

   uint32_t default_mocs;
   uint32_t external_mocs;

   pthread_mutex_t mutex;
   pthread_cond_t queue_submit;
   int _lost;

   struct gen_batch_decode_ctx decoder_ctx;
   /*
    * When decoding an anv_cmd_buffer, we might need to search for BOs through
    * the cmd_buffer's list.
    */
   struct anv_cmd_buffer *cmd_buffer_being_decoded;

   int perf_fd; /* -1 if not opened */
   uint64_t perf_metric; /* 0 if unset */

   struct gen_aux_map_context *aux_map_ctx;
};
static inline struct anv_state_pool *
anv_binding_table_pool(struct anv_device *device)
{
   if (device->instance->physicalDevice.use_softpin)
      return &device->binding_table_pool;
   else
      return &device->surface_state_pool;
}
static inline struct anv_state
anv_binding_table_pool_alloc(struct anv_device *device)
{
   if (device->instance->physicalDevice.use_softpin)
      return anv_state_pool_alloc(&device->binding_table_pool,
                                  device->binding_table_pool.block_size, 0);
   else
      return anv_state_pool_alloc_back(&device->surface_state_pool);
}
static inline void
anv_binding_table_pool_free(struct anv_device *device, struct anv_state state)
{
   anv_state_pool_free(anv_binding_table_pool(device), state);
}
static inline uint32_t
anv_mocs_for_bo(const struct anv_device *device, const struct anv_bo *bo)
{
   if (bo->is_external)
      return device->external_mocs;
   else
      return device->default_mocs;
}
void anv_device_init_blorp(struct anv_device *device);
void anv_device_finish_blorp(struct anv_device *device);

VkResult _anv_device_set_lost(struct anv_device *device,
                              const char *file, int line,
                              const char *msg, ...)
   anv_printflike(4, 5);
#define anv_device_set_lost(dev, ...) \
   _anv_device_set_lost(dev, __FILE__, __LINE__, __VA_ARGS__)

static inline bool
anv_device_is_lost(struct anv_device *device)
{
   return unlikely(device->_lost);
}
VkResult anv_device_execbuf(struct anv_device *device,
                            struct drm_i915_gem_execbuffer2 *execbuf,
                            struct anv_bo **execbuf_bos);
VkResult anv_device_query_status(struct anv_device *device);
enum anv_bo_alloc_flags {
   /** Specifies that the BO must have a 32-bit address
    *
    * This is the opposite of EXEC_OBJECT_SUPPORTS_48B_ADDRESS.
    */
   ANV_BO_ALLOC_32BIT_ADDRESS = (1 << 0),

   /** Specifies that the BO may be shared externally */
   ANV_BO_ALLOC_EXTERNAL = (1 << 1),

   /** Specifies that the BO should be mapped */
   ANV_BO_ALLOC_MAPPED = (1 << 2),

   /** Specifies that the BO should be snooped so we get coherency */
   ANV_BO_ALLOC_SNOOPED = (1 << 3),

   /** Specifies that the BO should be captured in error states */
   ANV_BO_ALLOC_CAPTURE = (1 << 4),

   /** Specifies that the BO will have an address assigned by the caller */
   ANV_BO_ALLOC_FIXED_ADDRESS = (1 << 5),

   /** Enables implicit synchronization on the BO
    *
    * This is the opposite of EXEC_OBJECT_ASYNC.
    */
   ANV_BO_ALLOC_IMPLICIT_SYNC = (1 << 6),

   /** Enables implicit synchronization on the BO
    *
    * This is equivalent to EXEC_OBJECT_WRITE.
    */
   ANV_BO_ALLOC_IMPLICIT_WRITE = (1 << 7),
};
VkResult anv_device_alloc_bo(struct anv_device *device, uint64_t size,
                             enum anv_bo_alloc_flags alloc_flags,
                             struct anv_bo **bo);
VkResult anv_device_import_bo_from_host_ptr(struct anv_device *device,
                                            void *host_ptr, uint32_t size,
                                            enum anv_bo_alloc_flags alloc_flags,
                                            struct anv_bo **bo_out);
VkResult anv_device_import_bo(struct anv_device *device, int fd,
                              enum anv_bo_alloc_flags alloc_flags,
                              struct anv_bo **bo);
VkResult anv_device_export_bo(struct anv_device *device,
                              struct anv_bo *bo, int *fd_out);
void anv_device_release_bo(struct anv_device *device,
                           struct anv_bo *bo);

static inline struct anv_bo *
anv_device_lookup_bo(struct anv_device *device, uint32_t gem_handle)
{
   return util_sparse_array_get(&device->bo_cache.bo_map, gem_handle);
}
VkResult anv_device_bo_busy(struct anv_device *device, struct anv_bo *bo);
VkResult anv_device_wait(struct anv_device *device, struct anv_bo *bo,
                         int64_t timeout);

VkResult anv_queue_init(struct anv_device *device, struct anv_queue *queue);
void anv_queue_finish(struct anv_queue *queue);
uint64_t anv_gettime_ns(void);
uint64_t anv_get_absolute_timeout(uint64_t timeout);

void* anv_gem_mmap(struct anv_device *device,
                   uint32_t gem_handle, uint64_t offset, uint64_t size, uint32_t flags);
void anv_gem_munmap(void *p, uint64_t size);
uint32_t anv_gem_create(struct anv_device *device, uint64_t size);
void anv_gem_close(struct anv_device *device, uint32_t gem_handle);
uint32_t anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
int anv_gem_busy(struct anv_device *device, uint32_t gem_handle);
int anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns);
int anv_gem_execbuffer(struct anv_device *device,
                       struct drm_i915_gem_execbuffer2 *execbuf);
int anv_gem_set_tiling(struct anv_device *device, uint32_t gem_handle,
                       uint32_t stride, uint32_t tiling);
int anv_gem_create_context(struct anv_device *device);
bool anv_gem_has_context_priority(int fd);
int anv_gem_destroy_context(struct anv_device *device, int context);
int anv_gem_set_context_param(int fd, int context, uint32_t param,
                              uint64_t value);
int anv_gem_get_context_param(int fd, int context, uint32_t param,
                              uint64_t *value);
int anv_gem_get_param(int fd, uint32_t param);
int anv_gem_get_tiling(struct anv_device *device, uint32_t gem_handle);
bool anv_gem_get_bit6_swizzle(int fd, uint32_t tiling);
int anv_gem_get_aperture(int fd, uint64_t *size);
int anv_gem_gpu_get_reset_stats(struct anv_device *device,
                                uint32_t *active, uint32_t *pending);
int anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle);
int anv_gem_reg_read(struct anv_device *device,
                     uint32_t offset, uint64_t *result);
uint32_t anv_gem_fd_to_handle(struct anv_device *device, int fd);
int anv_gem_set_caching(struct anv_device *device, uint32_t gem_handle, uint32_t caching);
int anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
                       uint32_t read_domains, uint32_t write_domain);
int anv_gem_sync_file_merge(struct anv_device *device, int fd1, int fd2);
uint32_t anv_gem_syncobj_create(struct anv_device *device, uint32_t flags);
void anv_gem_syncobj_destroy(struct anv_device *device, uint32_t handle);
int anv_gem_syncobj_handle_to_fd(struct anv_device *device, uint32_t handle);
uint32_t anv_gem_syncobj_fd_to_handle(struct anv_device *device, int fd);
int anv_gem_syncobj_export_sync_file(struct anv_device *device,
                                     uint32_t handle);
int anv_gem_syncobj_import_sync_file(struct anv_device *device,
                                     uint32_t handle, int fd);
void anv_gem_syncobj_reset(struct anv_device *device, uint32_t handle);
bool anv_gem_supports_syncobj_wait(int fd);
int anv_gem_syncobj_wait(struct anv_device *device,
                         uint32_t *handles, uint32_t num_handles,
                         int64_t abs_timeout_ns, bool wait_all);
bool anv_vma_alloc(struct anv_device *device, struct anv_bo *bo);
void anv_vma_free(struct anv_device *device, struct anv_bo *bo);
struct anv_reloc_list {
   uint32_t num_relocs;
   uint32_t array_length;
   struct drm_i915_gem_relocation_entry *relocs;
   struct anv_bo **reloc_bos;
};

VkResult anv_reloc_list_init(struct anv_reloc_list *list,
                             const VkAllocationCallbacks *alloc);
void anv_reloc_list_finish(struct anv_reloc_list *list,
                           const VkAllocationCallbacks *alloc);

VkResult anv_reloc_list_add(struct anv_reloc_list *list,
                            const VkAllocationCallbacks *alloc,
                            uint32_t offset, struct anv_bo *target_bo,
                            uint32_t delta, uint64_t *address_u64_out);
struct anv_batch_bo {
   /* Link in the anv_cmd_buffer.owned_batch_bos list */
   struct list_head link;

   struct anv_bo *bo;

   /* Bytes actually consumed in this batch BO */
   uint32_t length;

   struct anv_reloc_list relocs;
};

struct anv_batch {
   const VkAllocationCallbacks *alloc;

   void *start;
   void *end;
   void *next;

   struct anv_reloc_list *relocs;

   /* This callback is called (with the associated user data) in the event
    * that the batch runs out of space.
    */
   VkResult (*extend_cb)(struct anv_batch *, void *);
   void *user_data;

   /**
    * Current error status of the command buffer. Used to track inconsistent
    * or incomplete command buffer states that are the consequence of run-time
    * errors such as out of memory scenarios. We want to track this in the
    * batch because the command buffer object is not visible to some parts
    * of the driver.
    */
   VkResult status;
};
void *anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords);
void anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other);
uint64_t anv_batch_emit_reloc(struct anv_batch *batch,
                              void *location, struct anv_bo *bo, uint32_t offset);
VkResult anv_device_submit_simple_batch(struct anv_device *device,
                                        struct anv_batch *batch);
static inline VkResult
anv_batch_set_error(struct anv_batch *batch, VkResult error)
{
   assert(error != VK_SUCCESS);
   if (batch->status == VK_SUCCESS)
      batch->status = error;
   return batch->status;
}

static inline bool
anv_batch_has_error(struct anv_batch *batch)
{
   return batch->status != VK_SUCCESS;
}
struct anv_address {
   struct anv_bo *bo;
   uint32_t offset;
};

#define ANV_NULL_ADDRESS ((struct anv_address) { NULL, 0 })

static inline bool
anv_address_is_null(struct anv_address addr)
{
   return addr.bo == NULL && addr.offset == 0;
}
static inline uint64_t
anv_address_physical(struct anv_address addr)
{
   if (addr.bo && (addr.bo->flags & EXEC_OBJECT_PINNED))
      return gen_canonical_address(addr.bo->offset + addr.offset);
   else
      return gen_canonical_address(addr.offset);
}

static inline struct anv_address
anv_address_add(struct anv_address addr, uint64_t offset)
{
   addr.offset += offset;
   return addr;
}
static inline void
write_reloc(const struct anv_device *device, void *p, uint64_t v, bool flush)
{
   unsigned reloc_size = 0;
   if (device->info.gen >= 8) {
      reloc_size = sizeof(uint64_t);
      *(uint64_t *)p = gen_canonical_address(v);
   } else {
      reloc_size = sizeof(uint32_t);
      *(uint32_t *)p = v;
   }

   if (flush && !device->info.has_llc)
      gen_flush_range(p, reloc_size);
}
static inline uint64_t
_anv_combine_address(struct anv_batch *batch, void *location,
                     const struct anv_address address, uint32_t delta)
{
   if (address.bo == NULL) {
      return address.offset + delta;
   } else {
      assert(batch->start <= location && location < batch->end);

      return anv_batch_emit_reloc(batch, location, address.bo, address.offset + delta);
   }
}

#define __gen_address_type struct anv_address
#define __gen_user_data struct anv_batch
#define __gen_combine_address _anv_combine_address
/* Wrapper macros needed to work around preprocessor argument issues.  In
 * particular, arguments don't get pre-evaluated if they are concatenated.
 * This means that, if you pass GENX(3DSTATE_PS) into the emit macro, the
 * GENX macro won't get evaluated if the emit macro contains "cmd ## foo".
 * We can work around this easily enough with these helpers.
 */
#define __anv_cmd_length(cmd) cmd ## _length
#define __anv_cmd_length_bias(cmd) cmd ## _length_bias
#define __anv_cmd_header(cmd) cmd ## _header
#define __anv_cmd_pack(cmd) cmd ## _pack
#define __anv_reg_num(reg) reg ## _num

#define anv_pack_struct(dst, struc, ...) do { \
      struct struc __template = { \
         __VA_ARGS__ \
      }; \
      __anv_cmd_pack(struc)(NULL, dst, &__template); \
      VG(VALGRIND_CHECK_MEM_IS_DEFINED(dst, __anv_cmd_length(struc) * 4)); \
   } while (0)

#define anv_batch_emitn(batch, n, cmd, ...) ({ \
      void *__dst = anv_batch_emit_dwords(batch, n); \
      if (__dst) { \
         struct cmd __template = { \
            __anv_cmd_header(cmd), \
            .DWordLength = n - __anv_cmd_length_bias(cmd), \
            __VA_ARGS__ \
         }; \
         __anv_cmd_pack(cmd)(batch, __dst, &__template); \
      } \
      __dst; \
   })

#define anv_batch_emit_merge(batch, dwords0, dwords1) \
   do { \
      uint32_t *dw; \
      \
      STATIC_ASSERT(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1)); \
      dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0)); \
      if (!dw) \
         break; \
      for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++) \
         dw[i] = (dwords0)[i] | (dwords1)[i]; \
      VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, ARRAY_SIZE(dwords0) * 4));\
   } while (0)

#define anv_batch_emit(batch, cmd, name) \
   for (struct cmd name = { __anv_cmd_header(cmd) }, \
        *_dst = anv_batch_emit_dwords(batch, __anv_cmd_length(cmd)); \
        __builtin_expect(_dst != NULL, 1); \
        ({ __anv_cmd_pack(cmd)(batch, _dst, &name); \
           VG(VALGRIND_CHECK_MEM_IS_DEFINED(_dst, __anv_cmd_length(cmd) * 4)); \
           _dst = NULL; \
         }))
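
/* Illustrative only: how the emit helpers above are typically used from
 * gen-specific code (the field value is just for the example):
 *
 *    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 *       pc.CommandStreamerStallEnable = true;
 *    }
 *
 * The for-loop trick in anv_batch_emit gives the body a scoped, named
 * template struct ("pc" here), then packs it into the batch when the block
 * ends.
 */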
/* MEMORY_OBJECT_CONTROL_STATE:
 * .GraphicsDataTypeGFDT                        = 0,
 * .LLCCacheabilityControlLLCCC                 = 0,
 * .L3CacheabilityControlL3CC                   = 1,
 */

/* MEMORY_OBJECT_CONTROL_STATE:
 * .LLCeLLCCacheabilityControlLLCCC             = 0,
 * .L3CacheabilityControlL3CC                   = 1,
 */
#define GEN75_MOCS 1

/* MEMORY_OBJECT_CONTROL_STATE:
 * .MemoryTypeLLCeLLCCacheabilityControl = WB,
 * .TargetCache = L3DefertoPATforLLCeLLCselection,
 * .AgeforQUADLRU = 0
 */
#define GEN8_MOCS 0x78

/* MEMORY_OBJECT_CONTROL_STATE:
 * .MemoryTypeLLCeLLCCacheabilityControl = UCwithFenceifcoherentcycle,
 * .TargetCache = L3DefertoPATforLLCeLLCselection,
 * .AgeforQUADLRU = 0
 */
#define GEN8_EXTERNAL_MOCS 0x18

/* Skylake: MOCS is now an index into an array of 62 different caching
 * configurations programmed by the kernel.
 */
/* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */
#define GEN9_MOCS (2 << 1)

/* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */
#define GEN9_EXTERNAL_MOCS (1 << 1)

/* Cannonlake MOCS defines are duplicates of Skylake MOCS defines. */
#define GEN10_MOCS GEN9_MOCS
#define GEN10_EXTERNAL_MOCS GEN9_EXTERNAL_MOCS

/* Ice Lake MOCS defines are duplicates of Skylake MOCS defines. */
#define GEN11_MOCS GEN9_MOCS
#define GEN11_EXTERNAL_MOCS GEN9_EXTERNAL_MOCS

/* TigerLake MOCS */
#define GEN12_MOCS GEN9_MOCS
/* TC=1/LLC Only, LeCC=1/Uncacheable, LRUM=0, L3CC=1/Uncacheable */
#define GEN12_EXTERNAL_MOCS (3 << 1)
struct anv_device_memory {
   struct list_head link;

   struct anv_bo *bo;
   struct anv_memory_type *type;
   VkDeviceSize map_size;
   void *map;

   /* If set, we are holding reference to AHardwareBuffer
    * which we must release when memory is freed.
    */
   struct AHardwareBuffer *ahw;

   /* If set, this memory comes from a host pointer. */
   void *host_ptr;
};
/**
 * Header for Vertex URB Entry (VUE)
 */
struct anv_vue_header {
   uint32_t RTAIndex; /* RenderTargetArrayIndex */
   uint32_t ViewportIndex;
};
/** Struct representing a sampled image descriptor
 *
 * This descriptor layout is used for sampled images, bare sampler, and
 * combined image/sampler descriptors.
 */
struct anv_sampled_image_descriptor {
   /** Bindless image handle
    *
    * This is expected to already be shifted such that the 20-bit
    * SURFACE_STATE table index is in the top 20 bits.
    */
   uint32_t image;

   /** Bindless sampler handle
    *
    * This is assumed to be a 32B-aligned SAMPLER_STATE pointer relative
    * to the dynamic state base address.
    */
   uint32_t sampler;
};

struct anv_texture_swizzle_descriptor {
   /** Texture swizzle
    *
    * See also nir_intrinsic_channel_select_intel
    */
   uint8_t swizzle[4];

   /** Unused padding to ensure the struct is a multiple of 64 bits */
   uint32_t _pad;
};
/** Struct representing a storage image descriptor */
struct anv_storage_image_descriptor {
   /** Bindless image handles
    *
    * These are expected to already be shifted such that the 20-bit
    * SURFACE_STATE table index is in the top 20 bits.
    */
   uint32_t read_write;
   uint32_t write_only;
};

/** Struct representing an address/range descriptor
 *
 * The fields of this struct correspond directly to the data layout of
 * nir_address_format_64bit_bounded_global addresses. The last field is the
 * offset in the NIR address so it must be zero so that when you load the
 * descriptor you get a pointer to the start of the range.
 */
struct anv_address_range_descriptor {
   uint64_t address;
   uint32_t range;
   uint32_t zero;
};
enum anv_descriptor_data {
   /** The descriptor contains a BTI reference to a surface state */
   ANV_DESCRIPTOR_SURFACE_STATE   = (1 << 0),
   /** The descriptor contains a BTI reference to a sampler state */
   ANV_DESCRIPTOR_SAMPLER_STATE   = (1 << 1),
   /** The descriptor contains an actual buffer view */
   ANV_DESCRIPTOR_BUFFER_VIEW     = (1 << 2),
   /** The descriptor contains auxiliary image layout data */
   ANV_DESCRIPTOR_IMAGE_PARAM     = (1 << 3),
   /** The descriptor contains inline uniform data */
   ANV_DESCRIPTOR_INLINE_UNIFORM  = (1 << 4),
   /** anv_address_range_descriptor with a buffer address and range */
   ANV_DESCRIPTOR_ADDRESS_RANGE   = (1 << 5),
   /** Bindless surface handle */
   ANV_DESCRIPTOR_SAMPLED_IMAGE   = (1 << 6),
   /** Storage image handles */
   ANV_DESCRIPTOR_STORAGE_IMAGE   = (1 << 7),
   /** Texture swizzle descriptor */
   ANV_DESCRIPTOR_TEXTURE_SWIZZLE = (1 << 8),
};
struct anv_descriptor_set_binding_layout {
   /* The type of the descriptors in this binding */
   VkDescriptorType type;

   /* Flags provided when this binding was created */
   VkDescriptorBindingFlagsEXT flags;

   /* Bitfield representing the type of data this descriptor contains */
   enum anv_descriptor_data data;

   /* Maximum number of YCbCr texture/sampler planes */
   uint8_t max_plane_count;

   /* Number of array elements in this binding (or size in bytes for inline
    * uniform data)
    */
   uint16_t array_size;

   /* Index into the flattened descriptor set */
   uint16_t descriptor_index;

   /* Index into the dynamic state array for a dynamic buffer */
   int16_t dynamic_offset_index;

   /* Index into the descriptor set buffer views */
   int16_t buffer_view_index;

   /* Offset into the descriptor buffer where this descriptor lives */
   uint32_t descriptor_offset;

   /* Immutable samplers (or NULL if no immutable samplers) */
   struct anv_sampler **immutable_samplers;
};
unsigned anv_descriptor_size(const struct anv_descriptor_set_binding_layout *layout);

unsigned anv_descriptor_type_size(const struct anv_physical_device *pdevice,
                                  VkDescriptorType type);

bool anv_descriptor_supports_bindless(const struct anv_physical_device *pdevice,
                                      const struct anv_descriptor_set_binding_layout *binding,
                                      bool sampler);

bool anv_descriptor_requires_bindless(const struct anv_physical_device *pdevice,
                                      const struct anv_descriptor_set_binding_layout *binding,
                                      bool sampler);
struct anv_descriptor_set_layout {
   /* Descriptor set layouts can be destroyed at almost any time */
   uint32_t ref_cnt;

   /* Number of bindings in this descriptor set */
   uint16_t binding_count;

   /* Total size of the descriptor set with room for all array entries */
   uint16_t size;

   /* Shader stages affected by this descriptor set */
   uint16_t shader_stages;

   /* Number of buffer views in this descriptor set */
   uint16_t buffer_view_count;

   /* Number of dynamic offsets used by this descriptor set */
   uint16_t dynamic_offset_count;

   /* Size of the descriptor buffer for this descriptor set */
   uint32_t descriptor_buffer_size;

   /* Bindings in this descriptor set */
   struct anv_descriptor_set_binding_layout binding[0];
};

static inline void
anv_descriptor_set_layout_ref(struct anv_descriptor_set_layout *layout)
{
   assert(layout && layout->ref_cnt >= 1);
   p_atomic_inc(&layout->ref_cnt);
}

static inline void
anv_descriptor_set_layout_unref(struct anv_device *device,
                                struct anv_descriptor_set_layout *layout)
{
   assert(layout && layout->ref_cnt >= 1);
   if (p_atomic_dec_zero(&layout->ref_cnt))
      vk_free(&device->alloc, layout);
}
struct anv_descriptor {
   VkDescriptorType type;

   union {
      struct {
         VkImageLayout layout;
         struct anv_image_view *image_view;
         struct anv_sampler *sampler;
      };

      struct anv_buffer *buffer;

      struct anv_buffer_view *buffer_view;
   };
};
struct anv_descriptor_set {
   struct anv_descriptor_pool *pool;
   struct anv_descriptor_set_layout *layout;

   /* State relative to anv_descriptor_pool::bo */
   struct anv_state desc_mem;
   /* Surface state for the descriptor buffer */
   struct anv_state desc_surface_state;

   uint32_t buffer_view_count;
   struct anv_buffer_view *buffer_views;

   /* Link to descriptor pool's desc_sets list. */
   struct list_head pool_link;

   struct anv_descriptor descriptors[0];
};
struct anv_buffer_view {
   enum isl_format format; /**< VkBufferViewCreateInfo::format */
   uint64_t range; /**< VkBufferViewCreateInfo::range */

   struct anv_address address;

   struct anv_state surface_state;
   struct anv_state storage_surface_state;
   struct anv_state writeonly_storage_surface_state;

   struct brw_image_param storage_image_param;
};
struct anv_push_descriptor_set {
   struct anv_descriptor_set set;

   /* Put this field right behind anv_descriptor_set so it fills up the
    * descriptors[0] field. */
   struct anv_descriptor descriptors[MAX_PUSH_DESCRIPTORS];

   /** True if the descriptor set buffer has been referenced by a draw or
    * dispatch command.
    */
   bool set_used_on_gpu;

   struct anv_buffer_view buffer_views[MAX_PUSH_DESCRIPTORS];
};
struct anv_descriptor_pool {
   struct util_vma_heap bo_heap;

   struct anv_state_stream surface_state_stream;
   void *surface_state_free_list;

   struct list_head desc_sets;
};
enum anv_descriptor_template_entry_type {
   ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_IMAGE,
   ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_BUFFER,
   ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_BUFFER_VIEW
};
struct anv_descriptor_template_entry {
   /* The type of descriptor in this entry */
   VkDescriptorType type;

   /* Binding in the descriptor set */
   uint32_t binding;

   /* Offset at which to write into the descriptor set binding */
   uint32_t array_element;

   /* Number of elements to write into the descriptor set binding */
   uint32_t array_count;

   /* Offset into the user provided data */
   size_t offset;

   /* Stride between elements into the user provided data */
   size_t stride;
};
struct anv_descriptor_update_template {
   VkPipelineBindPoint bind_point;

   /* The descriptor set this template corresponds to. This value is only
    * valid if the template was created with the templateType
    * VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET.
    */
   uint8_t set;

   /* Number of entries in this template */
   uint32_t entry_count;

   /* Entries of the template */
   struct anv_descriptor_template_entry entries[0];
};
size_t
anv_descriptor_set_layout_size(const struct anv_descriptor_set_layout *layout);

void
anv_descriptor_set_write_image_view(struct anv_device *device,
                                    struct anv_descriptor_set *set,
                                    const VkDescriptorImageInfo * const info,
                                    VkDescriptorType type,
                                    uint32_t binding,
                                    uint32_t element);

void
anv_descriptor_set_write_buffer_view(struct anv_device *device,
                                     struct anv_descriptor_set *set,
                                     VkDescriptorType type,
                                     struct anv_buffer_view *buffer_view,
                                     uint32_t binding,
                                     uint32_t element);

void
anv_descriptor_set_write_buffer(struct anv_device *device,
                                struct anv_descriptor_set *set,
                                struct anv_state_stream *alloc_stream,
                                VkDescriptorType type,
                                struct anv_buffer *buffer,
                                uint32_t binding,
                                uint32_t element,
                                VkDeviceSize offset,
                                VkDeviceSize range);

void
anv_descriptor_set_write_inline_uniform_data(struct anv_device *device,
                                             struct anv_descriptor_set *set,
                                             uint32_t binding,
                                             const void *data,
                                             size_t offset,
                                             size_t size);

void
anv_descriptor_set_write_template(struct anv_device *device,
                                  struct anv_descriptor_set *set,
                                  struct anv_state_stream *alloc_stream,
                                  const struct anv_descriptor_update_template *template,
                                  const void *data);

VkResult
anv_descriptor_set_create(struct anv_device *device,
                          struct anv_descriptor_pool *pool,
                          struct anv_descriptor_set_layout *layout,
                          struct anv_descriptor_set **out_set);

void
anv_descriptor_set_destroy(struct anv_device *device,
                           struct anv_descriptor_pool *pool,
                           struct anv_descriptor_set *set);
#define ANV_DESCRIPTOR_SET_DESCRIPTORS       (UINT8_MAX - 3)
#define ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS   (UINT8_MAX - 2)
#define ANV_DESCRIPTOR_SET_SHADER_CONSTANTS  (UINT8_MAX - 1)
#define ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS UINT8_MAX
struct anv_pipeline_binding {
   /* The descriptor set this surface corresponds to. The special value of
    * ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS indicates that the offset refers
    * to a color attachment and not a regular descriptor.
    */
   uint8_t set;

   /* Binding in the descriptor set */
   uint32_t binding;

   /* Index in the binding */
   uint32_t index;

   /* Plane in the binding index */
   uint8_t plane;

   /* Input attachment index (relative to the subpass) */
   uint8_t input_attachment_index;

   /* For a storage image, whether it is write-only */
   bool write_only;
};
struct anv_pipeline_layout {
   struct {
      struct anv_descriptor_set_layout *layout;
      uint32_t dynamic_offset_start;
   } set[MAX_SETS];

   uint32_t num_sets;

   unsigned char sha1[20];
};
struct anv_buffer {
   struct anv_device *device;
   VkDeviceSize size;

   VkBufferUsageFlags usage;

   /* Set when bound */
   struct anv_address address;
};
static inline uint64_t
anv_buffer_get_range(struct anv_buffer *buffer, uint64_t offset, uint64_t range)
{
   assert(offset <= buffer->size);
   if (range == VK_WHOLE_SIZE) {
      return buffer->size - offset;
   } else {
      assert(range + offset >= range);
      assert(range + offset <= buffer->size);
      return range;
   }
}
enum anv_cmd_dirty_bits {
   ANV_CMD_DIRTY_DYNAMIC_VIEWPORT             = 1 << 0, /* VK_DYNAMIC_STATE_VIEWPORT */
   ANV_CMD_DIRTY_DYNAMIC_SCISSOR              = 1 << 1, /* VK_DYNAMIC_STATE_SCISSOR */
   ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH           = 1 << 2, /* VK_DYNAMIC_STATE_LINE_WIDTH */
   ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS           = 1 << 3, /* VK_DYNAMIC_STATE_DEPTH_BIAS */
   ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS      = 1 << 4, /* VK_DYNAMIC_STATE_BLEND_CONSTANTS */
   ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS         = 1 << 5, /* VK_DYNAMIC_STATE_DEPTH_BOUNDS */
   ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK = 1 << 6, /* VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK */
   ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK   = 1 << 7, /* VK_DYNAMIC_STATE_STENCIL_WRITE_MASK */
   ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE    = 1 << 8, /* VK_DYNAMIC_STATE_STENCIL_REFERENCE */
   ANV_CMD_DIRTY_PIPELINE                     = 1 << 9,
   ANV_CMD_DIRTY_INDEX_BUFFER                 = 1 << 10,
   ANV_CMD_DIRTY_RENDER_TARGETS               = 1 << 11,
   ANV_CMD_DIRTY_XFB_ENABLE                   = 1 << 12,
   ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE         = 1 << 13, /* VK_DYNAMIC_STATE_LINE_STIPPLE_EXT */
};
typedef uint32_t anv_cmd_dirty_mask_t;
#define ANV_CMD_DIRTY_DYNAMIC_ALL                       \
   (ANV_CMD_DIRTY_DYNAMIC_VIEWPORT |                    \
    ANV_CMD_DIRTY_DYNAMIC_SCISSOR |                     \
    ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH |                  \
    ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS |                  \
    ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS |             \
    ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS |                \
    ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |        \
    ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |          \
    ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE |           \
    ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE)
static inline enum anv_cmd_dirty_bits
anv_cmd_dirty_bit_for_vk_dynamic_state(VkDynamicState vk_state)
{
   switch (vk_state) {
   case VK_DYNAMIC_STATE_VIEWPORT:
      return ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
   case VK_DYNAMIC_STATE_SCISSOR:
      return ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
   case VK_DYNAMIC_STATE_LINE_WIDTH:
      return ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
   case VK_DYNAMIC_STATE_DEPTH_BIAS:
      return ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
   case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
      return ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
   case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
      return ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
   case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
      return ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
   case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
      return ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
   case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
      return ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
   case VK_DYNAMIC_STATE_LINE_STIPPLE_EXT:
      return ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE;
   default:
      assert(!"Unsupported dynamic state");
      return 0;
   }
}
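/* Usage sketch (illustrative only): when a pipeline declares dynamic state,
 * the corresponding dirty bits can be accumulated from
 * VkPipelineDynamicStateCreateInfo like so (`info` is a hypothetical pointer
 * to that create-info struct):
 *
 *    anv_cmd_dirty_mask_t mask = 0;
 *    for (uint32_t i = 0; i < info->dynamicStateCount; i++)
 *       mask |= anv_cmd_dirty_bit_for_vk_dynamic_state(info->pDynamicStates[i]);
 */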
enum anv_pipe_bits {
   ANV_PIPE_DEPTH_CACHE_FLUSH_BIT            = (1 << 0),
   ANV_PIPE_STALL_AT_SCOREBOARD_BIT          = (1 << 1),
   ANV_PIPE_STATE_CACHE_INVALIDATE_BIT       = (1 << 2),
   ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT    = (1 << 3),
   ANV_PIPE_VF_CACHE_INVALIDATE_BIT          = (1 << 4),
   ANV_PIPE_DATA_CACHE_FLUSH_BIT             = (1 << 5),
   ANV_PIPE_TILE_CACHE_FLUSH_BIT             = (1 << 6),
   ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT     = (1 << 10),
   ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT = (1 << 11),
   ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT    = (1 << 12),
   ANV_PIPE_DEPTH_STALL_BIT                  = (1 << 13),
   ANV_PIPE_CS_STALL_BIT                     = (1 << 20),

   /* This bit does not exist directly in PIPE_CONTROL. Instead it means that
    * a flush has happened but not a CS stall. The next time we do any sort
    * of invalidation we need to insert a CS stall at that time. Otherwise,
    * we would have to CS stall on every flush which could be bad.
    */
   ANV_PIPE_NEEDS_CS_STALL_BIT               = (1 << 21),

   /* This bit does not exist directly in PIPE_CONTROL. It means that render
    * target operations related to transfer commands with VkBuffer as
    * destination are ongoing. Some operations like copies on the command
    * streamer might need to be aware of this to trigger the appropriate stall
    * before they can proceed with the copy.
    */
   ANV_PIPE_RENDER_TARGET_BUFFER_WRITES      = (1 << 22),
};
#define ANV_PIPE_FLUSH_BITS ( \
   ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | \
   ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
   ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | \
   ANV_PIPE_TILE_CACHE_FLUSH_BIT)

#define ANV_PIPE_STALL_BITS ( \
   ANV_PIPE_STALL_AT_SCOREBOARD_BIT | \
   ANV_PIPE_DEPTH_STALL_BIT | \
   ANV_PIPE_CS_STALL_BIT)

#define ANV_PIPE_INVALIDATE_BITS ( \
   ANV_PIPE_STATE_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_VF_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
   ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT)
static inline enum anv_pipe_bits
anv_pipe_flush_bits_for_access_flags(VkAccessFlags flags)
{
   enum anv_pipe_bits pipe_bits = 0;

   unsigned b;
   for_each_bit(b, flags) {
      switch ((VkAccessFlagBits)(1 << b)) {
      case VK_ACCESS_SHADER_WRITE_BIT:
         /* We're transitioning a buffer that was previously used as write
          * destination through the data port. To make its content available
          * to future operations, flush the data cache.
          */
         pipe_bits |= ANV_PIPE_DATA_CACHE_FLUSH_BIT;
         break;
      case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
         /* We're transitioning a buffer that was previously used as render
          * target. To make its content available to future operations, flush
          * the render target cache.
          */
         pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
         break;
      case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
         /* We're transitioning a buffer that was previously used as depth
          * buffer. To make its content available to future operations, flush
          * the depth cache.
          */
         pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
         break;
      case VK_ACCESS_TRANSFER_WRITE_BIT:
         /* We're transitioning a buffer that was previously used as a
          * transfer write destination. Generic write operations include color
          * & depth operations as well as buffer operations like :
          *     - vkCmdClearColorImage()
          *     - vkCmdClearDepthStencilImage()
          *     - vkCmdBlitImage()
          *     - vkCmdCopy*(), vkCmdUpdate*(), vkCmdFill*()
          *
          * Most of these operations are implemented using Blorp which writes
          * through the render target, so flush that cache to make it visible
          * to future operations. And for depth related operations we also
          * need to flush the depth cache.
          */
         pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
         pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
         break;
      case VK_ACCESS_MEMORY_WRITE_BIT:
         /* We're transitioning a buffer for generic write operations. Flush
          * all the caches.
          */
         pipe_bits |= ANV_PIPE_FLUSH_BITS;
         break;
      default:
         break; /* Nothing to do */
      }
   }

   return pipe_bits;
}
static inline enum anv_pipe_bits
anv_pipe_invalidate_bits_for_access_flags(VkAccessFlags flags)
{
   enum anv_pipe_bits pipe_bits = 0;

   unsigned b;
   for_each_bit(b, flags) {
      switch ((VkAccessFlagBits)(1 << b)) {
      case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
         /* Indirect draw commands take a buffer as input that we're going to
          * read from the command streamer to load some of the HW registers
          * (see genX_cmd_buffer.c:load_indirect_parameters). This requires a
          * command streamer stall so that all the cache flushes have
          * completed before the command streamer loads from memory.
          */
         pipe_bits |= ANV_PIPE_CS_STALL_BIT;
         /* Indirect draw commands also set gl_BaseVertex & gl_BaseIndex
          * through a vertex buffer, so invalidate that cache.
          */
         pipe_bits |= ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
         /* For CmdDispatchIndirect, we also load gl_NumWorkGroups through a
          * UBO from the buffer, so we need to invalidate constant cache.
          */
         pipe_bits |= ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
         break;
      case VK_ACCESS_INDEX_READ_BIT:
      case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
         /* We're transitioning a buffer to be used as input for vkCmdDraw*
          * commands, so we invalidate the VF cache to make sure there is no
          * stale data when we start rendering.
          */
         pipe_bits |= ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
         break;
      case VK_ACCESS_UNIFORM_READ_BIT:
         /* We're transitioning a buffer to be used as uniform data. Because
          * uniform is accessed through the data port & sampler, we need to
          * invalidate the texture cache (sampler) & constant cache (data
          * port) to avoid stale data.
          */
         pipe_bits |= ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
         pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
         break;
      case VK_ACCESS_SHADER_READ_BIT:
      case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
      case VK_ACCESS_TRANSFER_READ_BIT:
         /* Transitioning a buffer to be read through the sampler, so
          * invalidate the texture cache, we don't want any stale data.
          */
         pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
         break;
      case VK_ACCESS_MEMORY_READ_BIT:
         /* Transitioning a buffer for generic read, invalidate all the
          * caches.
          */
         pipe_bits |= ANV_PIPE_INVALIDATE_BITS;
         break;
      case VK_ACCESS_MEMORY_WRITE_BIT:
         /* Generic write, make sure all previously written things land in
          * memory.
          */
         pipe_bits |= ANV_PIPE_FLUSH_BITS;
         break;
      case VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT:
         /* Transitioning a buffer for conditional rendering. We'll load the
          * content of this buffer into HW registers using the command
          * streamer, so we need to stall the command streamer to make sure
          * any in-flight flush operations have completed.
          */
         pipe_bits |= ANV_PIPE_CS_STALL_BIT;
         break;
      default:
         break; /* Nothing to do */
      }
   }

   return pipe_bits;
}
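/* Usage sketch (illustrative only): a pipeline barrier maps its source
 * access mask to flushes and its destination access mask to invalidations,
 * then accumulates both into the pending bits, roughly:
 *
 *    cmd_buffer->state.pending_pipe_bits |=
 *       anv_pipe_flush_bits_for_access_flags(srcAccessMask) |
 *       anv_pipe_invalidate_bits_for_access_flags(dstAccessMask);
 *
 * The actual PIPE_CONTROLs are emitted later, when the pending bits are
 * applied.
 */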
#define VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV ( \
   VK_IMAGE_ASPECT_COLOR_BIT | \
   VK_IMAGE_ASPECT_PLANE_0_BIT | \
   VK_IMAGE_ASPECT_PLANE_1_BIT | \
   VK_IMAGE_ASPECT_PLANE_2_BIT)
#define VK_IMAGE_ASPECT_PLANES_BITS_ANV ( \
   VK_IMAGE_ASPECT_PLANE_0_BIT | \
   VK_IMAGE_ASPECT_PLANE_1_BIT | \
   VK_IMAGE_ASPECT_PLANE_2_BIT)
struct anv_vertex_binding {
   struct anv_buffer *buffer;
   VkDeviceSize offset;
};

struct anv_xfb_binding {
   struct anv_buffer *buffer;
   VkDeviceSize offset;
   VkDeviceSize size;
};
#define ANV_PARAM_PUSH(offset)         ((1 << 16) | (uint32_t)(offset))
#define ANV_PARAM_IS_PUSH(param)       ((uint32_t)(param) >> 16 == 1)
#define ANV_PARAM_PUSH_OFFSET(param)   ((param) & 0xffff)

#define ANV_PARAM_DYN_OFFSET(offset)      ((2 << 16) | (uint32_t)(offset))
#define ANV_PARAM_IS_DYN_OFFSET(param)    ((uint32_t)(param) >> 16 == 2)
#define ANV_PARAM_DYN_OFFSET_IDX(param)   ((param) & 0xffff)
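/* Encoding sketch (illustrative only): the upper 16 bits tag the parameter
 * kind and the lower 16 bits carry the payload, so a push-constant param
 * round-trips like this:
 *
 *    uint32_t param = ANV_PARAM_PUSH(8);        // tag 1, offset 8
 *    assert(ANV_PARAM_IS_PUSH(param));
 *    assert(ANV_PARAM_PUSH_OFFSET(param) == 8);
 *
 * and ANV_PARAM_DYN_OFFSET()/ANV_PARAM_DYN_OFFSET_IDX() work the same way
 * with tag 2.
 */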
struct anv_push_constants {
   /* Push constant data provided by the client through vkPushConstants */
   uint8_t client_data[MAX_PUSH_CONSTANTS_SIZE];

   /* Used for vkCmdDispatchBase */
   uint32_t base_work_group_id[3];
};
struct anv_dynamic_state {
   struct {
      uint32_t count;
      VkViewport viewports[MAX_VIEWPORTS];
   } viewport;

   struct {
      uint32_t count;
      VkRect2D scissors[MAX_SCISSORS];
   } scissor;

   float blend_constants[4];

   struct {
      uint32_t front;
      uint32_t back;
   } stencil_compare_mask;

   struct {
      uint32_t front;
      uint32_t back;
   } stencil_write_mask;

   struct {
      uint32_t front;
      uint32_t back;
   } stencil_reference;
};

extern const struct anv_dynamic_state default_dynamic_state;
uint32_t anv_dynamic_state_copy(struct anv_dynamic_state *dest,
                                const struct anv_dynamic_state *src,
                                uint32_t copy_mask);
struct anv_surface_state {
   struct anv_state state;
   /** Address of the surface referred to by this state
    *
    * This address is relative to the start of the BO.
    */
   struct anv_address address;
   /* Address of the aux surface, if any
    *
    * This field is ANV_NULL_ADDRESS if and only if no aux surface exists.
    *
    * With the exception of gen8, the bottom 12 bits of this address' offset
    * include extra aux information.
    */
   struct anv_address aux_address;
   /* Address of the clear color, if any
    *
    * This address is relative to the start of the BO.
    */
   struct anv_address clear_address;
};
/**
 * Attachment state when recording a renderpass instance.
 *
 * The clear value is valid only if there exists a pending clear.
 */
struct anv_attachment_state {
   enum isl_aux_usage aux_usage;
   enum isl_aux_usage input_aux_usage;
   struct anv_surface_state color;
   struct anv_surface_state input;

   VkImageLayout current_layout;
   VkImageLayout current_stencil_layout;
   VkImageAspectFlags pending_clear_aspects;
   VkImageAspectFlags pending_load_aspects;

   VkClearValue clear_value;
   bool clear_color_is_zero_one;
   bool clear_color_is_zero;

   /* When multiview is active, attachments with a renderpass clear
    * operation have their respective layers cleared on the first
    * subpass that uses them, and only in that subpass. We keep track
    * of this using a bitfield to indicate which layers of an attachment
    * have not been cleared yet when multiview is active.
    */
   uint32_t pending_clear_views;
   struct anv_image_view *image_view;
};
/** State tracking for particular pipeline bind point
 *
 * This struct is the base struct for anv_cmd_graphics_state and
 * anv_cmd_compute_state. These are used to track state which is bound to a
 * particular type of pipeline. Generic state that applies per-stage such as
 * binding table offsets and push constants is tracked generically with a
 * per-stage array in anv_cmd_state.
 */
struct anv_cmd_pipeline_state {
   struct anv_pipeline *pipeline;
   struct anv_pipeline_layout *layout;

   struct anv_descriptor_set *descriptors[MAX_SETS];
   uint32_t dynamic_offsets[MAX_DYNAMIC_BUFFERS];

   struct anv_push_descriptor_set *push_descriptors[MAX_SETS];
};
/** State tracking for graphics pipeline
 *
 * This has anv_cmd_pipeline_state as a base struct to track things which get
 * bound to a graphics pipeline. Along with general pipeline bind point state
 * which is in the anv_cmd_pipeline_state base struct, it also contains other
 * state which is graphics-specific.
 */
struct anv_cmd_graphics_state {
   struct anv_cmd_pipeline_state base;

   anv_cmd_dirty_mask_t dirty;

   struct anv_dynamic_state dynamic;

   struct anv_buffer *index_buffer;
   uint32_t index_type; /**< 3DSTATE_INDEX_BUFFER.IndexFormat */
   uint32_t index_offset;
};
/** State tracking for compute pipeline
 *
 * This has anv_cmd_pipeline_state as a base struct to track things which get
 * bound to a compute pipeline. Along with general pipeline bind point state
 * which is in the anv_cmd_pipeline_state base struct, it also contains other
 * state which is compute-specific.
 */
struct anv_cmd_compute_state {
   struct anv_cmd_pipeline_state base;

   bool pipeline_dirty;

   struct anv_address num_workgroups;
};
/** State required while building cmd buffer */
struct anv_cmd_state {
   /* PIPELINE_SELECT.PipelineSelection */
   uint32_t current_pipeline;
   const struct gen_l3_config *current_l3_config;
   uint32_t last_aux_map_state;

   struct anv_cmd_graphics_state gfx;
   struct anv_cmd_compute_state compute;

   enum anv_pipe_bits pending_pipe_bits;
   VkShaderStageFlags descriptors_dirty;
   VkShaderStageFlags push_constants_dirty;

   struct anv_framebuffer *framebuffer;
   struct anv_render_pass *pass;
   struct anv_subpass *subpass;
   VkRect2D render_area;
   uint32_t restart_index;
   struct anv_vertex_binding vertex_bindings[MAX_VBS];
   bool xfb_enabled;
   struct anv_xfb_binding xfb_bindings[MAX_XFB_BUFFERS];
   VkShaderStageFlags push_constant_stages;
   struct anv_push_constants push_constants[MESA_SHADER_STAGES];
   struct anv_state binding_tables[MESA_SHADER_STAGES];
   struct anv_state samplers[MESA_SHADER_STAGES];

   /**
    * Whether or not the gen8 PMA fix is enabled. We ensure that, at the top
    * of any command buffer it is disabled by disabling it in EndCommandBuffer
    * and before invoking the secondary in ExecuteCommands.
    */
   bool pma_fix_enabled;

   /**
    * Whether or not we know for certain that HiZ is enabled for the current
    * subpass. If, for whatever reason, we are unsure as to whether HiZ is
    * enabled or not, this will be false.
    */
   bool hiz_enabled;

   bool conditional_render_enabled;

   /**
    * Last rendering scale argument provided to
    * genX(cmd_buffer_emit_hashing_mode)().
    */
   unsigned current_hash_scale;

   /**
    * Array length is anv_cmd_state::pass::attachment_count. Array content is
    * valid only when recording a render pass instance.
    */
   struct anv_attachment_state *attachments;

   /**
    * Surface states for color render targets. These are stored in a single
    * flat array. For depth-stencil attachments, the surface state is simply
    * left blank.
    */
   struct anv_state render_pass_states;

   /**
    * A null surface state of the right size to match the framebuffer. This
    * is one of the states in render_pass_states.
    */
   struct anv_state null_surface_state;
};
struct anv_cmd_pool {
   VkAllocationCallbacks alloc;
   struct list_head cmd_buffers;
};

#define ANV_CMD_BUFFER_BATCH_SIZE 8192
enum anv_cmd_buffer_exec_mode {
   ANV_CMD_BUFFER_EXEC_MODE_PRIMARY,
   ANV_CMD_BUFFER_EXEC_MODE_EMIT,
   ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT,
   ANV_CMD_BUFFER_EXEC_MODE_CHAIN,
   ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN,
};
struct anv_cmd_buffer {
   VK_LOADER_DATA _loader_data;

   struct anv_device *device;

   struct anv_cmd_pool *pool;
   struct list_head pool_link;

   struct anv_batch batch;

   /* Fields required for the actual chain of anv_batch_bo's.
    *
    * These fields are initialized by anv_cmd_buffer_init_batch_bo_chain().
    */
   struct list_head batch_bos;
   enum anv_cmd_buffer_exec_mode exec_mode;

   /* A vector of anv_batch_bo pointers for every batch or surface buffer
    * referenced by this command buffer
    *
    * initialized by anv_cmd_buffer_init_batch_bo_chain()
    */
   struct u_vector seen_bbos;

   /* A vector of int32_t's for every block of binding tables.
    *
    * initialized by anv_cmd_buffer_init_batch_bo_chain()
    */
   struct u_vector bt_block_states;

   struct anv_reloc_list surface_relocs;
   /** Last seen surface state block pool center bo offset */
   uint32_t last_ss_pool_center;

   /* Serial for tracking buffer completion */
   uint32_t serial;

   /* Stream objects for storing temporary data */
   struct anv_state_stream surface_state_stream;
   struct anv_state_stream dynamic_state_stream;

   VkCommandBufferUsageFlags usage_flags;
   VkCommandBufferLevel level;

   struct anv_cmd_state state;

   /* Set by SetPerformanceMarkerINTEL, written into queries by CmdBeginQuery */
   uint64_t intel_perf_marker;
};
VkResult anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
                                  struct anv_cmd_buffer *secondary);
void anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer);
VkResult anv_cmd_buffer_execbuf(struct anv_device *device,
                                struct anv_cmd_buffer *cmd_buffer,
                                const VkSemaphore *in_semaphores,
                                uint32_t num_in_semaphores,
                                const VkSemaphore *out_semaphores,
                                uint32_t num_out_semaphores,
                                VkFence fence);

VkResult anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer);

struct anv_state anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
                                             const void *data, uint32_t size, uint32_t alignment);
struct anv_state anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
                                              uint32_t *a, uint32_t *b,
                                              uint32_t dwords, uint32_t alignment);
struct anv_address
anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer);
struct anv_state
anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t entries, uint32_t *state_offset);
struct anv_state
anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer);
struct anv_state
anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t size, uint32_t alignment);

VkResult
anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer);

void gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer);
void gen8_cmd_buffer_emit_depth_viewport(struct anv_cmd_buffer *cmd_buffer,
                                         bool depth_clamp_enable);
void gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer);

void anv_cmd_buffer_setup_attachments(struct anv_cmd_buffer *cmd_buffer,
                                      struct anv_render_pass *pass,
                                      struct anv_framebuffer *framebuffer,
                                      const VkClearValue *clear_values);

void anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer);

struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
                              gl_shader_stage stage);
struct anv_state
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer);

const struct anv_image_view *
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer);

VkResult
anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                         uint32_t num_entries,
                                         uint32_t *state_offset,
                                         struct anv_state *bt_state);

void anv_cmd_buffer_dump(struct anv_cmd_buffer *cmd_buffer);

void anv_cmd_emit_conditional_render_predicate(struct anv_cmd_buffer *cmd_buffer);
enum anv_fence_type {
   ANV_FENCE_TYPE_NONE = 0,
   ANV_FENCE_TYPE_BO,
   ANV_FENCE_TYPE_SYNCOBJ,
   ANV_FENCE_TYPE_WSI,
};

enum anv_bo_fence_state {
   /** Indicates that this is a new (or newly reset fence) */
   ANV_BO_FENCE_STATE_RESET,

   /** Indicates that this fence has been submitted to the GPU but is still
    * (as far as we know) in use by the GPU.
    */
   ANV_BO_FENCE_STATE_SUBMITTED,

   ANV_BO_FENCE_STATE_SIGNALED,
};
struct anv_fence_impl {
   enum anv_fence_type type;

   union {
      /** Fence implementation for BO fences
       *
       * These fences use a BO and a set of CPU-tracked state flags. The BO
       * is added to the object list of the last execbuf call in a QueueSubmit
       * and is marked EXEC_WRITE. The state flags track when the BO has been
       * submitted to the kernel. We need to do this because Vulkan lets you
       * wait on a fence that has not yet been submitted and I915_GEM_BUSY
       * will say it's idle in this case.
       */
      struct {
         struct anv_bo *bo;
         enum anv_bo_fence_state state;
      } bo;

      /** DRM syncobj handle for syncobj-based fences */
      uint32_t syncobj;

      /** WSI fence */
      struct wsi_fence *fence_wsi;
   };
};

struct anv_fence {
   /* Permanent fence state. Every fence has some form of permanent state
    * (type != ANV_SEMAPHORE_TYPE_NONE). This may be a BO to fence on (for
    * cross-process fences) or it could just be a dummy for use internally.
    */
   struct anv_fence_impl permanent;

   /* Temporary fence state. A fence *may* have temporary state. That state
    * is added to the fence by an import operation and is reset back to
    * ANV_SEMAPHORE_TYPE_NONE when the fence is reset. A fence with temporary
    * state cannot be signaled because the fence must already be signaled
    * before the temporary state can be exported from the fence in the other
    * process and imported here.
    */
   struct anv_fence_impl temporary;
};
struct anv_event {
   uint64_t semaphore;
   struct anv_state state;
};
enum anv_semaphore_type {
   ANV_SEMAPHORE_TYPE_NONE = 0,
   ANV_SEMAPHORE_TYPE_DUMMY,
   ANV_SEMAPHORE_TYPE_BO,
   ANV_SEMAPHORE_TYPE_SYNC_FILE,
   ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ,
};
struct anv_semaphore_impl {
   enum anv_semaphore_type type;

   union {
      /* A BO representing this semaphore when type == ANV_SEMAPHORE_TYPE_BO.
       * This BO will be added to the object list on any execbuf2 calls for
       * which this semaphore is used as a wait or signal fence. When used as
       * a signal fence, the EXEC_OBJECT_WRITE flag will be set.
       */
      struct anv_bo *bo;

      /* The sync file descriptor when type == ANV_SEMAPHORE_TYPE_SYNC_FILE.
       * If the semaphore is in the unsignaled state due to either just being
       * created or because it has been used for a wait, fd will be -1.
       */
      int fd;

      /* Sync object handle when type == ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ.
       * Unlike GEM BOs, DRM sync objects aren't deduplicated by the kernel on
       * import so we don't need to bother with a userspace cache.
       */
      uint32_t syncobj;
   };
};
struct anv_semaphore {
   /* Permanent semaphore state. Every semaphore has some form of permanent
    * state (type != ANV_SEMAPHORE_TYPE_NONE). This may be a BO to fence on
    * (for cross-process semaphores) or it could just be a dummy for use
    * internally.
    */
   struct anv_semaphore_impl permanent;

   /* Temporary semaphore state. A semaphore *may* have temporary state.
    * That state is added to the semaphore by an import operation and is reset
    * back to ANV_SEMAPHORE_TYPE_NONE when the semaphore is waited on. A
    * semaphore with temporary state cannot be signaled because the semaphore
    * must already be signaled before the temporary state can be exported from
    * the semaphore in the other process and imported here.
    */
   struct anv_semaphore_impl temporary;
};

void anv_semaphore_reset_temporary(struct anv_device *device,
                                   struct anv_semaphore *semaphore);
struct anv_shader_module {
   unsigned char sha1[20];
   uint32_t size;
   char data[0];
};
static inline gl_shader_stage
vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
{
   assert(__builtin_popcount(vk_stage) == 1);
   return ffs(vk_stage) - 1;
}

static inline VkShaderStageFlagBits
mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
{
   return (1 << mesa_stage);
}

#define ANV_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)

#define anv_foreach_stage(stage, stage_bits)                         \
   for (gl_shader_stage stage,                                       \
        __tmp = (gl_shader_stage)((stage_bits) & ANV_STAGE_MASK);    \
        stage = __builtin_ffs(__tmp) - 1, __tmp;                     \
        __tmp &= ~(1 << (stage)))
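/* Usage sketch (illustrative only): iterate just the stages present in a
 * VkShaderStageFlags mask, e.g. to flush per-stage state:
 *
 *    anv_foreach_stage(s, VK_SHADER_STAGE_VERTEX_BIT |
 *                         VK_SHADER_STAGE_FRAGMENT_BIT) {
 *       // s is MESA_SHADER_VERTEX on the first pass, MESA_SHADER_FRAGMENT
 *       // on the second
 *    }
 *
 * The macro clears each visited bit from its private copy of the mask, so
 * the loop ends once every set bit has been handled.
 */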
struct anv_pipeline_bind_map {
   uint32_t surface_count;
   uint32_t sampler_count;

   struct anv_pipeline_binding *surface_to_descriptor;
   struct anv_pipeline_binding *sampler_to_descriptor;
};

struct anv_shader_bin_key {
   uint32_t size;
   uint8_t data[0];
};
struct anv_shader_bin {
   uint32_t ref_cnt;

   const struct anv_shader_bin_key *key;

   struct anv_state kernel;
   uint32_t kernel_size;

   struct anv_state constant_data;
   uint32_t constant_data_size;

   const struct brw_stage_prog_data *prog_data;
   uint32_t prog_data_size;

   struct brw_compile_stats stats[3];
   uint32_t num_stats;

   struct nir_xfb_info *xfb_info;

   struct anv_pipeline_bind_map bind_map;
};
struct anv_shader_bin *
anv_shader_bin_create(struct anv_device *device,
                      const void *key, uint32_t key_size,
                      const void *kernel, uint32_t kernel_size,
                      const void *constant_data, uint32_t constant_data_size,
                      const struct brw_stage_prog_data *prog_data,
                      uint32_t prog_data_size, const void *prog_data_param,
                      const struct brw_compile_stats *stats, uint32_t num_stats,
                      const struct nir_xfb_info *xfb_info,
                      const struct anv_pipeline_bind_map *bind_map);
void
anv_shader_bin_destroy(struct anv_device *device, struct anv_shader_bin *shader);

static inline void
anv_shader_bin_ref(struct anv_shader_bin *shader)
{
   assert(shader && shader->ref_cnt >= 1);
   p_atomic_inc(&shader->ref_cnt);
}

static inline void
anv_shader_bin_unref(struct anv_device *device, struct anv_shader_bin *shader)
{
   assert(shader && shader->ref_cnt >= 1);
   if (p_atomic_dec_zero(&shader->ref_cnt))
      anv_shader_bin_destroy(device, shader);
}
/* 5 possible simultaneous shader stages and FS may have up to 3 binaries */
#define MAX_PIPELINE_EXECUTABLES 7

struct anv_pipeline_executable {
   gl_shader_stage stage;

   struct brw_compile_stats stats;

   char *nir;
   char *disasm;
};
struct anv_pipeline {
   struct anv_device *device;
   struct anv_batch batch;
   uint32_t batch_data[512];
   struct anv_reloc_list batch_relocs;
   anv_cmd_dirty_mask_t dynamic_state_mask;
   struct anv_dynamic_state dynamic_state;

   VkPipelineCreateFlags flags;
   struct anv_subpass *subpass;

   bool needs_data_cache;

   struct anv_shader_bin *shaders[MESA_SHADER_STAGES];

   uint32_t num_executables;
   struct anv_pipeline_executable executables[MAX_PIPELINE_EXECUTABLES];

   struct {
      const struct gen_l3_config *l3_config;
      uint32_t total_size;
   } urb;

   VkShaderStageFlags active_stages;
   struct anv_state blend_state;

   struct anv_pipeline_vertex_binding {
      uint32_t stride;
      bool instanced;
      uint32_t instance_divisor;
   } vb[MAX_VBS];

   bool primitive_restart;

   uint32_t cs_right_mask;

   bool depth_test_enable;
   bool writes_stencil;
   bool stencil_test_enable;
   bool depth_clamp_enable;
   bool depth_clip_enable;
   bool sample_shading_enable;
   bool depth_bounds_test_enable;

   struct {
      uint32_t depth_stencil_state[3];
   } gen7;

   struct {
      uint32_t wm_depth_stencil[3];
   } gen8;

   struct {
      uint32_t wm_depth_stencil[4];
   } gen9;

   uint32_t interface_descriptor_data[8];
};
static inline bool
anv_pipeline_has_stage(const struct anv_pipeline *pipeline,
                       gl_shader_stage stage)
{
   return (pipeline->active_stages & mesa_to_vk_shader_stage(stage)) != 0;
}
#define ANV_DECL_GET_PROG_DATA_FUNC(prefix, stage)                   \
static inline const struct brw_##prefix##_prog_data *                \
get_##prefix##_prog_data(const struct anv_pipeline *pipeline)        \
{                                                                    \
   if (anv_pipeline_has_stage(pipeline, stage)) {                    \
      return (const struct brw_##prefix##_prog_data *)               \
             pipeline->shaders[stage]->prog_data;                    \
   } else {                                                          \
      return NULL;                                                   \
   }                                                                 \
}

ANV_DECL_GET_PROG_DATA_FUNC(vs, MESA_SHADER_VERTEX)
ANV_DECL_GET_PROG_DATA_FUNC(tcs, MESA_SHADER_TESS_CTRL)
ANV_DECL_GET_PROG_DATA_FUNC(tes, MESA_SHADER_TESS_EVAL)
ANV_DECL_GET_PROG_DATA_FUNC(gs, MESA_SHADER_GEOMETRY)
ANV_DECL_GET_PROG_DATA_FUNC(wm, MESA_SHADER_FRAGMENT)
ANV_DECL_GET_PROG_DATA_FUNC(cs, MESA_SHADER_COMPUTE)
static inline const struct brw_vue_prog_data *
anv_pipeline_get_last_vue_prog_data(const struct anv_pipeline *pipeline)
{
   if (anv_pipeline_has_stage(pipeline, MESA_SHADER_GEOMETRY))
      return &get_gs_prog_data(pipeline)->base;
   else if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
      return &get_tes_prog_data(pipeline)->base;
   else
      return &get_vs_prog_data(pipeline)->base;
}
VkResult
anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
                  struct anv_pipeline_cache *cache,
                  const VkGraphicsPipelineCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *alloc);

VkResult
anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkComputePipelineCreateInfo *info,
                        const struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info);
struct anv_format_plane {
   enum isl_format isl_format:16;
   struct isl_swizzle swizzle;

   /* Whether this plane contains chroma channels */
   bool has_chroma;

   /* For downscaling of YUV planes */
   uint8_t denominator_scales[2];

   /* How to map sampled ycbcr planes to a single 4 component element. */
   struct isl_swizzle ycbcr_swizzle;

   /* What aspect is associated to this plane */
   VkImageAspectFlags aspect;
};

struct anv_format {
   struct anv_format_plane planes[3];
   uint8_t n_planes;
};
static inline uint32_t
anv_image_aspect_to_plane(VkImageAspectFlags image_aspects,
                          VkImageAspectFlags aspect_mask)
{
   switch (aspect_mask) {
   case VK_IMAGE_ASPECT_COLOR_BIT:
   case VK_IMAGE_ASPECT_DEPTH_BIT:
   case VK_IMAGE_ASPECT_PLANE_0_BIT:
      return 0;
   case VK_IMAGE_ASPECT_STENCIL_BIT:
      if ((image_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) == 0)
         return 0;
      /* Fall-through */
   case VK_IMAGE_ASPECT_PLANE_1_BIT:
      return 1;
   case VK_IMAGE_ASPECT_PLANE_2_BIT:
      return 2;
   default:
      /* Purposefully assert with depth/stencil aspects. */
      unreachable("invalid image aspect");
   }
}
static inline VkImageAspectFlags
anv_plane_to_aspect(VkImageAspectFlags image_aspects,
                    uint32_t plane)
{
   if (image_aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
      if (util_bitcount(image_aspects) > 1)
         return VK_IMAGE_ASPECT_PLANE_0_BIT << plane;
      return VK_IMAGE_ASPECT_COLOR_BIT;
   }
   if (image_aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
      return VK_IMAGE_ASPECT_DEPTH_BIT << plane;
   assert(image_aspects == VK_IMAGE_ASPECT_STENCIL_BIT);
   return VK_IMAGE_ASPECT_STENCIL_BIT;
}
#define anv_foreach_image_aspect_bit(b, image, aspects) \
   for_each_bit(b, anv_image_expand_aspects(image, aspects))
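/* Usage sketch (illustrative only): iterate the planes of a multi-planar
 * image even when the caller only asked for VK_IMAGE_ASPECT_COLOR_BIT:
 *
 *    uint32_t b;
 *    anv_foreach_image_aspect_bit(b, image, VK_IMAGE_ASPECT_COLOR_BIT) {
 *       uint32_t plane = anv_image_aspect_to_plane(image->aspects, 1UL << b);
 *       ...
 *    }
 *
 * anv_image_expand_aspects() (defined further down) turns the COLOR bit back
 * into the image's actual plane aspects before the iteration starts.
 */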
const struct anv_format *
anv_get_format(VkFormat format);

static inline uint32_t
anv_get_format_planes(VkFormat vk_format)
{
   const struct anv_format *format = anv_get_format(vk_format);

   return format != NULL ? format->n_planes : 0;
}
struct anv_format_plane
anv_get_format_plane(const struct gen_device_info *devinfo, VkFormat vk_format,
                     VkImageAspectFlagBits aspect, VkImageTiling tiling);

static inline enum isl_format
anv_get_isl_format(const struct gen_device_info *devinfo, VkFormat vk_format,
                   VkImageAspectFlags aspect, VkImageTiling tiling)
{
   return anv_get_format_plane(devinfo, vk_format, aspect, tiling).isl_format;
}
static inline struct isl_swizzle
anv_swizzle_for_render(struct isl_swizzle swizzle)
{
   /* Sometimes the swizzle will have alpha map to one. We do this to fake
    * RGB as RGBA for texturing
    */
   assert(swizzle.a == ISL_CHANNEL_SELECT_ONE ||
          swizzle.a == ISL_CHANNEL_SELECT_ALPHA);

   /* But it doesn't matter what we render to that channel */
   swizzle.a = ISL_CHANNEL_SELECT_ALPHA;

   return swizzle;
}

void
anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm);
/**
 * Subsurface of an anv_image.
 */
struct anv_surface {
   /** Valid only if isl_surf::size_B > 0. */
   struct isl_surf isl;

   /**
    * Offset from VkImage's base address, as bound by vkBindImageMemory().
    */
   uint32_t offset;
};

struct anv_image {
   VkImageType type; /**< VkImageCreateInfo::imageType */
   /* The original VkFormat provided by the client. This may not match any
    * of the actual surface formats.
    */
   VkFormat vk_format;
   const struct anv_format *format;

   VkImageAspectFlags aspects;
   VkExtent3D extent;
   uint32_t levels;
   uint32_t array_size;
   uint32_t samples; /**< VkImageCreateInfo::samples */

   VkImageUsageFlags usage; /**< VkImageCreateInfo::usage. */
   VkImageUsageFlags stencil_usage;
   VkImageCreateFlags create_flags; /* Flags used when creating image. */
   VkImageTiling tiling; /**< VkImageCreateInfo::tiling */

   /** True if this needs to be bound to an appropriately tiled BO.
    *
    * When not using modifiers, consumers such as X11, Wayland, and KMS need
    * the tiling passed via I915_GEM_SET_TILING. When exporting these buffers
    * we require a dedicated allocation so that we can know to allocate a
    * tiled buffer.
    */
   bool needs_set_tiling;

   /**
    * Must be DRM_FORMAT_MOD_INVALID unless tiling is
    * VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT.
    */
   uint64_t drm_format_mod;

   /* Whether the image is made of several underlying buffer objects rather
    * than a single one with different offsets.
    */
   bool disjoint;

   /* All the formats that can be used when creating views of this image
    * are CCS_E compatible.
    */
   bool ccs_e_compatible;

   /* Image was created with external format. */
   bool external_format;
   /**
    * For each foo, anv_image::planes[x].surface is valid if and only if
    * anv_image::aspects has a x aspect. Refer to anv_image_aspect_to_plane()
    * to figure the number associated with a given aspect.
    *
    * The hardware requires that the depth buffer and stencil buffer be
    * separate surfaces. From Vulkan's perspective, though, depth and stencil
    * reside in the same VkImage. To satisfy both the hardware and Vulkan, we
    * allocate the depth and stencil buffers as separate surfaces in the same
    * bo.
    *
    * Memory layout :
    *
    *  -----------------------
    *  |     surface0        |
    *  ----------------------- |
    *  |  shadow surface0    | |
    *  ----------------------- | Plane 0
    *  |   aux surface0      | |
    *  ----------------------- |
    *  | fast clear colors0  | \|/
    *  -----------------------
    *  |     surface1        |
    *  ----------------------- |
    *  |  shadow surface1    | |
    *  ----------------------- | Plane 1
    *  |   aux surface1      | |
    *  ----------------------- |
    *  | fast clear colors1  | \|/
    *  -----------------------
    *  |        ...          |
    *  -----------------------
    */
   struct {
      /**
       * Offset of the entire plane (whenever the image is disjoint this is
       * simply 0).
       */
      uint32_t offset;

      struct anv_surface surface;

      /**
       * A surface which shadows the main surface and may have different
       * tiling. This is used for sampling using a tiling that isn't supported
       * for other operations.
       */
      struct anv_surface shadow_surface;

      /**
       * For color images, this is the aux usage for this image when not used
       * as a color attachment.
       *
       * For depth/stencil images, this is set to ISL_AUX_USAGE_HIZ if the
       * image has a HiZ buffer.
       */
      enum isl_aux_usage aux_usage;

      struct anv_surface aux_surface;

      /**
       * Offset of the fast clear state (used to compute the
       * fast_clear_state_offset of the following planes).
       */
      uint32_t fast_clear_state_offset;

      /**
       * BO associated with this plane, set when bound.
       */
      struct anv_address address;

      /**
       * Address of the main surface used to fill the aux map table. This is
       * used at destruction of the image since the Vulkan spec does not
       * guarantee that the address.bo field will still be valid at
       * destruction.
       */
      uint64_t aux_map_surface_address;

      /**
       * When destroying the image, also free the bo.
       */
      bool bo_is_owned;
   } planes[3];
};
/* The ordering of this enum is important */
enum anv_fast_clear_type {
   /** Image does not have/support any fast-clear blocks */
   ANV_FAST_CLEAR_NONE = 0,
   /** Image has/supports fast-clear but only to the default value */
   ANV_FAST_CLEAR_DEFAULT_VALUE = 1,
   /** Image has/supports fast-clear with an arbitrary fast-clear value */
   ANV_FAST_CLEAR_ANY = 2,
};
/* Returns the number of auxiliary buffer levels attached to an image. */
static inline uint8_t
anv_image_aux_levels(const struct anv_image * const image,
                     VkImageAspectFlagBits aspect)
{
   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);

   /* The Gen12 CCS aux surface is represented with only one level. */
   const uint8_t aux_logical_levels =
      image->planes[plane].aux_surface.isl.tiling == ISL_TILING_GEN12_CCS ?
      image->planes[plane].surface.isl.levels :
      image->planes[plane].aux_surface.isl.levels;

   return image->planes[plane].aux_surface.isl.size_B > 0 ?
          aux_logical_levels : 0;
}
/* Returns the number of auxiliary buffer layers attached to an image. */
static inline uint32_t
anv_image_aux_layers(const struct anv_image * const image,
                     VkImageAspectFlagBits aspect,
                     const uint8_t miplevel)
{
   assert(image);

   /* The miplevel must exist in the main buffer. */
   assert(miplevel < image->levels);

   if (miplevel >= anv_image_aux_levels(image, aspect)) {
      /* There are no layers with auxiliary data because the miplevel has no
       * auxiliary data.
       */
      return 0;
   }

   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);

   /* The Gen12 CCS aux surface is represented with only one layer. */
   const struct isl_extent4d *aux_logical_level0_px =
      image->planes[plane].aux_surface.isl.tiling == ISL_TILING_GEN12_CCS ?
      &image->planes[plane].surface.isl.logical_level0_px :
      &image->planes[plane].aux_surface.isl.logical_level0_px;

   return MAX2(aux_logical_level0_px->array_len,
               aux_logical_level0_px->depth >> miplevel);
}
static inline struct anv_address
anv_image_get_clear_color_addr(const struct anv_device *device,
                               const struct anv_image *image,
                               VkImageAspectFlagBits aspect)
{
   assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);

   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
   return anv_address_add(image->planes[plane].address,
                          image->planes[plane].fast_clear_state_offset);
}
static inline struct anv_address
anv_image_get_fast_clear_type_addr(const struct anv_device *device,
                                   const struct anv_image *image,
                                   VkImageAspectFlagBits aspect)
{
   struct anv_address addr =
      anv_image_get_clear_color_addr(device, image, aspect);

   const unsigned clear_color_state_size = device->info.gen >= 10 ?
      device->isl_dev.ss.clear_color_state_size :
      device->isl_dev.ss.clear_value_size;
   return anv_address_add(addr, clear_color_state_size);
}
static inline struct anv_address
anv_image_get_compression_state_addr(const struct anv_device *device,
                                     const struct anv_image *image,
                                     VkImageAspectFlagBits aspect,
                                     uint32_t level, uint32_t array_layer)
{
   assert(level < anv_image_aux_levels(image, aspect));
   assert(array_layer < anv_image_aux_layers(image, aspect, level));
   UNUSED uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
   assert(image->planes[plane].aux_usage == ISL_AUX_USAGE_CCS_E);

   struct anv_address addr =
      anv_image_get_fast_clear_type_addr(device, image, aspect);
   addr.offset += 4; /* Go past the fast clear type */

   if (image->type == VK_IMAGE_TYPE_3D) {
      for (uint32_t l = 0; l < level; l++)
         addr.offset += anv_minify(image->extent.depth, l) * 4;
   } else {
      addr.offset += level * image->array_size * 4;
   }
   addr.offset += array_layer * 4;

   assert(addr.offset <
          image->planes[plane].address.offset + image->planes[plane].size);
   return addr;
}
/* Returns true if a HiZ-enabled depth buffer can be sampled from. */
static inline bool
anv_can_sample_with_hiz(const struct gen_device_info * const devinfo,
                        const struct anv_image *image)
{
   if (!(image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT))
      return false;

   /* Allow this feature on BDW even though it is disabled in the BDW devinfo
    * struct. There's documentation which suggests that this feature actually
    * reduces performance on BDW, but it has only been observed to help so
    * far. Sampling fast-cleared blocks on BDW must also be handled with care
    * (see depth_stencil_attachment_compute_aux_usage() for more info).
    */
   if (devinfo->gen != 8 && !devinfo->has_sample_with_hiz)
      return false;

   return image->samples == 1;
}
static inline bool
anv_image_plane_uses_aux_map(const struct anv_device *device,
                             const struct anv_image *image,
                             uint32_t plane)
{
   return device->info.has_aux_map &&
      isl_aux_usage_has_ccs(image->planes[plane].aux_usage);
}
void
anv_cmd_buffer_mark_image_written(struct anv_cmd_buffer *cmd_buffer,
                                  const struct anv_image *image,
                                  VkImageAspectFlagBits aspect,
                                  enum isl_aux_usage aux_usage,
                                  uint32_t level,
                                  uint32_t base_layer,
                                  uint32_t layer_count);
void
anv_image_clear_color(struct anv_cmd_buffer *cmd_buffer,
                      const struct anv_image *image,
                      VkImageAspectFlagBits aspect,
                      enum isl_aux_usage aux_usage,
                      enum isl_format format, struct isl_swizzle swizzle,
                      uint32_t level, uint32_t base_layer, uint32_t layer_count,
                      VkRect2D area, union isl_color_value clear_color);
void
anv_image_clear_depth_stencil(struct anv_cmd_buffer *cmd_buffer,
                              const struct anv_image *image,
                              VkImageAspectFlags aspects,
                              enum isl_aux_usage depth_aux_usage,
                              uint32_t level,
                              uint32_t base_layer, uint32_t layer_count,
                              VkRect2D area,
                              float depth_value, uint8_t stencil_value);
void
anv_image_msaa_resolve(struct anv_cmd_buffer *cmd_buffer,
                       const struct anv_image *src_image,
                       enum isl_aux_usage src_aux_usage,
                       uint32_t src_level, uint32_t src_base_layer,
                       const struct anv_image *dst_image,
                       enum isl_aux_usage dst_aux_usage,
                       uint32_t dst_level, uint32_t dst_base_layer,
                       VkImageAspectFlagBits aspect,
                       uint32_t src_x, uint32_t src_y,
                       uint32_t dst_x, uint32_t dst_y,
                       uint32_t width, uint32_t height,
                       uint32_t layer_count,
                       enum blorp_filter filter);
void
anv_image_hiz_op(struct anv_cmd_buffer *cmd_buffer,
                 const struct anv_image *image,
                 VkImageAspectFlagBits aspect, uint32_t level,
                 uint32_t base_layer, uint32_t layer_count,
                 enum isl_aux_op hiz_op);
void
anv_image_hiz_clear(struct anv_cmd_buffer *cmd_buffer,
                    const struct anv_image *image,
                    VkImageAspectFlags aspects,
                    uint32_t level,
                    uint32_t base_layer, uint32_t layer_count,
                    VkRect2D area, uint8_t stencil_value);
void
anv_image_mcs_op(struct anv_cmd_buffer *cmd_buffer,
                 const struct anv_image *image,
                 enum isl_format format,
                 VkImageAspectFlagBits aspect,
                 uint32_t base_layer, uint32_t layer_count,
                 enum isl_aux_op mcs_op, union isl_color_value *clear_value,
                 bool predicate);
void
anv_image_ccs_op(struct anv_cmd_buffer *cmd_buffer,
                 const struct anv_image *image,
                 enum isl_format format,
                 VkImageAspectFlagBits aspect, uint32_t level,
                 uint32_t base_layer, uint32_t layer_count,
                 enum isl_aux_op ccs_op, union isl_color_value *clear_value,
                 bool predicate);

void
anv_image_copy_to_shadow(struct anv_cmd_buffer *cmd_buffer,
                         const struct anv_image *image,
                         VkImageAspectFlagBits aspect,
                         uint32_t base_level, uint32_t level_count,
                         uint32_t base_layer, uint32_t layer_count);
enum isl_aux_usage
anv_layout_to_aux_usage(const struct gen_device_info * const devinfo,
                        const struct anv_image *image,
                        const VkImageAspectFlagBits aspect,
                        const VkImageLayout layout);

enum anv_fast_clear_type
anv_layout_to_fast_clear_type(const struct gen_device_info * const devinfo,
                              const struct anv_image * const image,
                              const VkImageAspectFlagBits aspect,
                              const VkImageLayout layout);
/* This is defined as a macro so that it works for both
 * VkImageSubresourceRange and VkImageSubresourceLayers
 */
#define anv_get_layerCount(_image, _range) \
   ((_range)->layerCount == VK_REMAINING_ARRAY_LAYERS ? \
    (_image)->array_size - (_range)->baseArrayLayer : (_range)->layerCount)
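/* Usage sketch (illustrative only): both helpers resolve the "remaining"
 * sentinels from a subresource range against the image's real dimensions:
 *
 *    uint32_t layers = anv_get_layerCount(image, &range);
 *    uint32_t levels = anv_get_levelCount(image, &range);
 *
 * so a range with layerCount == VK_REMAINING_ARRAY_LAYERS or
 * levelCount == VK_REMAINING_MIP_LEVELS yields the actual counts.
 */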
static inline uint32_t
anv_get_levelCount(const struct anv_image *image,
                   const VkImageSubresourceRange *range)
{
   return range->levelCount == VK_REMAINING_MIP_LEVELS ?
          image->levels - range->baseMipLevel : range->levelCount;
}
static inline VkImageAspectFlags
anv_image_expand_aspects(const struct anv_image *image,
                         VkImageAspectFlags aspects)
{
   /* If the underlying image has color plane aspects and
    * VK_IMAGE_ASPECT_COLOR_BIT has been requested, then return the aspects of
    * the underlying image. */
   if ((image->aspects & VK_IMAGE_ASPECT_PLANES_BITS_ANV) != 0 &&
       aspects == VK_IMAGE_ASPECT_COLOR_BIT)
      return image->aspects;

   return aspects;
}
static inline bool
anv_image_aspects_compatible(VkImageAspectFlags aspects1,
                             VkImageAspectFlags aspects2)
{
   if (aspects1 == aspects2)
      return true;

   /* Color aspects are only compatible with each other when the number of
    * color planes matches.
    */
   if ((aspects1 & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) != 0 &&
       (aspects2 & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) != 0 &&
       util_bitcount(aspects1) == util_bitcount(aspects2))
      return true;

   return false;
}
struct anv_image_view {
   const struct anv_image *image; /**< VkImageViewCreateInfo::image */

   VkImageAspectFlags aspect_mask;
   VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */

   struct {
      uint32_t image_plane;

      struct isl_view isl;

      /**
       * RENDER_SURFACE_STATE when using image as a sampler surface with an
       * image layout of SHADER_READ_ONLY_OPTIMAL or
       * DEPTH_STENCIL_READ_ONLY_OPTIMAL.
       */
      struct anv_surface_state optimal_sampler_surface_state;

      /**
       * RENDER_SURFACE_STATE when using image as a sampler surface with an
       * image layout of GENERAL.
       */
      struct anv_surface_state general_sampler_surface_state;

      /**
       * RENDER_SURFACE_STATE when using image as a storage image. Separate
       * states for write-only and readable, using the real format for
       * write-only and the lowered format for readable.
       */
      struct anv_surface_state storage_surface_state;
      struct anv_surface_state writeonly_storage_surface_state;

      struct brw_image_param storage_image_param;
   } planes[3];
};
enum anv_image_view_state_flags {
   ANV_IMAGE_VIEW_STATE_STORAGE_WRITE_ONLY = (1 << 0),
   ANV_IMAGE_VIEW_STATE_TEXTURE_OPTIMAL    = (1 << 1),
};
void anv_image_fill_surface_state(struct anv_device *device,
                                  const struct anv_image *image,
                                  VkImageAspectFlagBits aspect,
                                  const struct isl_view *view,
                                  isl_surf_usage_flags_t view_usage,
                                  enum isl_aux_usage aux_usage,
                                  const union isl_color_value *clear_color,
                                  enum anv_image_view_state_flags flags,
                                  struct anv_surface_state *state_inout,
                                  struct brw_image_param *image_param_out);
struct anv_image_create_info {
   const VkImageCreateInfo *vk_info;

   /** An opt-in bitmask which filters an ISL-mapping of the Vulkan tiling. */
   isl_tiling_flags_t isl_tiling_flags;

   /** These flags will be added to any derived from VkImageCreateInfo. */
   isl_surf_usage_flags_t isl_extra_usage_flags;

   bool external_format;
};
VkResult anv_image_create(VkDevice _device,
                          const struct anv_image_create_info *info,
                          const VkAllocationCallbacks *alloc,
                          VkImage *pImage);

const struct anv_surface *
anv_image_get_surface_for_aspect_mask(const struct anv_image *image,
                                      VkImageAspectFlags aspect_mask);

enum isl_format
anv_isl_format_for_descriptor_type(VkDescriptorType type);
static inline struct VkExtent3D
anv_sanitize_image_extent(const VkImageType imageType,
                          const struct VkExtent3D imageExtent)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkExtent3D) { imageExtent.width, 1, 1 };
   case VK_IMAGE_TYPE_2D:
      return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
   case VK_IMAGE_TYPE_3D:
      return imageExtent;
   default:
      unreachable("invalid image type");
   }
}
static inline struct VkOffset3D
anv_sanitize_image_offset(const VkImageType imageType,
                          const struct VkOffset3D imageOffset)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkOffset3D) { imageOffset.x, 0, 0 };
   case VK_IMAGE_TYPE_2D:
      return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
   case VK_IMAGE_TYPE_3D:
      return imageOffset;
   default:
      unreachable("invalid image type");
   }
}
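/* Usage sketch (illustrative only): copy and blit paths normalize
 * client-provided extents/offsets so 1D and 2D images always carry sane
 * height/depth components, e.g. (`region` is a hypothetical
 * VkBufferImageCopy pointer):
 *
 *    VkExtent3D extent = anv_sanitize_image_extent(image->type, region->imageExtent);
 *    VkOffset3D offset = anv_sanitize_image_offset(image->type, region->imageOffset);
 */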
VkFormatFeatureFlags
anv_get_image_format_features(const struct gen_device_info *devinfo,
                              VkFormat vk_format,
                              const struct anv_format *anv_format,
                              VkImageTiling vk_tiling);

void anv_fill_buffer_surface_state(struct anv_device *device,
                                   struct anv_state state,
                                   enum isl_format format,
                                   struct anv_address address,
                                   uint32_t range, uint32_t stride);
static inline void
anv_clear_color_from_att_state(union isl_color_value *clear_color,
                               const struct anv_attachment_state *att_state,
                               const struct anv_image_view *iview)
{
   const struct isl_format_layout *view_fmtl =
      isl_format_get_layout(iview->planes[0].isl.format);

#define COPY_CLEAR_COLOR_CHANNEL(c, i) \
   if (view_fmtl->channels.c.bits) \
      clear_color->u32[i] = att_state->clear_value.color.uint32[i]

   COPY_CLEAR_COLOR_CHANNEL(r, 0);
   COPY_CLEAR_COLOR_CHANNEL(g, 1);
   COPY_CLEAR_COLOR_CHANNEL(b, 2);
   COPY_CLEAR_COLOR_CHANNEL(a, 3);

#undef COPY_CLEAR_COLOR_CHANNEL
}
struct anv_ycbcr_conversion {
   const struct anv_format *format;
   VkSamplerYcbcrModelConversion ycbcr_model;
   VkSamplerYcbcrRange ycbcr_range;
   VkComponentSwizzle mapping[4];
   VkChromaLocation chroma_offsets[2];
   VkFilter chroma_filter;
   bool chroma_reconstruction;
};
struct anv_sampler {
   uint32_t state[3][4];
   uint32_t n_planes;
   struct anv_ycbcr_conversion *conversion;

   /* Blob of sampler state data which is guaranteed to be 32-byte aligned
    * and with a 32-byte stride for use as bindless samplers.
    */
   struct anv_state bindless_state;
};
struct anv_framebuffer {
   uint32_t width;
   uint32_t height;
   uint32_t layers;

   uint32_t attachment_count;
   struct anv_image_view *attachments[0];
};
struct anv_subpass_attachment {
   VkImageUsageFlagBits usage;
   uint32_t attachment;
   VkImageLayout layout;

   /* Used only with attachment containing stencil data. */
   VkImageLayout stencil_layout;
};
struct anv_subpass {
   uint32_t attachment_count;

   /**
    * A pointer to all attachment references used in this subpass.
    * Only valid if ::attachment_count > 0.
    */
   struct anv_subpass_attachment *attachments;
   uint32_t input_count;
   struct anv_subpass_attachment *input_attachments;
   uint32_t color_count;
   struct anv_subpass_attachment *color_attachments;
   struct anv_subpass_attachment *resolve_attachments;

   struct anv_subpass_attachment *depth_stencil_attachment;
   struct anv_subpass_attachment *ds_resolve_attachment;
   VkResolveModeFlagBitsKHR depth_resolve_mode;
   VkResolveModeFlagBitsKHR stencil_resolve_mode;

   uint32_t view_mask;

   /** Subpass has a depth/stencil self-dependency */
   bool has_ds_self_dep;

   /** Subpass has at least one color resolve attachment */
   bool has_color_resolve;
};
static inline unsigned
anv_subpass_view_count(const struct anv_subpass *subpass)
{
   return MAX2(1, util_bitcount(subpass->view_mask));
}
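
/* Illustrative example (not part of the original header): a view_mask of
 * 0b0101 has two bits set, so anv_subpass_view_count() returns 2; when
 * multiview is disabled the mask is 0 and the helper still reports a single
 * view.
 */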
struct anv_render_pass_attachment {
   /* TODO: Consider using VkAttachmentDescription instead of storing each of
    * its members individually.
    */
   VkImageUsageFlags                usage;
   VkAttachmentLoadOp               load_op;
   VkAttachmentStoreOp              store_op;
   VkAttachmentLoadOp               stencil_load_op;
   VkImageLayout                    initial_layout;
   VkImageLayout                    final_layout;
   VkImageLayout                    first_subpass_layout;

   VkImageLayout                    stencil_initial_layout;
   VkImageLayout                    stencil_final_layout;

   /* The subpass id in which the attachment will be used last. */
   uint32_t                         last_subpass_idx;
};
struct anv_render_pass {
   uint32_t                               attachment_count;
   uint32_t                               subpass_count;
   /* An array of subpass_count+1 flushes, one per subpass boundary */
   enum anv_pipe_bits *                   subpass_flushes;
   struct anv_render_pass_attachment *    attachments;
   struct anv_subpass                     subpasses[0];
};
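
/* Illustrative note (not part of the original header): "one per subpass
 * boundary" means a pass with 2 subpasses carries 3 flush entries: before
 * subpass 0, between subpasses 0 and 1, and after subpass 1.
 */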
#define ANV_PIPELINE_STATISTICS_MASK 0x000007ff
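
/* Illustrative note (not part of the original header): 0x000007ff keeps the
 * low 11 bits, matching the 11 VkQueryPipelineStatisticFlagBits values
 * defined by core Vulkan.
 */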
struct anv_query_pool {
   VkQueryPipelineStatisticFlags    pipeline_statistics;
   /** Stride between slots, in bytes */
   uint32_t                         stride;
   /** Number of slots in this query pool */
   uint32_t                         slots;
};
int anv_get_instance_entrypoint_index(const char *name);
int anv_get_device_entrypoint_index(const char *name);
int anv_get_physical_device_entrypoint_index(const char *name);

const char *anv_get_instance_entry_name(int index);
const char *anv_get_physical_device_entry_name(int index);
const char *anv_get_device_entry_name(int index);
bool
anv_instance_entrypoint_is_enabled(int index, uint32_t core_version,
                                   const struct anv_instance_extension_table *instance);
bool
anv_physical_device_entrypoint_is_enabled(int index, uint32_t core_version,
                                          const struct anv_instance_extension_table *instance);
bool
anv_device_entrypoint_is_enabled(int index, uint32_t core_version,
                                 const struct anv_instance_extension_table *instance,
                                 const struct anv_device_extension_table *device);

void *anv_lookup_entrypoint(const struct gen_device_info *devinfo,
                            const char *name);
void anv_dump_image_to_ppm(struct anv_device *device,
                           struct anv_image *image, unsigned miplevel,
                           unsigned array_layer, VkImageAspectFlagBits aspect,
                           const char *filename);
enum anv_dump_action {
   ANV_DUMP_FRAMEBUFFERS_BIT = 0x1,
};

void anv_dump_start(struct anv_device *device, enum anv_dump_action actions);
void anv_dump_finish(void);

void anv_dump_add_attachments(struct anv_cmd_buffer *cmd_buffer);
static inline uint32_t
anv_get_subpass_id(const struct anv_cmd_state * const cmd_state)
{
   /* This function must be called from within a subpass. */
   assert(cmd_state->pass && cmd_state->subpass);

   const uint32_t subpass_id =
      cmd_state->subpass - cmd_state->pass->subpasses;

   /* The id of this subpass shouldn't exceed the number of subpasses in this
    * render pass minus 1.
    */
   assert(subpass_id < cmd_state->pass->subpass_count);
   return subpass_id;
}
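
/* Illustrative note (not part of the original header): the id is the array
 * index obtained from pointer arithmetic, so when cmd_state->subpass points
 * at pass->subpasses[0] the function returns 0, for pass->subpasses[1] it
 * returns 1, and so on up to subpass_count - 1.
 */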
struct gen_perf_config *anv_get_perf(const struct gen_device_info *devinfo, int fd);
void anv_device_perf_init(struct anv_device *device);
#define ANV_DEFINE_HANDLE_CASTS(__anv_type, __VkType)                      \
                                                                           \
   static inline struct __anv_type *                                       \
   __anv_type ## _from_handle(__VkType _handle)                            \
   {                                                                       \
      return (struct __anv_type *) _handle;                                \
   }                                                                       \
                                                                           \
   static inline __VkType                                                  \
   __anv_type ## _to_handle(struct __anv_type *_obj)                       \
   {                                                                       \
      return (__VkType) _obj;                                              \
   }

#define ANV_DEFINE_NONDISP_HANDLE_CASTS(__anv_type, __VkType)              \
                                                                           \
   static inline struct __anv_type *                                       \
   __anv_type ## _from_handle(__VkType _handle)                            \
   {                                                                       \
      return (struct __anv_type *)(uintptr_t) _handle;                     \
   }                                                                       \
                                                                           \
   static inline __VkType                                                  \
   __anv_type ## _to_handle(struct __anv_type *_obj)                       \
   {                                                                       \
      return (__VkType)(uintptr_t) _obj;                                   \
   }

#define ANV_FROM_HANDLE(__anv_type, __name, __handle) \
   struct __anv_type *__name = __anv_type ## _from_handle(__handle)
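
/* Illustrative usage (not part of the original header): inside an entrypoint
 * that receives a VkDevice handle,
 *
 *    ANV_FROM_HANDLE(anv_device, device, _device);
 *
 * expands to
 *
 *    struct anv_device *device = anv_device_from_handle(_device);
 *
 * using the casts generated by ANV_DEFINE_HANDLE_CASTS(anv_device, VkDevice)
 * below.
 */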
ANV_DEFINE_HANDLE_CASTS(anv_cmd_buffer, VkCommandBuffer)
ANV_DEFINE_HANDLE_CASTS(anv_device, VkDevice)
ANV_DEFINE_HANDLE_CASTS(anv_instance, VkInstance)
ANV_DEFINE_HANDLE_CASTS(anv_physical_device, VkPhysicalDevice)
ANV_DEFINE_HANDLE_CASTS(anv_queue, VkQueue)

ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_cmd_pool, VkCommandPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer, VkBuffer)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer_view, VkBufferView)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_pool, VkDescriptorPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set, VkDescriptorSet)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set_layout, VkDescriptorSetLayout)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_update_template, VkDescriptorUpdateTemplate)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_device_memory, VkDeviceMemory)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_fence, VkFence)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_event, VkEvent)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_framebuffer, VkFramebuffer)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image, VkImage)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image_view, VkImageView)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_cache, VkPipelineCache)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline, VkPipeline)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_layout, VkPipelineLayout)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_query_pool, VkQueryPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_render_pass, VkRenderPass)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_sampler, VkSampler)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_semaphore, VkSemaphore)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_shader_module, VkShaderModule)
ANV_DEFINE_NONDISP_HANDLE_CASTS(vk_debug_report_callback, VkDebugReportCallbackEXT)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_ycbcr_conversion, VkSamplerYcbcrConversion)
/* Gen-specific function declarations */
#ifdef genX
#  include "anv_genX.h"
#else
#  define genX(x) gen7_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen75_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen8_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen9_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen10_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen11_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen12_##x
#  include "anv_genX.h"
#  undef genX
#endif
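
/* Illustrative note (not part of the original header): each #define above
 * rewrites the genX() token-pasting macro before re-including anv_genX.h, so
 * a hypothetical genX(foo) declaration in that header is emitted once per
 * generation as gen7_foo, gen75_foo, ..., gen12_foo. When genX is already
 * defined (per-gen compilation units), the header is included just once with
 * that definition.
 */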

#endif /* ANV_PRIVATE_H */