 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
#include "drm-uapi/i915_drm.h"

#define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))

#define VG(x) ((void)0)
#include "common/gen_clflush.h"
#include "common/gen_decoder.h"
#include "common/gen_gem.h"
#include "common/gen_l3_config.h"
#include "dev/gen_device_info.h"
#include "blorp/blorp.h"
#include "compiler/brw_compiler.h"
#include "util/bitset.h"
#include "util/macros.h"
#include "util/hash_table.h"
#include "util/list.h"
#include "util/sparse_array.h"
#include "util/u_atomic.h"
#include "util/u_vector.h"
#include "util/u_math.h"

#include "util/xmlconfig.h"

#include "vk_debug_report.h"
#include "vk_object.h"
/* Pre-declarations needed for WSI entrypoints */
typedef struct xcb_connection_t xcb_connection_t;
typedef uint32_t xcb_visualid_t;
typedef uint32_t xcb_window_t;

struct anv_buffer_view;
struct anv_image_view;

struct gen_aux_map_context;
struct gen_perf_config;
#include <vulkan/vulkan.h>
#include <vulkan/vulkan_intel.h>
#include <vulkan/vk_icd.h>

#include "anv_android.h"
#include "anv_entrypoints.h"
#include "anv_extensions.h"

#include "dev/gen_debug.h"
#include "common/intel_log.h"
#include "wsi_common.h"

#define NSEC_PER_SEC 1000000000ull
/* anv Virtual Memory Layout
 * =========================
 *
 * When the anv driver is determining the virtual graphics addresses of memory
 * objects itself using the softpin mechanism, the following memory ranges
 * will be used.
 *
 * Three special considerations to notice:
 *
 * (1) the dynamic state pool is located within the same 4 GiB as the low
 * heap. This is to work around a VF cache issue described in a comment in
 * anv_physical_device_init_heaps.
 *
 * (2) the binding table pool is located at lower addresses than the surface
 * state pool, within a 4 GiB range. This allows surface state base addresses
 * to cover both binding tables (16 bit offsets) and surface states (32 bit
 * offsets).
 *
 * (3) the last 4 GiB of the address space is withheld from the high
 * heap. Various hardware units will read past the end of an object for
 * various reasons. This healthy margin prevents reads from wrapping around
 * 48-bit addresses.
 */
#define LOW_HEAP_MIN_ADDRESS               0x000000001000ULL /* 4 KiB */
#define LOW_HEAP_MAX_ADDRESS               0x0000bfffffffULL
#define DYNAMIC_STATE_POOL_MIN_ADDRESS     0x0000c0000000ULL /* 3 GiB */
#define DYNAMIC_STATE_POOL_MAX_ADDRESS     0x0000ffffffffULL
#define BINDING_TABLE_POOL_MIN_ADDRESS     0x000100000000ULL /* 4 GiB */
#define BINDING_TABLE_POOL_MAX_ADDRESS     0x00013fffffffULL
#define SURFACE_STATE_POOL_MIN_ADDRESS     0x000140000000ULL /* 5 GiB */
#define SURFACE_STATE_POOL_MAX_ADDRESS     0x00017fffffffULL
#define INSTRUCTION_STATE_POOL_MIN_ADDRESS 0x000180000000ULL /* 6 GiB */
#define INSTRUCTION_STATE_POOL_MAX_ADDRESS 0x0001bfffffffULL
#define CLIENT_VISIBLE_HEAP_MIN_ADDRESS    0x0001c0000000ULL /* 7 GiB */
#define CLIENT_VISIBLE_HEAP_MAX_ADDRESS    0x0002bfffffffULL
#define HIGH_HEAP_MIN_ADDRESS              0x0002c0000000ULL /* 11 GiB */
#define LOW_HEAP_SIZE               \
   (LOW_HEAP_MAX_ADDRESS - LOW_HEAP_MIN_ADDRESS + 1)
#define DYNAMIC_STATE_POOL_SIZE     \
   (DYNAMIC_STATE_POOL_MAX_ADDRESS - DYNAMIC_STATE_POOL_MIN_ADDRESS + 1)
#define BINDING_TABLE_POOL_SIZE     \
   (BINDING_TABLE_POOL_MAX_ADDRESS - BINDING_TABLE_POOL_MIN_ADDRESS + 1)
#define SURFACE_STATE_POOL_SIZE     \
   (SURFACE_STATE_POOL_MAX_ADDRESS - SURFACE_STATE_POOL_MIN_ADDRESS + 1)
#define INSTRUCTION_STATE_POOL_SIZE \
   (INSTRUCTION_STATE_POOL_MAX_ADDRESS - INSTRUCTION_STATE_POOL_MIN_ADDRESS + 1)
#define CLIENT_VISIBLE_HEAP_SIZE    \
   (CLIENT_VISIBLE_HEAP_MAX_ADDRESS - CLIENT_VISIBLE_HEAP_MIN_ADDRESS + 1)
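
/* Illustrative consistency checks for the layout above (not in the original
 * header): with the constants as defined, each pool begins exactly one byte
 * past the previous range, so there are no holes between the low heap and the
 * surface state pool. Shown as a sketch using C11 _Static_assert.
 */
_Static_assert(DYNAMIC_STATE_POOL_MIN_ADDRESS == LOW_HEAP_MAX_ADDRESS + 1,
               "dynamic state pool must follow the low heap");
_Static_assert(BINDING_TABLE_POOL_MIN_ADDRESS == DYNAMIC_STATE_POOL_MAX_ADDRESS + 1,
               "binding table pool must follow the dynamic state pool");
_Static_assert(SURFACE_STATE_POOL_MIN_ADDRESS == BINDING_TABLE_POOL_MAX_ADDRESS + 1,
               "surface state pool must follow the binding table pool");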
/* Allowing different clear colors requires us to perform a depth resolve at
 * the end of certain render passes. This is because while slow clears store
 * the clear color in the HiZ buffer, fast clears (without a resolve) don't.
 * See the PRMs for examples describing when additional resolves would be
 * necessary. To enable fast clears without requiring extra resolves, we set
 * the clear value to a globally-defined one. We could allow different values
 * if the user doesn't expect coherent data during or after a render pass
 * (VK_ATTACHMENT_STORE_OP_DONT_CARE), but such users (aside from the CTS)
 * don't seem to exist yet. In almost all Vulkan applications tested thus far,
 * 1.0f seems to be the only value used. The only application that doesn't set
 * this value does so through the usage of a seemingly uninitialized clear
 * value.
 */
#define ANV_HZ_FC_VAL 1.0f
#define MAX_XFB_BUFFERS  4
#define MAX_XFB_STREAMS  4
#define MAX_VIEWPORTS   16
#define MAX_SCISSORS    16
#define MAX_PUSH_CONSTANTS_SIZE 128
#define MAX_DYNAMIC_BUFFERS 16
#define MAX_IMAGES 64
#define MAX_PUSH_DESCRIPTORS 32 /* Minimum requirement */
#define MAX_INLINE_UNIFORM_BLOCK_SIZE 4096
#define MAX_INLINE_UNIFORM_BLOCK_DESCRIPTORS 32
#define ANV_UBO_BOUNDS_CHECK_ALIGNMENT 32
#define ANV_SSBO_BOUNDS_CHECK_ALIGNMENT 4
#define MAX_VIEWS_FOR_PRIMITIVE_REPLICATION 16
/* From the Skylake PRM Vol. 7 "Binding Table Surface State Model":
 *
 *    "The surface state model is used when a Binding Table Index (specified
 *    in the message descriptor) of less than 240 is specified. In this model,
 *    the Binding Table Index is used to index into the binding table, and the
 *    binding table entry contains a pointer to the SURFACE_STATE."
 *
 * Binding table values above 240 are used for various things in the hardware
 * such as stateless, stateless with incoherent cache, SLM, and bindless.
 */
#define MAX_BINDING_TABLE_SIZE 240
/* The kernel relocation API has a limitation of a 32-bit delta value
 * applied to the address before it is written which, in spite of it being
 * unsigned, is treated as signed. Because of the way that this maps to the
 * Vulkan API, we cannot handle an offset into a buffer that does not fit into
 * a signed 32-bit value. The only mechanism we have for dealing with this at
 * the moment is to limit all VkDeviceMemory objects to a maximum of 2GB each.
 * The Vulkan spec allows us to do this:
 *
 *    "Some platforms may have a limit on the maximum size of a single
 *    allocation. For example, certain systems may fail to create
 *    allocations with a size greater than or equal to 4GB. Such a limit is
 *    implementation-dependent, and if such a failure occurs then the error
 *    VK_ERROR_OUT_OF_DEVICE_MEMORY should be returned."
 *
 * We don't use vk_error here because it's not an error so much as an
 * indication to the application that the allocation is too large.
 */
#define MAX_MEMORY_ALLOCATION_SIZE (1ull << 31)
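
/* Illustrative sketch (not part of the original header) of how an allocation
 * path might honor this limit; pAllocateInfo is assumed from the standard
 * vkAllocateMemory signature:
 *
 *    if (pAllocateInfo->allocationSize > MAX_MEMORY_ALLOCATION_SIZE)
 *       return VK_ERROR_OUT_OF_DEVICE_MEMORY;
 *
 * Note the bare return of the error code rather than vk_error(), matching the
 * comment above.
 */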
#define ANV_SVGS_VB_INDEX    MAX_VBS
#define ANV_DRAWID_VB_INDEX (MAX_VBS + 1)

/* We reserve this MI ALU register for the purpose of handling predication.
 * Other code which uses the MI ALU should leave it alone.
 */
#define ANV_PREDICATE_RESULT_REG 0x2678 /* MI_ALU_REG15 */
/* For gen12 we set the streamout buffers using 4 separate commands
 * (3DSTATE_SO_BUFFER_INDEX_*) instead of 3DSTATE_SO_BUFFER. However the layout
 * of the 3DSTATE_SO_BUFFER_INDEX_* commands is identical to that of
 * 3DSTATE_SO_BUFFER apart from the SOBufferIndex field, so for now we use the
 * 3DSTATE_SO_BUFFER command, but change the 3DCommandSubOpcode.
 * SO_BUFFER_INDEX_0_CMD is actually the 3DCommandSubOpcode for
 * 3DSTATE_SO_BUFFER_INDEX_0.
 */
#define SO_BUFFER_INDEX_0_CMD 0x60
#define anv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
static inline uint32_t
align_down_npot_u32(uint32_t v, uint32_t a)
{
   return v - (v % a);
}

static inline uint32_t
align_down_u32(uint32_t v, uint32_t a)
{
   assert(a != 0 && a == (a & -a));
   return v & ~(a - 1);
}

static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   assert(a != 0 && a == (a & -a));
   return align_down_u32(v + a - 1, a);
}

static inline uint64_t
align_down_u64(uint64_t v, uint64_t a)
{
   assert(a != 0 && a == (a & -a));
   return v & ~(a - 1);
}

static inline uint64_t
align_u64(uint64_t v, uint64_t a)
{
   return align_down_u64(v + a - 1, a);
}

static inline int32_t
align_i32(int32_t v, int32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}
/** Alignment must be a power of 2. */
static inline bool
anv_is_aligned(uintmax_t n, uintmax_t a)
{
   assert(a == (a & -a));
   return (n & (a - 1)) == 0;
}

static inline uint32_t
anv_minify(uint32_t n, uint32_t levels)
{
   if (unlikely(n == 0))
      return 0;
   else
      return MAX2(n >> levels, 1);
}
static inline float
anv_clamp_f(float f, float min, float max)

static inline bool
anv_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
{
   if (*inout_mask & clear_mask) {
      *inout_mask &= ~clear_mask;
      return true;
   } else {
      return false;
   }
}
static inline union isl_color_value
vk_to_isl_color(VkClearColorValue color)
{
   return (union isl_color_value) {
      .u32 = {
         color.uint32[0],
         color.uint32[1],
         color.uint32[2],
         color.uint32[3],
      },
   };
}
static inline void *anv_unpack_ptr(uintptr_t ptr, int bits, int *flags)
{
   uintptr_t mask = (1ull << bits) - 1;
   *flags = ptr & mask;
   return (void *) (ptr & ~mask);
}

static inline uintptr_t anv_pack_ptr(void *ptr, int bits, int flags)
{
   uintptr_t value = (uintptr_t) ptr;
   uintptr_t mask = (1ull << bits) - 1;
   return value | (mask & flags);
}
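
/* Illustrative round trip (not in the original header): packing a flag into
 * the low bits of a sufficiently aligned pointer and recovering both parts.
 *
 *    struct anv_bo *bo = ...;                    // at least 2-byte aligned
 *    uintptr_t packed = anv_pack_ptr(bo, 1, 1);  // low bit = "wait on BO"
 *    int flags;
 *    struct anv_bo *unpacked = anv_unpack_ptr(packed, 1, &flags);
 *    // unpacked == bo and flags == 1
 *
 * This is the scheme anv_queue_submit::fence_bos relies on (see below).
 */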
#define for_each_bit(b, dword)                          \
   for (uint32_t __dword = (dword);                     \
        (b) = __builtin_ffs(__dword) - 1, __dword;      \
        __dword &= ~(1 << (b)))
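
/* Illustrative use (not in the original header): visit each set bit of a
 * 32-bit mask, e.g. the active shader stages of a pipeline.
 *
 *    uint32_t stage;
 *    for_each_bit(stage, active_stages_mask) {
 *       // 'stage' is the index of one set bit per iteration
 *    }
 */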
#define typed_memcpy(dest, src, count) ({ \
   STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
   memcpy((dest), (src), (count) * sizeof(*(src))); \
})
/* Mapping from anv object to VkDebugReportObjectTypeEXT. New types need
 * to be added here in order to utilize mapping in debug/error/perf macros.
 */
#define REPORT_OBJECT_TYPE(o)                                                      \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_instance*),              \
   VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,                                       \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_physical_device*),       \
   VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,                                \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_device*),                \
   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,                                         \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), const struct anv_device*),          \
   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,                                         \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_queue*),                 \
   VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,                                          \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_semaphore*),             \
   VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,                                      \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_cmd_buffer*),            \
   VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,                                 \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_fence*),                 \
   VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,                                          \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_device_memory*),         \
   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,                                  \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_buffer*),                \
   VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,                                         \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_image*),                 \
   VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,                                          \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), const struct anv_image*),           \
   VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,                                          \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_event*),                 \
   VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,                                          \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_query_pool*),            \
   VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT,                                     \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_buffer_view*),           \
   VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT,                                    \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_image_view*),            \
   VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,                                     \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_shader_module*),         \
   VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT,                                  \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_pipeline_cache*),        \
   VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT,                                 \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_pipeline_layout*),       \
   VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT,                                \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_render_pass*),           \
   VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,                                    \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_pipeline*),              \
   VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,                                       \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_descriptor_set_layout*), \
   VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,                          \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_sampler*),               \
   VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,                                        \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_descriptor_pool*),       \
   VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,                                \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_descriptor_set*),        \
   VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,                                 \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_framebuffer*),           \
   VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,                                    \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_cmd_pool*),              \
   VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT,                                   \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct anv_surface*),               \
   VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT,                                    \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct wsi_swapchain*),             \
   VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,                                  \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), struct vk_debug_callback*),         \
   VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT,                      \
   __builtin_choose_expr (                                                         \
   __builtin_types_compatible_p (__typeof (o), void*),                             \
   VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,                                        \
   /* The void expression results in a compile-time error                          \
      when assigning the result to something.  */                                  \
   (void)0)))))))))))))))))))))))))))))))
/* Whenever we generate an error, pass it through this function. Useful for
 * debugging, where we can break on it. Only call at error site, not when
 * propagating errors. Might be useful to plug in a stack trace here.
 */

VkResult __vk_errorv(struct anv_instance *instance, const void *object,
                     VkDebugReportObjectTypeEXT type, VkResult error,
                     const char *file, int line, const char *format,
                     va_list args);

VkResult __vk_errorf(struct anv_instance *instance, const void *object,
                     VkDebugReportObjectTypeEXT type, VkResult error,
                     const char *file, int line, const char *format, ...)
   anv_printflike(7, 8);

#ifdef DEBUG
#define vk_error(error) __vk_errorf(NULL, NULL,\
                                    VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,\
                                    error, __FILE__, __LINE__, NULL)
#define vk_errorfi(instance, obj, error, format, ...)\
    __vk_errorf(instance, obj, REPORT_OBJECT_TYPE(obj), error,\
                __FILE__, __LINE__, format, ## __VA_ARGS__)
#define vk_errorf(device, obj, error, format, ...)\
   vk_errorfi(anv_device_instance_or_null(device),\
              obj, error, format, ## __VA_ARGS__)
#else
#define vk_error(error) error
#define vk_errorfi(instance, obj, error, format, ...) error
#define vk_errorf(device, obj, error, format, ...) error
#endif
/**
 * Warn on ignored extension structs.
 *
 * The Vulkan spec requires us to ignore unsupported or unknown structs in
 * a pNext chain. In debug mode, emitting warnings for ignored structs may
 * help us discover structs that we should not have ignored.
 *
 * From the Vulkan 1.0.38 spec:
 *
 *    Any component of the implementation (the loader, any enabled layers,
 *    and drivers) must skip over, without processing (other than reading the
 *    sType and pNext members) any chained structures with sType values not
 *    defined by extensions supported by that component.
 */
#define anv_debug_ignored_stype(sType) \
   intel_logd("%s: ignored VkStructureType %u\n", __func__, (sType))
void __anv_perf_warn(struct anv_device *device, const void *object,
                     VkDebugReportObjectTypeEXT type, const char *file,
                     int line, const char *format, ...)
   anv_printflike(6, 7);
void anv_loge(const char *format, ...) anv_printflike(1, 2);
void anv_loge_v(const char *format, va_list va);
/**
 * Print a FINISHME message, including its source location.
 */
#define anv_finishme(format, ...) \
      static bool reported = false; \
         intel_logw("%s:%d: FINISHME: " format, __FILE__, __LINE__, \

/**
 * Print a perf warning message. Set INTEL_DEBUG=perf to see these.
 */
#define anv_perf_warn(instance, obj, format, ...) \
      static bool reported = false; \
      if (!reported && unlikely(INTEL_DEBUG & DEBUG_PERF)) { \
         __anv_perf_warn(instance, obj, REPORT_OBJECT_TYPE(obj), __FILE__, __LINE__,\
                         format, ##__VA_ARGS__); \
/* A non-fatal assert. Useful for debugging. */
#ifdef DEBUG
#define anv_assert(x) ({ \
   if (unlikely(!(x))) \
      intel_loge("%s:%d ASSERT: %s", __FILE__, __LINE__, #x); \
})
#else
#define anv_assert(x)
#endif
/* A multi-pointer allocator
 *
 * When copying data structures from the user (such as a render pass), it's
 * common to need to allocate data for a bunch of different things. Instead
 * of doing several allocations and having to handle all of the error checking
 * that entails, it can be easier to do a single allocation. This struct
 * helps facilitate that. The intended usage looks like this:
 *
 *    ANV_MULTIALLOC(ma)
 *    anv_multialloc_add(&ma, &main_ptr, 1);
 *    anv_multialloc_add(&ma, &substruct1, substruct1Count);
 *    anv_multialloc_add(&ma, &substruct2, substruct2Count);
 *
 *    if (!anv_multialloc_alloc(&ma, pAllocator, VK_ALLOCATION_SCOPE_FOO))
 *       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 */
struct anv_multialloc {
    size_t size;
    size_t align;

    uint32_t ptr_count;
    void **ptrs[8];
};

#define ANV_MULTIALLOC_INIT \
   ((struct anv_multialloc) { 0, })

#define ANV_MULTIALLOC(_name) \
   struct anv_multialloc _name = ANV_MULTIALLOC_INIT
__attribute__((always_inline))
static inline void
_anv_multialloc_add(struct anv_multialloc *ma,
                    void **ptr, size_t size, size_t align)
{
   size_t offset = align_u64(ma->size, align);
   ma->size = offset + size;
   ma->align = MAX2(ma->align, align);

   /* Store the offset in the pointer. */
   *ptr = (void *)(uintptr_t)offset;

   assert(ma->ptr_count < ARRAY_SIZE(ma->ptrs));
   ma->ptrs[ma->ptr_count++] = ptr;
}

#define anv_multialloc_add_size(_ma, _ptr, _size) \
   _anv_multialloc_add((_ma), (void **)(_ptr), (_size), __alignof__(**(_ptr)))

#define anv_multialloc_add(_ma, _ptr, _count) \
   anv_multialloc_add_size(_ma, _ptr, (_count) * sizeof(**(_ptr)));
__attribute__((always_inline))
static inline void *
anv_multialloc_alloc(struct anv_multialloc *ma,
                     const VkAllocationCallbacks *alloc,
                     VkSystemAllocationScope scope)
{
   void *ptr = vk_alloc(alloc, ma->size, ma->align, scope);
   if (!ptr)
      return NULL;

   /* Fill out each of the pointers with their final value.
    *
    *   for (uint32_t i = 0; i < ma->ptr_count; i++)
    *      *ma->ptrs[i] = ptr + (uintptr_t)*ma->ptrs[i];
    *
    * Unfortunately, even though ma->ptr_count is basically guaranteed to be a
    * constant, GCC is incapable of figuring this out and unrolling the loop
    * so we have to give it a little help.
    */
   STATIC_ASSERT(ARRAY_SIZE(ma->ptrs) == 8);
#define _ANV_MULTIALLOC_UPDATE_POINTER(_i) \
   if ((_i) < ma->ptr_count) \
      *ma->ptrs[_i] = ptr + (uintptr_t)*ma->ptrs[_i]
   _ANV_MULTIALLOC_UPDATE_POINTER(0);
   _ANV_MULTIALLOC_UPDATE_POINTER(1);
   _ANV_MULTIALLOC_UPDATE_POINTER(2);
   _ANV_MULTIALLOC_UPDATE_POINTER(3);
   _ANV_MULTIALLOC_UPDATE_POINTER(4);
   _ANV_MULTIALLOC_UPDATE_POINTER(5);
   _ANV_MULTIALLOC_UPDATE_POINTER(6);
   _ANV_MULTIALLOC_UPDATE_POINTER(7);
#undef _ANV_MULTIALLOC_UPDATE_POINTER

   return ptr;
}

__attribute__((always_inline))
static inline void *
anv_multialloc_alloc2(struct anv_multialloc *ma,
                      const VkAllocationCallbacks *parent_alloc,
                      const VkAllocationCallbacks *alloc,
                      VkSystemAllocationScope scope)
{
   return anv_multialloc_alloc(ma, alloc ? alloc : parent_alloc, scope);
}
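
/* A slightly fuller usage sketch (not in the original header), assuming a
 * hypothetical struct foo that owns an array of struct bar:
 *
 *    struct foo *foo;
 *    struct bar *bars;
 *
 *    ANV_MULTIALLOC(ma);
 *    anv_multialloc_add(&ma, &foo, 1);
 *    anv_multialloc_add(&ma, &bars, bar_count);
 *
 *    if (!anv_multialloc_alloc(&ma, pAllocator,
 *                              VK_SYSTEM_ALLOCATION_SCOPE_OBJECT))
 *       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 *
 *    foo->bars = bars;   // both pointers live in the single allocation
 *
 * Freeing foo with vk_free() then releases the bars array as well.
 */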
struct anv_bo {
   /* Index into the current validation list. This is used by the
    * validation list building algorithm to track which buffers are already
    * in the validation list so that we can ensure uniqueness.
    */

   /* Index for use with util_sparse_array_free_list */

   /* Last known offset. This value is provided by the kernel when we
    * execbuf and is used as the presumed offset for the next bunch of
    * relocations.
    */

   /** Size of the buffer not including implicit aux */

   /* Map for internally mapped BOs.
    *
    * If ANV_BO_WRAPPER is set in flags, map points to the wrapped BO.
    */

   /** Size of the implicit CCS range at the end of the buffer
    *
    * On Gen12, CCS data is always a direct 1/256 scale-down. A single 64K
    * page of main surface data maps to a 256B chunk of CCS data and that
    * mapping is provided on TGL-LP by the AUX table which maps virtual memory
    * addresses in the main surface to virtual memory addresses for CCS data.
    *
    * Because we can't change these maps around easily and because Vulkan
    * allows two VkImages to be bound to overlapping memory regions (as long
    * as the app is careful), it's not feasible to make this mapping part of
    * the image. (On Gen11 and earlier, the mapping was provided via
    * RENDER_SURFACE_STATE so each image had its own main -> CCS mapping.)
    * Instead, we attach the CCS data directly to the buffer object and set up
    * the AUX table mapping at BO creation time.
    *
    * This field is for internal tracking use by the BO allocator only and
    * should not be touched by other parts of the code. If something wants to
    * know if a BO has implicit CCS data, it should instead look at the
    * has_implicit_ccs boolean below.
    *
    * This data is not included in maps of this buffer.
    */
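   /* Worked example (not part of the original comment): with the 1/256 ratio
    * above, a 64 MiB main surface would carry 64 MiB / 256 = 256 KiB of
    * implicit CCS data appended past the end of the buffer.
    */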
   /** Flags to pass to the kernel through drm_i915_gem_exec_object2::flags */

   /** True if this BO may be shared with other processes */

   /** True if this BO is a wrapper
    *
    * When set to true, none of the fields in this BO are meaningful except
    * for anv_bo::is_wrapper and anv_bo::map which points to the actual BO.
    * See also anv_bo_unwrap(). Wrapper BOs are not allowed when use_softpin
    * is set in the physical device.
    */

   /** See also ANV_BO_ALLOC_FIXED_ADDRESS */
   bool has_fixed_address:1;

   /** True if this BO wraps a host pointer */
   bool from_host_ptr:1;

   /** See also ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS */
   bool has_client_visible_address:1;

   /** True if this BO has implicit CCS data attached to it */
   bool has_implicit_ccs:1;
};
static inline struct anv_bo *
anv_bo_ref(struct anv_bo *bo)
{
   p_atomic_inc(&bo->refcount);
   return bo;
}

static inline struct anv_bo *
anv_bo_unwrap(struct anv_bo *bo)
{
   while (bo->is_wrapper)
      bo = bo->map;
   return bo;
}
/* Represents a lock-free linked list of "free" things. This is used by
 * both the block pool and the state pools. Unfortunately, in order to
 * solve the ABA problem, we can't use a single uint32_t head.
 */
union anv_free_list {
   struct {
      uint32_t offset;

      /* A simple count that is incremented every time the head changes. */
      uint32_t count;
   };

   /* Make sure it's aligned to 64 bits. This will make atomic operations
    * faster on 32 bit platforms.
    */
   uint64_t u64 __attribute__ ((aligned (8)));
};

#define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { UINT32_MAX, 0 } })
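
/* Illustrative sketch (not in the original header) of why the generation
 * count matters: a pop can be written as a 64-bit compare-and-swap so that a
 * concurrent pop/push pair reusing the same head offset still changes
 * 'count' and forces the CAS to retry (the classic ABA problem).
 * next_offset_of() is a hypothetical helper used only for illustration.
 *
 *    union anv_free_list current, old, new;
 *    current.u64 = list->u64;
 *    do {
 *       old = current;
 *       new.offset = next_offset_of(old.offset);
 *       new.count = old.count + 1;
 *       current.u64 =
 *          __sync_val_compare_and_swap(&list->u64, old.u64, new.u64);
 *    } while (current.u64 != old.u64);
 */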
struct anv_block_state {

   /* Make sure it's aligned to 64 bits. This will make atomic operations
    * faster on 32 bit platforms.
    */
   uint64_t u64 __attribute__ ((aligned (8)));
};
#define anv_block_pool_foreach_bo(bo, pool)  \
   for (struct anv_bo **_pp_bo = (pool)->bos, *bo; \
        _pp_bo != &(pool)->bos[(pool)->nbos] && (bo = *_pp_bo, true); \
        _pp_bo++)

#define ANV_MAX_BLOCK_POOL_BOS 20
struct anv_block_pool {
   struct anv_device *device;

   /* Wrapper BO for use in relocation lists. This BO is simply a wrapper
    * around the actual BO so that we grow the pool after the wrapper BO has
    * been put in a relocation list. This is only used in the non-softpin
    * case.
    */
   struct anv_bo wrapper_bo;

   struct anv_bo *bos[ANV_MAX_BLOCK_POOL_BOS];

   /* The address where the start of the pool is pinned. The various bos that
    * are created as the pool grows will have addresses in the range
    * [start_address, start_address + BLOCK_POOL_MEMFD_SIZE).
    */
   uint64_t start_address;

   /* The offset from the start of the bo to the "center" of the block
    * pool. Pointers to allocated blocks are given by
    * bo.map + center_bo_offset + offsets.
    */
   uint32_t center_bo_offset;

   /* Current memory map of the block pool. This pointer may or may not
    * point to the actual beginning of the block pool memory. If
    * anv_block_pool_alloc_back has ever been called, then this pointer
    * will point to the "center" position of the buffer and all offsets
    * (negative or positive) given out by the block pool alloc functions
    * will be valid relative to this pointer.
    *
    * In particular, map == bo.map + center_offset
    *
    * DO NOT access this pointer directly. Use anv_block_pool_map() instead,
    * since it will handle the softpin case as well, where this points to NULL.
    */

   /**
    * Array of mmaps and gem handles owned by the block pool, reclaimed when
    * the block pool is destroyed.
    */
   struct u_vector mmap_cleanups;

   struct anv_block_state state;

   struct anv_block_state back_state;
};
/* Block pools are backed by a fixed-size 1GB memfd */
#define BLOCK_POOL_MEMFD_SIZE (1ul << 30)

/* The center of the block pool is also the middle of the memfd. This may
 * change in the future if we decide differently for some reason.
 */
#define BLOCK_POOL_MEMFD_CENTER (BLOCK_POOL_MEMFD_SIZE / 2)

static inline uint32_t
anv_block_pool_size(struct anv_block_pool *pool)
{
   return pool->state.end + pool->back_state.end;
}
#define ANV_STATE_NULL ((struct anv_state) { .alloc_size = 0 })

struct anv_fixed_size_state_pool {
   union anv_free_list free_list;
   struct anv_block_state block;
};

#define ANV_MIN_STATE_SIZE_LOG2 6
#define ANV_MAX_STATE_SIZE_LOG2 21

#define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2 + 1)
struct anv_free_entry {
   struct anv_state state;
};

struct anv_state_table {
   struct anv_device *device;
   struct anv_free_entry *map;
   struct anv_block_state state;
   struct u_vector cleanups;
};
struct anv_state_pool {
   struct anv_block_pool block_pool;

   struct anv_state_table table;

   /* The size of blocks which will be allocated from the block pool */
   uint32_t block_size;

   /** Free list for "back" allocations */
   union anv_free_list back_alloc_free_list;

   struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];
};

struct anv_state_stream {
   struct anv_state_pool *state_pool;

   /* The size of blocks to allocate from the state pool */
   uint32_t block_size;

   /* Current block we're allocating from */
   struct anv_state block;

   /* Offset into the current block at which to allocate the next state */

   /* List of all blocks allocated from this pool */
   struct util_dynarray all_blocks;
};
/* The block_pool functions exported for testing only. The block pool should
 * only be used via a state pool (see below).
 */
VkResult anv_block_pool_init(struct anv_block_pool *pool,
                             struct anv_device *device,
                             uint64_t start_address,
                             uint32_t initial_size);
void anv_block_pool_finish(struct anv_block_pool *pool);
int32_t anv_block_pool_alloc(struct anv_block_pool *pool,
                             uint32_t block_size, uint32_t *padding);
int32_t anv_block_pool_alloc_back(struct anv_block_pool *pool,
                                  uint32_t block_size);
void* anv_block_pool_map(struct anv_block_pool *pool, int32_t offset,
                         uint32_t size);

VkResult anv_state_pool_init(struct anv_state_pool *pool,
                             struct anv_device *device,
                             uint64_t start_address,
                             uint32_t block_size);
void anv_state_pool_finish(struct anv_state_pool *pool);
struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,
                                      uint32_t state_size, uint32_t alignment);
struct anv_state anv_state_pool_alloc_back(struct anv_state_pool *pool);
void anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state);
void anv_state_stream_init(struct anv_state_stream *stream,
                           struct anv_state_pool *state_pool,
                           uint32_t block_size);
void anv_state_stream_finish(struct anv_state_stream *stream);
struct anv_state anv_state_stream_alloc(struct anv_state_stream *stream,
                                        uint32_t size, uint32_t alignment);

VkResult anv_state_table_init(struct anv_state_table *table,
                              struct anv_device *device,
                              uint32_t initial_entries);
void anv_state_table_finish(struct anv_state_table *table);
VkResult anv_state_table_add(struct anv_state_table *table, uint32_t *idx,
                             uint32_t count);
void anv_free_list_push(union anv_free_list *list,
                        struct anv_state_table *table,
                        uint32_t idx, uint32_t count);
struct anv_state *anv_free_list_pop(union anv_free_list *list,
                                    struct anv_state_table *table);

static inline struct anv_state *
anv_state_table_get(struct anv_state_table *table, uint32_t idx)
{
   return &table->map[idx].state;
}
/** Implements a pool of re-usable BOs. The interface is identical to that
 * of block_pool except that each block is its own BO.
 */
struct anv_bo_pool {
   struct anv_device *device;

   struct util_sparse_array_free_list free_list[16];
};

void anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device);
void anv_bo_pool_finish(struct anv_bo_pool *pool);
VkResult anv_bo_pool_alloc(struct anv_bo_pool *pool, uint32_t size,
                           struct anv_bo **bo_out);
void anv_bo_pool_free(struct anv_bo_pool *pool, struct anv_bo *bo);
struct anv_scratch_pool {
   /* Indexed by Per-Thread Scratch Space number (the hardware value) and stage */
   struct anv_bo *bos[16][MESA_SHADER_STAGES];
};

void anv_scratch_pool_init(struct anv_device *device,
                           struct anv_scratch_pool *pool);
void anv_scratch_pool_finish(struct anv_device *device,
                             struct anv_scratch_pool *pool);
struct anv_bo *anv_scratch_pool_alloc(struct anv_device *device,
                                      struct anv_scratch_pool *pool,
                                      gl_shader_stage stage,
                                      unsigned per_thread_scratch);
/** Implements a BO cache that ensures a 1-1 mapping of GEM BOs to anv_bos */
struct anv_bo_cache {
   struct util_sparse_array bo_map;
   pthread_mutex_t mutex;
};

VkResult anv_bo_cache_init(struct anv_bo_cache *cache);
void anv_bo_cache_finish(struct anv_bo_cache *cache);
struct anv_memory_type {
   /* Standard bits passed on to the client */
   VkMemoryPropertyFlags propertyFlags;
   uint32_t heapIndex;
};

struct anv_memory_heap {
   /* Standard bits passed on to the client */
   VkDeviceSize size;
   VkMemoryHeapFlags flags;

   /* Driver-internal book-keeping */
};
struct anv_physical_device {
   struct vk_object_base base;

   /* Link in anv_instance::physical_devices */
   struct list_head link;

   struct anv_instance *instance;

   struct gen_device_info info;
   /** Amount of "GPU memory" we want to advertise
    *
    * Clearly, this value is bogus since Intel is a UMA architecture. On
    * gen7 platforms, we are limited by GTT size unless we want to implement
    * fine-grained tracking and GTT splitting. On Broadwell and above we are
    * practically unlimited. However, we will never report more than 3/4 of
    * the total system RAM to try to avoid running out of RAM.
    */

   bool supports_48bit_addresses;
   struct brw_compiler *compiler;
   struct isl_device isl_dev;
   struct gen_perf_config *perf;
   int cmd_parser_version;
   bool has_exec_async;
   bool has_exec_capture;
   bool has_exec_fence;
   bool has_syncobj_wait;
   bool has_context_priority;
   bool has_context_isolation;
   bool has_mem_available;
   bool has_mmap_offset;

   bool always_use_bindless;

   /** True if we can access buffers using A64 messages */
   bool has_a64_buffer_access;
   /** True if we can use bindless access for images */
   bool has_bindless_images;
   /** True if we can use bindless access for samplers */
   bool has_bindless_samplers;

   /** True if this device has implicit AUX
    *
    * If true, CCS is handled as an implicit attachment to the BO rather than
    * as an explicitly bound surface.
    */
   bool has_implicit_ccs;

   bool always_flush_cache;

   struct anv_device_extension_table supported_extensions;

   uint32_t subslice_total;

   uint32_t type_count;
   struct anv_memory_type types[VK_MAX_MEMORY_TYPES];
   uint32_t heap_count;
   struct anv_memory_heap heaps[VK_MAX_MEMORY_HEAPS];

   uint8_t driver_build_sha1[20];
   uint8_t pipeline_cache_uuid[VK_UUID_SIZE];
   uint8_t driver_uuid[VK_UUID_SIZE];
   uint8_t device_uuid[VK_UUID_SIZE];

   struct disk_cache *disk_cache;

   struct wsi_device wsi_device;
};
struct anv_app_info {
   const char *app_name;
   uint32_t app_version;
   const char *engine_name;
   uint32_t engine_version;
   uint32_t api_version;
};
struct anv_instance {
   struct vk_object_base base;

   VkAllocationCallbacks alloc;

   struct anv_app_info app_info;

   struct anv_instance_extension_table enabled_extensions;
   struct anv_instance_dispatch_table dispatch;
   struct anv_physical_device_dispatch_table physical_device_dispatch;
   struct anv_device_dispatch_table device_dispatch;

   bool physical_devices_enumerated;
   struct list_head physical_devices;

   bool pipeline_cache_enabled;

   struct vk_debug_report_instance debug_report_callbacks;

   struct driOptionCache dri_options;
   struct driOptionCache available_dri_options;
};
VkResult anv_init_wsi(struct anv_physical_device *physical_device);
void anv_finish_wsi(struct anv_physical_device *physical_device);

uint32_t anv_physical_device_api_version(struct anv_physical_device *dev);
bool anv_physical_device_extension_supported(struct anv_physical_device *dev,
                                             const char *name);
struct anv_queue_submit {
   struct anv_cmd_buffer *cmd_buffer;

   uint32_t fence_count;
   uint32_t fence_array_length;
   struct drm_i915_gem_exec_fence *fences;

   uint32_t temporary_semaphore_count;
   uint32_t temporary_semaphore_array_length;
   struct anv_semaphore_impl *temporary_semaphores;

   /* Semaphores to be signaled with a SYNC_FD. */
   struct anv_semaphore **sync_fd_semaphores;
   uint32_t sync_fd_semaphore_count;
   uint32_t sync_fd_semaphore_array_length;

   /* Allocated only with non-shareable timelines. */
   struct anv_timeline **wait_timelines;
   uint32_t wait_timeline_count;
   uint32_t wait_timeline_array_length;
   uint64_t *wait_timeline_values;

   struct anv_timeline **signal_timelines;
   uint32_t signal_timeline_count;
   uint32_t signal_timeline_array_length;
   uint64_t *signal_timeline_values;

   bool need_out_fence;

   uint32_t fence_bo_count;
   uint32_t fence_bo_array_length;
   /* An array of struct anv_bo pointers with lower bit used as a flag to
    * signal we will wait on that BO (see anv_(un)pack_ptr).
    */
   uintptr_t *fence_bos;

   const VkAllocationCallbacks *alloc;
   VkSystemAllocationScope alloc_scope;

   struct anv_bo *simple_bo;
   uint32_t simple_bo_size;

   struct list_head link;
};
struct anv_queue {
   struct vk_object_base base;

   struct anv_device *device;

   /*
    * A list of struct anv_queue_submit to be submitted to i915.
    */
   struct list_head queued_submits;

   VkDeviceQueueCreateFlags flags;
};
struct anv_pipeline_cache {
   struct vk_object_base base;
   struct anv_device *device;
   pthread_mutex_t mutex;

   struct hash_table *nir_cache;

   struct hash_table *cache;
};

struct nir_xfb_info;
struct anv_pipeline_bind_map;

void anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
                             struct anv_device *device,
                             bool cache_enabled);
void anv_pipeline_cache_finish(struct anv_pipeline_cache *cache);
struct anv_shader_bin *
anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                          const void *key, uint32_t key_size);
struct anv_shader_bin *
anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                 gl_shader_stage stage,
                                 const void *key_data, uint32_t key_size,
                                 const void *kernel_data, uint32_t kernel_size,
                                 const void *constant_data,
                                 uint32_t constant_data_size,
                                 const struct brw_stage_prog_data *prog_data,
                                 uint32_t prog_data_size,
                                 const struct brw_compile_stats *stats,
                                 const struct nir_xfb_info *xfb_info,
                                 const struct anv_pipeline_bind_map *bind_map);

struct anv_shader_bin *
anv_device_search_for_kernel(struct anv_device *device,
                             struct anv_pipeline_cache *cache,
                             const void *key_data, uint32_t key_size,
                             bool *user_cache_bit);

struct anv_shader_bin *
anv_device_upload_kernel(struct anv_device *device,
                         struct anv_pipeline_cache *cache,
                         gl_shader_stage stage,
                         const void *key_data, uint32_t key_size,
                         const void *kernel_data, uint32_t kernel_size,
                         const void *constant_data,
                         uint32_t constant_data_size,
                         const struct brw_stage_prog_data *prog_data,
                         uint32_t prog_data_size,
                         const struct brw_compile_stats *stats,
                         const struct nir_xfb_info *xfb_info,
                         const struct anv_pipeline_bind_map *bind_map);
struct nir_shader_compiler_options;

struct nir_shader *
anv_device_search_for_nir(struct anv_device *device,
                          struct anv_pipeline_cache *cache,
                          const struct nir_shader_compiler_options *nir_options,
                          unsigned char sha1_key[20],
                          void *mem_ctx);

void
anv_device_upload_nir(struct anv_device *device,
                      struct anv_pipeline_cache *cache,
                      const struct nir_shader *nir,
                      unsigned char sha1_key[20]);
struct anv_device {
   struct vk_device vk;

   struct anv_physical_device *physical;

   struct gen_device_info info;
   struct isl_device isl_dev;

   bool can_chain_batches;
   bool robust_buffer_access;
   struct anv_device_extension_table enabled_extensions;
   struct anv_device_dispatch_table dispatch;

   pthread_mutex_t vma_mutex;
   struct util_vma_heap vma_lo;
   struct util_vma_heap vma_cva;
   struct util_vma_heap vma_hi;

   /** List of all anv_device_memory objects */
   struct list_head memory_objects;

   struct anv_bo_pool batch_bo_pool;

   struct anv_bo_cache bo_cache;

   struct anv_state_pool dynamic_state_pool;
   struct anv_state_pool instruction_state_pool;
   struct anv_state_pool binding_table_pool;
   struct anv_state_pool surface_state_pool;

   /** BO used for various workarounds
    *
    * There are a number of workarounds on our hardware which require writing
    * data somewhere and it doesn't really matter where. For that, we use
    * this BO and just write to the first dword or so.
    *
    * We also need to be able to handle NULL buffers bound as pushed UBOs.
    * For that, we use the high bytes (>= 1024) of the workaround BO.
    */
   struct anv_bo *workaround_bo;
   struct anv_bo *trivial_batch_bo;
   struct anv_bo *hiz_clear_bo;
   struct anv_state null_surface_state;

   struct anv_pipeline_cache default_pipeline_cache;
   struct blorp_context blorp;

   struct anv_state border_colors;

   struct anv_state slice_hash;

   struct anv_queue queue;

   struct anv_scratch_pool scratch_pool;

   pthread_mutex_t mutex;
   pthread_cond_t queue_submit;

   struct gen_batch_decode_ctx decoder_ctx;
   /*
    * When decoding an anv_cmd_buffer, we might need to search for BOs through
    * the cmd_buffer's list.
    */
   struct anv_cmd_buffer *cmd_buffer_being_decoded;

   int perf_fd; /* -1 if not opened */
   uint64_t perf_metric; /* 0 if unset */

   struct gen_aux_map_context *aux_map_ctx;
};
static inline struct anv_instance *
anv_device_instance_or_null(const struct anv_device *device)
{
   return device ? device->physical->instance : NULL;
}
static inline struct anv_state_pool *
anv_binding_table_pool(struct anv_device *device)
{
   if (device->physical->use_softpin)
      return &device->binding_table_pool;
   else
      return &device->surface_state_pool;
}

static inline struct anv_state
anv_binding_table_pool_alloc(struct anv_device *device) {
   if (device->physical->use_softpin)
      return anv_state_pool_alloc(&device->binding_table_pool,
                                  device->binding_table_pool.block_size, 0);
   else
      return anv_state_pool_alloc_back(&device->surface_state_pool);
}

static inline void
anv_binding_table_pool_free(struct anv_device *device, struct anv_state state) {
   anv_state_pool_free(anv_binding_table_pool(device), state);
}
static inline uint32_t
anv_mocs_for_bo(const struct anv_device *device, const struct anv_bo *bo)
{
   if (bo->is_external)
      return device->isl_dev.mocs.external;
   else
      return device->isl_dev.mocs.internal;
}

void anv_device_init_blorp(struct anv_device *device);
void anv_device_finish_blorp(struct anv_device *device);
void _anv_device_set_all_queue_lost(struct anv_device *device);
VkResult _anv_device_set_lost(struct anv_device *device,
                              const char *file, int line,
                              const char *msg, ...)
   anv_printflike(4, 5);
VkResult _anv_queue_set_lost(struct anv_queue *queue,
                             const char *file, int line,
                             const char *msg, ...)
   anv_printflike(4, 5);
#define anv_device_set_lost(dev, ...) \
   _anv_device_set_lost(dev, __FILE__, __LINE__, __VA_ARGS__)
#define anv_queue_set_lost(queue, ...) \
   _anv_queue_set_lost(queue, __FILE__, __LINE__, __VA_ARGS__)

static inline bool
anv_device_is_lost(struct anv_device *device)
{
   return unlikely(p_atomic_read(&device->_lost));
}

VkResult anv_device_query_status(struct anv_device *device);
enum anv_bo_alloc_flags {
   /** Specifies that the BO must have a 32-bit address
    *
    * This is the opposite of EXEC_OBJECT_SUPPORTS_48B_ADDRESS.
    */
   ANV_BO_ALLOC_32BIT_ADDRESS = (1 << 0),

   /** Specifies that the BO may be shared externally */
   ANV_BO_ALLOC_EXTERNAL = (1 << 1),

   /** Specifies that the BO should be mapped */
   ANV_BO_ALLOC_MAPPED = (1 << 2),

   /** Specifies that the BO should be snooped so we get coherency */
   ANV_BO_ALLOC_SNOOPED = (1 << 3),

   /** Specifies that the BO should be captured in error states */
   ANV_BO_ALLOC_CAPTURE = (1 << 4),

   /** Specifies that the BO will have an address assigned by the caller
    *
    * Such BOs do not exist in any VMA heap.
    */
   ANV_BO_ALLOC_FIXED_ADDRESS = (1 << 5),

   /** Enables implicit synchronization on the BO
    *
    * This is the opposite of EXEC_OBJECT_ASYNC.
    */
   ANV_BO_ALLOC_IMPLICIT_SYNC = (1 << 6),

   /** Enables implicit synchronization on the BO
    *
    * This is equivalent to EXEC_OBJECT_WRITE.
    */
   ANV_BO_ALLOC_IMPLICIT_WRITE = (1 << 7),

   /** Has an address which is visible to the client */
   ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS = (1 << 8),

   /** This buffer has implicit CCS data attached to it */
   ANV_BO_ALLOC_IMPLICIT_CCS = (1 << 9),
};
VkResult anv_device_alloc_bo(struct anv_device *device, uint64_t size,
                             enum anv_bo_alloc_flags alloc_flags,
                             uint64_t explicit_address,
                             struct anv_bo **bo);
VkResult anv_device_import_bo_from_host_ptr(struct anv_device *device,
                                            void *host_ptr, uint32_t size,
                                            enum anv_bo_alloc_flags alloc_flags,
                                            uint64_t client_address,
                                            struct anv_bo **bo_out);
VkResult anv_device_import_bo(struct anv_device *device, int fd,
                              enum anv_bo_alloc_flags alloc_flags,
                              uint64_t client_address,
                              struct anv_bo **bo);
VkResult anv_device_export_bo(struct anv_device *device,
                              struct anv_bo *bo, int *fd_out);
void anv_device_release_bo(struct anv_device *device,
                           struct anv_bo *bo);
static inline struct anv_bo *
anv_device_lookup_bo(struct anv_device *device, uint32_t gem_handle)
{
   return util_sparse_array_get(&device->bo_cache.bo_map, gem_handle);
}

VkResult anv_device_bo_busy(struct anv_device *device, struct anv_bo *bo);
VkResult anv_device_wait(struct anv_device *device, struct anv_bo *bo,
                         int64_t timeout_ns);

VkResult anv_queue_init(struct anv_device *device, struct anv_queue *queue);
void anv_queue_finish(struct anv_queue *queue);

VkResult anv_queue_execbuf_locked(struct anv_queue *queue, struct anv_queue_submit *submit);
VkResult anv_queue_submit_simple_batch(struct anv_queue *queue,
                                       struct anv_batch *batch);

uint64_t anv_gettime_ns(void);
uint64_t anv_get_absolute_timeout(uint64_t timeout);
void* anv_gem_mmap(struct anv_device *device,
                   uint32_t gem_handle, uint64_t offset, uint64_t size, uint32_t flags);
void anv_gem_munmap(struct anv_device *device, void *p, uint64_t size);
uint32_t anv_gem_create(struct anv_device *device, uint64_t size);
void anv_gem_close(struct anv_device *device, uint32_t gem_handle);
uint32_t anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
int anv_gem_busy(struct anv_device *device, uint32_t gem_handle);
int anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns);
int anv_gem_execbuffer(struct anv_device *device,
                       struct drm_i915_gem_execbuffer2 *execbuf);
int anv_gem_set_tiling(struct anv_device *device, uint32_t gem_handle,
                       uint32_t stride, uint32_t tiling);
int anv_gem_create_context(struct anv_device *device);
bool anv_gem_has_context_priority(int fd);
int anv_gem_destroy_context(struct anv_device *device, int context);
int anv_gem_set_context_param(int fd, int context, uint32_t param,
                              uint64_t value);
int anv_gem_get_context_param(int fd, int context, uint32_t param,
                              uint64_t *value);
int anv_gem_get_param(int fd, uint32_t param);
int anv_gem_get_tiling(struct anv_device *device, uint32_t gem_handle);
bool anv_gem_get_bit6_swizzle(int fd, uint32_t tiling);
int anv_gem_get_aperture(int fd, uint64_t *size);
int anv_gem_gpu_get_reset_stats(struct anv_device *device,
                                uint32_t *active, uint32_t *pending);
int anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle);
int anv_gem_reg_read(struct anv_device *device,
                     uint32_t offset, uint64_t *result);
uint32_t anv_gem_fd_to_handle(struct anv_device *device, int fd);
int anv_gem_set_caching(struct anv_device *device, uint32_t gem_handle, uint32_t caching);
int anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
                       uint32_t read_domains, uint32_t write_domain);
int anv_gem_sync_file_merge(struct anv_device *device, int fd1, int fd2);
uint32_t anv_gem_syncobj_create(struct anv_device *device, uint32_t flags);
void anv_gem_syncobj_destroy(struct anv_device *device, uint32_t handle);
int anv_gem_syncobj_handle_to_fd(struct anv_device *device, uint32_t handle);
uint32_t anv_gem_syncobj_fd_to_handle(struct anv_device *device, int fd);
int anv_gem_syncobj_export_sync_file(struct anv_device *device,
                                     uint32_t handle);
int anv_gem_syncobj_import_sync_file(struct anv_device *device,
                                     uint32_t handle, int fd);
void anv_gem_syncobj_reset(struct anv_device *device, uint32_t handle);
bool anv_gem_supports_syncobj_wait(int fd);
int anv_gem_syncobj_wait(struct anv_device *device,
                         uint32_t *handles, uint32_t num_handles,
                         int64_t abs_timeout_ns, bool wait_all);
uint64_t anv_vma_alloc(struct anv_device *device,
                       uint64_t size, uint64_t align,
                       enum anv_bo_alloc_flags alloc_flags,
                       uint64_t client_address);
void anv_vma_free(struct anv_device *device,
                  uint64_t address, uint64_t size);
struct anv_reloc_list {
   uint32_t num_relocs;
   uint32_t array_length;
   struct drm_i915_gem_relocation_entry *relocs;
   struct anv_bo **reloc_bos;
};

VkResult anv_reloc_list_init(struct anv_reloc_list *list,
                             const VkAllocationCallbacks *alloc);
void anv_reloc_list_finish(struct anv_reloc_list *list,
                           const VkAllocationCallbacks *alloc);

VkResult anv_reloc_list_add(struct anv_reloc_list *list,
                            const VkAllocationCallbacks *alloc,
                            uint32_t offset, struct anv_bo *target_bo,
                            uint32_t delta, uint64_t *address_u64_out);
struct anv_batch_bo {
   /* Link in the anv_cmd_buffer.owned_batch_bos list */
   struct list_head link;

   /* Bytes actually consumed in this batch BO */

   struct anv_reloc_list relocs;
};

struct anv_batch {
   const VkAllocationCallbacks *alloc;

   struct anv_reloc_list *relocs;

   /* This callback is called (with the associated user data) in the event
    * that the batch runs out of space.
    */
   VkResult (*extend_cb)(struct anv_batch *, void *);

   /**
    * Current error status of the command buffer. Used to track inconsistent
    * or incomplete command buffer states that are the consequence of run-time
    * errors such as out of memory scenarios. We want to track this in the
    * batch because the command buffer object is not visible to some parts
    * of the driver.
    */
   VkResult status;
};

void *anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords);
void anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other);
uint64_t anv_batch_emit_reloc(struct anv_batch *batch,
                              void *location, struct anv_bo *bo, uint32_t offset);
static inline VkResult
anv_batch_set_error(struct anv_batch *batch, VkResult error)
{
   assert(error != VK_SUCCESS);
   if (batch->status == VK_SUCCESS)
      batch->status = error;
   return batch->status;
}

static inline bool
anv_batch_has_error(struct anv_batch *batch)
{
   return batch->status != VK_SUCCESS;
}
struct anv_address {
   struct anv_bo *bo;
   uint32_t offset;
};

#define ANV_NULL_ADDRESS ((struct anv_address) { NULL, 0 })

static inline bool
anv_address_is_null(struct anv_address addr)
{
   return addr.bo == NULL && addr.offset == 0;
}

static inline uint64_t
anv_address_physical(struct anv_address addr)
{
   if (addr.bo && (addr.bo->flags & EXEC_OBJECT_PINNED))
      return gen_canonical_address(addr.bo->offset + addr.offset);
   else
      return gen_canonical_address(addr.offset);
}

static inline struct anv_address
anv_address_add(struct anv_address addr, uint64_t offset)
{
   addr.offset += offset;
   return addr;
}
static inline void
write_reloc(const struct anv_device *device, void *p, uint64_t v, bool flush)
{
   unsigned reloc_size = 0;
   if (device->info.gen >= 8) {
      reloc_size = sizeof(uint64_t);
      *(uint64_t *)p = gen_canonical_address(v);
   } else {
      reloc_size = sizeof(uint32_t);
      *(uint32_t *)p = v;
   }

   if (flush && !device->info.has_llc)
      gen_flush_range(p, reloc_size);
}
static inline uint64_t
_anv_combine_address(struct anv_batch *batch, void *location,
                     const struct anv_address address, uint32_t delta)
{
   if (address.bo == NULL) {
      return address.offset + delta;
   } else {
      assert(batch->start <= location && location < batch->end);

      return anv_batch_emit_reloc(batch, location, address.bo, address.offset + delta);
   }
}

#define __gen_address_type struct anv_address
#define __gen_user_data struct anv_batch
#define __gen_combine_address _anv_combine_address
/* Wrapper macros needed to work around preprocessor argument issues. In
 * particular, arguments don't get pre-evaluated if they are concatenated.
 * This means that, if you pass GENX(3DSTATE_PS) into the emit macro, the
 * GENX macro won't get evaluated if the emit macro contains "cmd ## foo".
 * We can work around this easily enough with these helpers.
 */
#define __anv_cmd_length(cmd) cmd ## _length
#define __anv_cmd_length_bias(cmd) cmd ## _length_bias
#define __anv_cmd_header(cmd) cmd ## _header
#define __anv_cmd_pack(cmd) cmd ## _pack
#define __anv_reg_num(reg) reg ## _num
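
/* Illustrative expansion (not in the original header): because
 * __anv_cmd_length is itself a function-like macro, its argument is
 * macro-expanded first, so __anv_cmd_length(GENX(3DSTATE_PS)) becomes, e.g.,
 * GEN9_3DSTATE_PS_length on a gen9 build. Writing GENX(3DSTATE_PS) ## _length
 * directly would paste tokens before GENX could expand, as the comment above
 * describes.
 */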
#define anv_pack_struct(dst, struc, ...) do {                              \
      struct struc __template = {                                         \
         __VA_ARGS__                                                      \
      };                                                                  \
      __anv_cmd_pack(struc)(NULL, dst, &__template);                      \
      VG(VALGRIND_CHECK_MEM_IS_DEFINED(dst, __anv_cmd_length(struc) * 4)); \
   } while (0)

#define anv_batch_emitn(batch, n, cmd, ...) ({             \
      void *__dst = anv_batch_emit_dwords(batch, n);       \
      if (__dst) {                                         \
         struct cmd __template = {                         \
            __anv_cmd_header(cmd),                         \
            .DWordLength = n - __anv_cmd_length_bias(cmd), \
            __VA_ARGS__                                    \
         };                                                \
         __anv_cmd_pack(cmd)(batch, __dst, &__template);   \
      }                                                    \
      __dst;                                               \
   })

#define anv_batch_emit_merge(batch, dwords0, dwords1)                   \
   do {                                                                 \
      uint32_t *dw;                                                     \
                                                                        \
      STATIC_ASSERT(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1));        \
      dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0));         \
      if (!dw)                                                          \
         break;                                                         \
      for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++)                \
         dw[i] = (dwords0)[i] | (dwords1)[i];                           \
      VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, ARRAY_SIZE(dwords0) * 4));   \
   } while (0)

#define anv_batch_emit(batch, cmd, name)                             \
   for (struct cmd name = { __anv_cmd_header(cmd) },                 \
        *_dst = anv_batch_emit_dwords(batch, __anv_cmd_length(cmd)); \
        __builtin_expect(_dst != NULL, 1);                           \
        ({ __anv_cmd_pack(cmd)(batch, _dst, &name);                  \
           VG(VALGRIND_CHECK_MEM_IS_DEFINED(_dst, __anv_cmd_length(cmd) * 4)); \
           _dst = NULL;                                              \
         }))
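
/* Typical use of anv_batch_emit (not part of the original header): the
 * for-loop trick packs the command when the block exits, so fields are set on
 * 'name' inside the braces, e.g.:
 *
 *    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 *       pc.CommandStreamerStallEnable = true;
 *    }
 */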
struct anv_device_memory {
   struct vk_object_base base;

   struct list_head link;

   struct anv_memory_type *type;
   VkDeviceSize map_size;

   /* If set, we are holding a reference to an AHardwareBuffer
    * which we must release when memory is freed.
    */
   struct AHardwareBuffer *ahw;

   /* If set, this memory comes from a host pointer. */
};
/**
 * Header for Vertex URB Entry (VUE)
 */
struct anv_vue_header {
   uint32_t RTAIndex; /* RenderTargetArrayIndex */
   uint32_t ViewportIndex;
};
/** Struct representing a sampled image descriptor
 *
 * This descriptor layout is used for sampled images, bare sampler, and
 * combined image/sampler descriptors.
 */
struct anv_sampled_image_descriptor {
   /** Bindless image handle
    *
    * This is expected to already be shifted such that the 20-bit
    * SURFACE_STATE table index is in the top 20 bits.
    */
   uint32_t image;

   /** Bindless sampler handle
    *
    * This is assumed to be a 32B-aligned SAMPLER_STATE pointer relative
    * to the dynamic state base address.
    */
   uint32_t sampler;
};
struct anv_texture_swizzle_descriptor {
   /** Texture swizzle
    *
    * See also nir_intrinsic_channel_select_intel
    */
   uint8_t swizzle[4];

   /** Unused padding to ensure the struct is a multiple of 64 bits */
   uint32_t _pad;
};
/** Struct representing a storage image descriptor */
struct anv_storage_image_descriptor {
   /** Bindless image handles
    *
    * These are expected to already be shifted such that the 20-bit
    * SURFACE_STATE table index is in the top 20 bits.
    */
   uint32_t read_write;
   uint32_t write_only;
};
/** Struct representing an address/range descriptor
 *
 * The fields of this struct correspond directly to the data layout of
 * nir_address_format_64bit_bounded_global addresses. The last field is the
 * offset in the NIR address so it must be zero so that when you load the
 * descriptor you get a pointer to the start of the range.
 */
struct anv_address_range_descriptor {
   uint64_t address;
   uint32_t range;
};
enum anv_descriptor_data {
   /** The descriptor contains a BTI reference to a surface state */
   ANV_DESCRIPTOR_SURFACE_STATE   = (1 << 0),
   /** The descriptor contains a BTI reference to a sampler state */
   ANV_DESCRIPTOR_SAMPLER_STATE   = (1 << 1),
   /** The descriptor contains an actual buffer view */
   ANV_DESCRIPTOR_BUFFER_VIEW     = (1 << 2),
   /** The descriptor contains auxiliary image layout data */
   ANV_DESCRIPTOR_IMAGE_PARAM     = (1 << 3),
   /** The descriptor contains inline uniform block data */
   ANV_DESCRIPTOR_INLINE_UNIFORM  = (1 << 4),
   /** anv_address_range_descriptor with a buffer address and range */
   ANV_DESCRIPTOR_ADDRESS_RANGE   = (1 << 5),
   /** Bindless surface handle */
   ANV_DESCRIPTOR_SAMPLED_IMAGE   = (1 << 6),
   /** Storage image handles */
   ANV_DESCRIPTOR_STORAGE_IMAGE   = (1 << 7),
   /** Texture swizzle data */
   ANV_DESCRIPTOR_TEXTURE_SWIZZLE = (1 << 8),
};
struct anv_descriptor_set_binding_layout {
   /* The type of the descriptors in this binding */
   VkDescriptorType type;

   /* Flags provided when this binding was created */
   VkDescriptorBindingFlagsEXT flags;

   /* Bitfield representing the type of data this descriptor contains */
   enum anv_descriptor_data data;

   /* Maximum number of YCbCr texture/sampler planes */
   uint8_t max_plane_count;

   /* Number of array elements in this binding (or size in bytes for inline
    * uniform data)
    */
   uint16_t array_size;

   /* Index into the flattened descriptor set */
   uint16_t descriptor_index;

   /* Index into the dynamic state array for a dynamic buffer */
   int16_t dynamic_offset_index;

   /* Index into the descriptor set buffer views */
   int16_t buffer_view_index;

   /* Offset into the descriptor buffer where this descriptor lives */
   uint32_t descriptor_offset;

   /* Immutable samplers (or NULL if no immutable samplers) */
   struct anv_sampler **immutable_samplers;
};
unsigned anv_descriptor_size(const struct anv_descriptor_set_binding_layout *layout);

unsigned anv_descriptor_type_size(const struct anv_physical_device *pdevice,
                                  VkDescriptorType type);

bool anv_descriptor_supports_bindless(const struct anv_physical_device *pdevice,
                                      const struct anv_descriptor_set_binding_layout *binding,
                                      bool sampler);

bool anv_descriptor_requires_bindless(const struct anv_physical_device *pdevice,
                                      const struct anv_descriptor_set_binding_layout *binding,
                                      bool sampler);
struct anv_descriptor_set_layout {
   struct vk_object_base base;

   /* Descriptor set layouts can be destroyed at almost any time */
   uint32_t ref_cnt;

   /* Number of bindings in this descriptor set */
   uint16_t binding_count;

   /* Total size of the descriptor set with room for all array entries */
   uint16_t size;

   /* Shader stages affected by this descriptor set */
   uint16_t shader_stages;

   /* Number of buffer views in this descriptor set */
   uint16_t buffer_view_count;

   /* Number of dynamic offsets used by this descriptor set */
   uint16_t dynamic_offset_count;

   /* For each shader stage, which offsets apply to that stage */
   uint16_t stage_dynamic_offsets[MESA_SHADER_STAGES];

   /* Size of the descriptor buffer for this descriptor set */
   uint32_t descriptor_buffer_size;

   /* Bindings in this descriptor set */
   struct anv_descriptor_set_binding_layout binding[0];
};
void anv_descriptor_set_layout_destroy(struct anv_device *device,
                                       struct anv_descriptor_set_layout *layout);

static inline void
anv_descriptor_set_layout_ref(struct anv_descriptor_set_layout *layout)
{
   assert(layout && layout->ref_cnt >= 1);
   p_atomic_inc(&layout->ref_cnt);
}

static inline void
anv_descriptor_set_layout_unref(struct anv_device *device,
                                struct anv_descriptor_set_layout *layout)
{
   assert(layout && layout->ref_cnt >= 1);
   if (p_atomic_dec_zero(&layout->ref_cnt))
      anv_descriptor_set_layout_destroy(device, layout);
}
1935 struct anv_descriptor
{
1936 VkDescriptorType type
;
1940 VkImageLayout layout
;
1941 struct anv_image_view
*image_view
;
1942 struct anv_sampler
*sampler
;
1946 struct anv_buffer
*buffer
;
1951 struct anv_buffer_view
*buffer_view
;
1955 struct anv_descriptor_set
{
1956 struct vk_object_base base
;
1958 struct anv_descriptor_pool
*pool
;
1959 struct anv_descriptor_set_layout
*layout
;
1962 /* State relative to anv_descriptor_pool::bo */
1963 struct anv_state desc_mem
;
1964 /* Surface state for the descriptor buffer */
1965 struct anv_state desc_surface_state
;
1967 uint32_t buffer_view_count
;
1968 struct anv_buffer_view
*buffer_views
;
/* Link to descriptor pool's desc_sets list. */
1971 struct list_head pool_link
;
1973 struct anv_descriptor descriptors
[0];
1976 struct anv_buffer_view
{
1977 struct vk_object_base base
;
1979 enum isl_format format
; /**< VkBufferViewCreateInfo::format */
1980 uint64_t range
; /**< VkBufferViewCreateInfo::range */
1982 struct anv_address address
;
1984 struct anv_state surface_state
;
1985 struct anv_state storage_surface_state
;
1986 struct anv_state writeonly_storage_surface_state
;
1988 struct brw_image_param storage_image_param
;
1991 struct anv_push_descriptor_set
{
1992 struct anv_descriptor_set set
;
1994 /* Put this field right behind anv_descriptor_set so it fills up the
1995 * descriptors[0] field. */
1996 struct anv_descriptor descriptors
[MAX_PUSH_DESCRIPTORS
];
1998 /** True if the descriptor set buffer has been referenced by a draw or
2001 bool set_used_on_gpu
;
2003 struct anv_buffer_view buffer_views
[MAX_PUSH_DESCRIPTORS
];
2006 struct anv_descriptor_pool
{
2007 struct vk_object_base base
;
2014 struct util_vma_heap bo_heap
;
2016 struct anv_state_stream surface_state_stream
;
2017 void *surface_state_free_list
;
2019 struct list_head desc_sets
;
2024 enum anv_descriptor_template_entry_type
{
2025 ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_IMAGE
,
2026 ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_BUFFER
,
2027 ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_BUFFER_VIEW
2030 struct anv_descriptor_template_entry
{
2031 /* The type of descriptor in this entry */
2032 VkDescriptorType type
;
2034 /* Binding in the descriptor set */
2037 /* Offset at which to write into the descriptor set binding */
2038 uint32_t array_element
;
2040 /* Number of elements to write into the descriptor set binding */
2041 uint32_t array_count
;
2043 /* Offset into the user provided data */
2046 /* Stride between elements into the user provided data */
2050 struct anv_descriptor_update_template
{
2051 struct vk_object_base base
;
2053 VkPipelineBindPoint bind_point
;
2055 /* The descriptor set this template corresponds to. This value is only
2056 * valid if the template was created with the templateType
2057 * VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET.
2061 /* Number of entries in this template */
2062 uint32_t entry_count
;
2064 /* Entries of the template */
2065 struct anv_descriptor_template_entry entries
[0];
2069 anv_descriptor_set_layout_size(const struct anv_descriptor_set_layout
*layout
);
2072 anv_descriptor_set_write_image_view(struct anv_device
*device
,
2073 struct anv_descriptor_set
*set
,
2074 const VkDescriptorImageInfo
* const info
,
2075 VkDescriptorType type
,
2080 anv_descriptor_set_write_buffer_view(struct anv_device
*device
,
2081 struct anv_descriptor_set
*set
,
2082 VkDescriptorType type
,
2083 struct anv_buffer_view
*buffer_view
,
2088 anv_descriptor_set_write_buffer(struct anv_device
*device
,
2089 struct anv_descriptor_set
*set
,
2090 struct anv_state_stream
*alloc_stream
,
2091 VkDescriptorType type
,
2092 struct anv_buffer
*buffer
,
2095 VkDeviceSize offset
,
2096 VkDeviceSize range
);
2098 anv_descriptor_set_write_inline_uniform_data(struct anv_device
*device
,
2099 struct anv_descriptor_set
*set
,
2106 anv_descriptor_set_write_template(struct anv_device
*device
,
2107 struct anv_descriptor_set
*set
,
2108 struct anv_state_stream
*alloc_stream
,
2109 const struct anv_descriptor_update_template
*template,
2113 anv_descriptor_set_create(struct anv_device
*device
,
2114 struct anv_descriptor_pool
*pool
,
2115 struct anv_descriptor_set_layout
*layout
,
2116 struct anv_descriptor_set
**out_set
);
2119 anv_descriptor_set_destroy(struct anv_device
*device
,
2120 struct anv_descriptor_pool
*pool
,
2121 struct anv_descriptor_set
*set
);
2123 #define ANV_DESCRIPTOR_SET_NULL (UINT8_MAX - 5)
2124 #define ANV_DESCRIPTOR_SET_PUSH_CONSTANTS (UINT8_MAX - 4)
2125 #define ANV_DESCRIPTOR_SET_DESCRIPTORS (UINT8_MAX - 3)
2126 #define ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS (UINT8_MAX - 2)
2127 #define ANV_DESCRIPTOR_SET_SHADER_CONSTANTS (UINT8_MAX - 1)
2128 #define ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS UINT8_MAX
2130 struct anv_pipeline_binding
{
2131 /** Index in the descriptor set
2133 * This is a flattened index; the descriptor set layout is already taken
2138 /** The descriptor set this surface corresponds to.
 * The special ANV_DESCRIPTOR_SET_* values above indicate that this
2141 * binding is not a normal descriptor set but something else.
2146 /** Plane in the binding index for images */
2149 /** Input attachment index (relative to the subpass) */
2150 uint8_t input_attachment_index
;
2152 /** Dynamic offset index (for dynamic UBOs and SSBOs) */
2153 uint8_t dynamic_offset_index
;
2156 /** For a storage image, whether it is write-only */
2159 /** Pad to 64 bits so that there are no holes and we can safely memcmp
2160 * assuming POD zero-initialization.
2165 struct anv_push_range
{
2166 /** Index in the descriptor set */
2169 /** Descriptor set index */
2172 /** Dynamic offset index (for dynamic UBOs) */
2173 uint8_t dynamic_offset_index
;
2175 /** Start offset in units of 32B */
2178 /** Range in units of 32B */
2182 struct anv_pipeline_layout
{
2183 struct vk_object_base base
;
2186 struct anv_descriptor_set_layout
*layout
;
2187 uint32_t dynamic_offset_start
;
2192 unsigned char sha1
[20];
2196 struct vk_object_base base
;
2198 struct anv_device
* device
;
2201 VkBufferUsageFlags usage
;
2203 /* Set when bound */
2204 struct anv_address address
;
static inline uint64_t
anv_buffer_get_range(struct anv_buffer *buffer, uint64_t offset, uint64_t range)
{
   assert(offset <= buffer->size);
   if (range == VK_WHOLE_SIZE) {
      return buffer->size - offset;
   } else {
      assert(range + offset >= range);
      assert(range + offset <= buffer->size);
      return range;
   }
}
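
/* Editor's note: small usage sketch (not in the original header) showing how
 * VK_WHOLE_SIZE is resolved by anv_buffer_get_range(); e.g. a 256-byte buffer
 * bound at offset 64 yields a 192-byte range.
 */
static inline uint64_t
anv_example_whole_size_range(struct anv_buffer *buffer, uint64_t offset)
{
   return anv_buffer_get_range(buffer, offset, VK_WHOLE_SIZE);
}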
enum anv_cmd_dirty_bits {
   ANV_CMD_DIRTY_DYNAMIC_VIEWPORT             = 1 << 0, /* VK_DYNAMIC_STATE_VIEWPORT */
   ANV_CMD_DIRTY_DYNAMIC_SCISSOR              = 1 << 1, /* VK_DYNAMIC_STATE_SCISSOR */
   ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH           = 1 << 2, /* VK_DYNAMIC_STATE_LINE_WIDTH */
   ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS           = 1 << 3, /* VK_DYNAMIC_STATE_DEPTH_BIAS */
   ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS      = 1 << 4, /* VK_DYNAMIC_STATE_BLEND_CONSTANTS */
   ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS         = 1 << 5, /* VK_DYNAMIC_STATE_DEPTH_BOUNDS */
   ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK = 1 << 6, /* VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK */
   ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK   = 1 << 7, /* VK_DYNAMIC_STATE_STENCIL_WRITE_MASK */
   ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE    = 1 << 8, /* VK_DYNAMIC_STATE_STENCIL_REFERENCE */
   ANV_CMD_DIRTY_PIPELINE                     = 1 << 9,
   ANV_CMD_DIRTY_INDEX_BUFFER                 = 1 << 10,
   ANV_CMD_DIRTY_RENDER_TARGETS               = 1 << 11,
   ANV_CMD_DIRTY_XFB_ENABLE                   = 1 << 12,
   ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE         = 1 << 13, /* VK_DYNAMIC_STATE_LINE_STIPPLE_EXT */
};
typedef uint32_t anv_cmd_dirty_mask_t;
2238 #define ANV_CMD_DIRTY_DYNAMIC_ALL \
2239 (ANV_CMD_DIRTY_DYNAMIC_VIEWPORT | \
2240 ANV_CMD_DIRTY_DYNAMIC_SCISSOR | \
2241 ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH | \
2242 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS | \
2243 ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS | \
2244 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS | \
2245 ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK | \
2246 ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK | \
2247 ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE | \
2248 ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE)
static inline enum anv_cmd_dirty_bits
anv_cmd_dirty_bit_for_vk_dynamic_state(VkDynamicState vk_state)
{
   switch (vk_state) {
   case VK_DYNAMIC_STATE_VIEWPORT:
      return ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
   case VK_DYNAMIC_STATE_SCISSOR:
      return ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
   case VK_DYNAMIC_STATE_LINE_WIDTH:
      return ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
   case VK_DYNAMIC_STATE_DEPTH_BIAS:
      return ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
   case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
      return ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
   case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
      return ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
   case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
      return ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
   case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
      return ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
   case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
      return ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
   case VK_DYNAMIC_STATE_LINE_STIPPLE_EXT:
      return ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE;
   default:
      assert(!"Unsupported dynamic state");
      return 0;
   }
}
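
/* Editor's note: illustrative sketch (not in the original header) of how the
 * mapping above is typically consumed when recording vkCmdSet* dynamic state.
 * Kept in an #if 0 block because struct anv_cmd_buffer is declared further
 * below in this header.
 */
#if 0
   cmd_buffer->state.gfx.dirty |=
      anv_cmd_dirty_bit_for_vk_dynamic_state(VK_DYNAMIC_STATE_VIEWPORT);
#endif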
enum anv_pipe_bits {
   ANV_PIPE_DEPTH_CACHE_FLUSH_BIT            = (1 << 0),
   ANV_PIPE_STALL_AT_SCOREBOARD_BIT          = (1 << 1),
   ANV_PIPE_STATE_CACHE_INVALIDATE_BIT       = (1 << 2),
   ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT    = (1 << 3),
   ANV_PIPE_VF_CACHE_INVALIDATE_BIT          = (1 << 4),
   ANV_PIPE_DATA_CACHE_FLUSH_BIT             = (1 << 5),
   ANV_PIPE_TILE_CACHE_FLUSH_BIT             = (1 << 6),
   ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT     = (1 << 10),
   ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT = (1 << 11),
   ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT    = (1 << 12),
   ANV_PIPE_DEPTH_STALL_BIT                  = (1 << 13),
   ANV_PIPE_CS_STALL_BIT                     = (1 << 20),
   ANV_PIPE_END_OF_PIPE_SYNC_BIT             = (1 << 21),

   /* This bit does not exist directly in PIPE_CONTROL. Instead it means that
    * a flush has happened but not a CS stall. The next time we do any sort
    * of invalidation we need to insert a CS stall at that time. Otherwise,
    * we would have to CS stall on every flush which could be bad.
    */
   ANV_PIPE_NEEDS_END_OF_PIPE_SYNC_BIT       = (1 << 22),

   /* This bit does not exist directly in PIPE_CONTROL. It means that render
    * target operations related to transfer commands with VkBuffer as
    * destination are ongoing. Some operations like copies on the command
    * streamer might need to be aware of this to trigger the appropriate stall
    * before they can proceed with the copy.
    */
   ANV_PIPE_RENDER_TARGET_BUFFER_WRITES      = (1 << 23),

   /* This bit does not exist directly in PIPE_CONTROL. It means that Gen12
    * AUX-TT data has changed and we need to invalidate AUX-TT data. This is
    * done by writing the AUX-TT register.
    */
   ANV_PIPE_AUX_TABLE_INVALIDATE_BIT         = (1 << 24),

   /* This bit does not exist directly in PIPE_CONTROL. It means that a
    * PIPE_CONTROL with a post-sync operation will follow. This is used to
    * implement a workaround for Gen9.
    */
   ANV_PIPE_POST_SYNC_BIT                    = (1 << 25),
};
2324 #define ANV_PIPE_FLUSH_BITS ( \
2325 ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | \
2326 ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
2327 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | \
2328 ANV_PIPE_TILE_CACHE_FLUSH_BIT)
2330 #define ANV_PIPE_STALL_BITS ( \
2331 ANV_PIPE_STALL_AT_SCOREBOARD_BIT | \
2332 ANV_PIPE_DEPTH_STALL_BIT | \
2333 ANV_PIPE_CS_STALL_BIT)
2335 #define ANV_PIPE_INVALIDATE_BITS ( \
2336 ANV_PIPE_STATE_CACHE_INVALIDATE_BIT | \
2337 ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT | \
2338 ANV_PIPE_VF_CACHE_INVALIDATE_BIT | \
2339 ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
2340 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT | \
2341 ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT | \
2342 ANV_PIPE_AUX_TABLE_INVALIDATE_BIT)
static inline enum anv_pipe_bits
anv_pipe_flush_bits_for_access_flags(VkAccessFlags flags)
{
   enum anv_pipe_bits pipe_bits = 0;

   unsigned b;
   for_each_bit(b, flags) {
      switch ((VkAccessFlagBits)(1 << b)) {
      case VK_ACCESS_SHADER_WRITE_BIT:
         /* We're transitioning a buffer that was previously used as write
          * destination through the data port. To make its content available
          * to future operations, flush the data cache.
          */
         pipe_bits |= ANV_PIPE_DATA_CACHE_FLUSH_BIT;
         break;
      case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
         /* We're transitioning a buffer that was previously used as render
          * target. To make its content available to future operations, flush
          * the render target cache.
          */
         pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
         break;
      case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
         /* We're transitioning a buffer that was previously used as depth
          * buffer. To make its content available to future operations, flush
          * the depth cache.
          */
         pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
         break;
      case VK_ACCESS_TRANSFER_WRITE_BIT:
         /* We're transitioning a buffer that was previously used as a
          * transfer write destination. Generic write operations include color
          * & depth operations as well as buffer operations like:
          *    - vkCmdClearColorImage()
          *    - vkCmdClearDepthStencilImage()
          *    - vkCmdBlitImage()
          *    - vkCmdCopy*(), vkCmdUpdate*(), vkCmdFill*()
          *
          * Most of these operations are implemented using Blorp which writes
          * through the render target, so flush that cache to make it visible
          * to future operations. And for depth related operations we also
          * need to flush the depth cache.
          */
         pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
         pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
         break;
      case VK_ACCESS_MEMORY_WRITE_BIT:
         /* We're transitioning a buffer for generic write operations. Flush
          * all the caches.
          */
         pipe_bits |= ANV_PIPE_FLUSH_BITS;
         break;
      default:
         break; /* Nothing to do */
      }
   }

   return pipe_bits;
}
static inline enum anv_pipe_bits
anv_pipe_invalidate_bits_for_access_flags(VkAccessFlags flags)
{
   enum anv_pipe_bits pipe_bits = 0;

   unsigned b;
   for_each_bit(b, flags) {
      switch ((VkAccessFlagBits)(1 << b)) {
      case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
         /* Indirect draw commands take a buffer as input that we're going to
          * read from the command streamer to load some of the HW registers
          * (see genX_cmd_buffer.c:load_indirect_parameters). This requires a
          * command streamer stall so that all the cache flushes have
          * completed before the command streamer loads from memory.
          */
         pipe_bits |= ANV_PIPE_CS_STALL_BIT;
         /* Indirect draw commands also set gl_BaseVertex & gl_BaseIndex
          * through a vertex buffer, so invalidate that cache.
          */
         pipe_bits |= ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
         /* For CmdDispatchIndirect, we also load gl_NumWorkGroups through a
          * UBO from the buffer, so we need to invalidate constant cache.
          */
         pipe_bits |= ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
         break;
      case VK_ACCESS_INDEX_READ_BIT:
      case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
         /* We're transitioning a buffer to be used as input for vkCmdDraw*
          * commands, so we invalidate the VF cache to make sure there is no
          * stale data when we start rendering.
          */
         pipe_bits |= ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
         break;
      case VK_ACCESS_UNIFORM_READ_BIT:
         /* We're transitioning a buffer to be used as uniform data. Because
          * uniform is accessed through the data port & sampler, we need to
          * invalidate the texture cache (sampler) & constant cache (data
          * port) to avoid stale data.
          */
         pipe_bits |= ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
         pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
         break;
      case VK_ACCESS_SHADER_READ_BIT:
      case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
      case VK_ACCESS_TRANSFER_READ_BIT:
         /* Transitioning a buffer to be read through the sampler, so
          * invalidate the texture cache, we don't want any stale data.
          */
         pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
         break;
      case VK_ACCESS_MEMORY_READ_BIT:
         /* Transitioning a buffer for generic read, invalidate all the
          * caches.
          */
         pipe_bits |= ANV_PIPE_INVALIDATE_BITS;
         break;
      case VK_ACCESS_MEMORY_WRITE_BIT:
         /* Generic write, make sure all previously written things land in
          * memory.
          */
         pipe_bits |= ANV_PIPE_FLUSH_BITS;
         break;
      case VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT:
         /* Transitioning a buffer for conditional rendering. We'll load the
          * content of this buffer into HW registers using the command
          * streamer, so we need to stall the command streamer to make sure
          * any in-flight flush operations have completed.
          */
         pipe_bits |= ANV_PIPE_CS_STALL_BIT;
         break;
      default:
         break; /* Nothing to do */
      }
   }

   return pipe_bits;
}
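
/* Editor's note: illustrative helper (not in the original header) showing how
 * the two translation functions above combine for a memory barrier: source
 * access masks become cache flushes and destination access masks become
 * invalidations, which the command buffer then accumulates as pending
 * PIPE_CONTROL bits.
 */
static inline enum anv_pipe_bits
anv_example_barrier_pipe_bits(VkAccessFlags src_access, VkAccessFlags dst_access)
{
   return anv_pipe_flush_bits_for_access_flags(src_access) |
          anv_pipe_invalidate_bits_for_access_flags(dst_access);
}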
2482 #define VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV ( \
2483 VK_IMAGE_ASPECT_COLOR_BIT | \
2484 VK_IMAGE_ASPECT_PLANE_0_BIT | \
2485 VK_IMAGE_ASPECT_PLANE_1_BIT | \
2486 VK_IMAGE_ASPECT_PLANE_2_BIT)
2487 #define VK_IMAGE_ASPECT_PLANES_BITS_ANV ( \
2488 VK_IMAGE_ASPECT_PLANE_0_BIT | \
2489 VK_IMAGE_ASPECT_PLANE_1_BIT | \
2490 VK_IMAGE_ASPECT_PLANE_2_BIT)
struct anv_vertex_binding {
   struct anv_buffer *buffer;
   VkDeviceSize offset;
};

struct anv_xfb_binding {
   struct anv_buffer *buffer;
   VkDeviceSize offset;
   VkDeviceSize size;
};
2503 struct anv_push_constants
{
2504 /** Push constant data provided by the client through vkPushConstants */
2505 uint8_t client_data
[MAX_PUSH_CONSTANTS_SIZE
];
2507 /** Dynamic offsets for dynamic UBOs and SSBOs */
2508 uint32_t dynamic_offsets
[MAX_DYNAMIC_BUFFERS
];
2510 uint64_t push_reg_mask
;
2512 /** Pad out to a multiple of 32 bytes */
2516 /** Base workgroup ID
2518 * Used for vkCmdDispatchBase.
2520 uint32_t base_work_group_id
[3];
2524 * This is never set by software but is implicitly filled out when
2525 * uploading the push constants for compute shaders.
2527 uint32_t subgroup_id
;
2531 struct anv_dynamic_state
{
2534 VkViewport viewports
[MAX_VIEWPORTS
];
2539 VkRect2D scissors
[MAX_SCISSORS
];
2550 float blend_constants
[4];
2560 } stencil_compare_mask
;
2565 } stencil_write_mask
;
2570 } stencil_reference
;
2578 extern const struct anv_dynamic_state default_dynamic_state
;
2580 uint32_t anv_dynamic_state_copy(struct anv_dynamic_state
*dest
,
2581 const struct anv_dynamic_state
*src
,
2582 uint32_t copy_mask
);
2584 struct anv_surface_state
{
2585 struct anv_state state
;
2586 /** Address of the surface referred to by this state
2588 * This address is relative to the start of the BO.
2590 struct anv_address address
;
2591 /* Address of the aux surface, if any
2593 * This field is ANV_NULL_ADDRESS if and only if no aux surface exists.
2595 * With the exception of gen8, the bottom 12 bits of this address' offset
2596 * include extra aux information.
2598 struct anv_address aux_address
;
2599 /* Address of the clear color, if any
2601 * This address is relative to the start of the BO.
2603 struct anv_address clear_address
;
2607 * Attachment state when recording a renderpass instance.
2609 * The clear value is valid only if there exists a pending clear.
2611 struct anv_attachment_state
{
2612 enum isl_aux_usage aux_usage
;
2613 struct anv_surface_state color
;
2614 struct anv_surface_state input
;
2616 VkImageLayout current_layout
;
2617 VkImageLayout current_stencil_layout
;
2618 VkImageAspectFlags pending_clear_aspects
;
2619 VkImageAspectFlags pending_load_aspects
;
2621 VkClearValue clear_value
;
2623 /* When multiview is active, attachments with a renderpass clear
2624 * operation have their respective layers cleared on the first
2625 * subpass that uses them, and only in that subpass. We keep track
2626 * of this using a bitfield to indicate which layers of an attachment
2627 * have not been cleared yet when multiview is active.
2629 uint32_t pending_clear_views
;
2630 struct anv_image_view
* image_view
;
/** State tracking for vertex buffer flushes
 *
 * On Gen8-9, the VF cache only considers the bottom 32 bits of memory
 * addresses. If you happen to have two vertex buffers which get placed
 * exactly 4 GiB apart and use them in back-to-back draw calls, you can get
 * collisions. In order to solve this problem, we track vertex address ranges
 * which are live in the cache and invalidate the cache if one ever exceeds 32
 * bits.
 */
struct anv_vb_cache_range {
   /* Virtual address at which the live vertex buffer cache range starts for
    * this vertex buffer index.
    */
   uint64_t start;

   /* Virtual address of the byte after where vertex buffer cache range ends.
    * This is exclusive such that end - start is the size of the range.
    */
   uint64_t end;
};
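
/* Editor's note: illustrative helper (not in the original header) showing the
 * kind of check the comment above implies: once the tracked live range spans
 * more than the bottom 32 bits of the address space, the VF cache can alias
 * two buffers and must be invalidated.
 */
static inline bool
anv_example_vb_range_exceeds_32bits(const struct anv_vb_cache_range *bound)
{
   return bound->end > bound->start &&
          (bound->end - 1) >> 32 != bound->start >> 32;
}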
2654 /** State tracking for particular pipeline bind point
2656 * This struct is the base struct for anv_cmd_graphics_state and
2657 * anv_cmd_compute_state. These are used to track state which is bound to a
2658 * particular type of pipeline. Generic state that applies per-stage such as
2659 * binding table offsets and push constants is tracked generically with a
2660 * per-stage array in anv_cmd_state.
2662 struct anv_cmd_pipeline_state
{
2663 struct anv_descriptor_set
*descriptors
[MAX_SETS
];
2664 struct anv_push_descriptor_set
*push_descriptors
[MAX_SETS
];
2667 /** State tracking for graphics pipeline
2669 * This has anv_cmd_pipeline_state as a base struct to track things which get
2670 * bound to a graphics pipeline. Along with general pipeline bind point state
2671 * which is in the anv_cmd_pipeline_state base struct, it also contains other
2672 * state which is graphics-specific.
2674 struct anv_cmd_graphics_state
{
2675 struct anv_cmd_pipeline_state base
;
2677 struct anv_graphics_pipeline
*pipeline
;
2679 anv_cmd_dirty_mask_t dirty
;
2682 struct anv_vb_cache_range ib_bound_range
;
2683 struct anv_vb_cache_range ib_dirty_range
;
2684 struct anv_vb_cache_range vb_bound_ranges
[33];
2685 struct anv_vb_cache_range vb_dirty_ranges
[33];
2687 struct anv_dynamic_state dynamic
;
2690 struct anv_buffer
*index_buffer
;
2691 uint32_t index_type
; /**< 3DSTATE_INDEX_BUFFER.IndexFormat */
2692 uint32_t index_offset
;
2696 /** State tracking for compute pipeline
2698 * This has anv_cmd_pipeline_state as a base struct to track things which get
2699 * bound to a compute pipeline. Along with general pipeline bind point state
2700 * which is in the anv_cmd_pipeline_state base struct, it also contains other
2701 * state which is compute-specific.
2703 struct anv_cmd_compute_state
{
2704 struct anv_cmd_pipeline_state base
;
2706 struct anv_compute_pipeline
*pipeline
;
2708 bool pipeline_dirty
;
2710 struct anv_address num_workgroups
;
2713 /** State required while building cmd buffer */
2714 struct anv_cmd_state
{
2715 /* PIPELINE_SELECT.PipelineSelection */
2716 uint32_t current_pipeline
;
2717 const struct gen_l3_config
* current_l3_config
;
2718 uint32_t last_aux_map_state
;
2720 struct anv_cmd_graphics_state gfx
;
2721 struct anv_cmd_compute_state compute
;
2723 enum anv_pipe_bits pending_pipe_bits
;
2724 VkShaderStageFlags descriptors_dirty
;
2725 VkShaderStageFlags push_constants_dirty
;
2727 struct anv_framebuffer
* framebuffer
;
2728 struct anv_render_pass
* pass
;
2729 struct anv_subpass
* subpass
;
2730 VkRect2D render_area
;
2731 uint32_t restart_index
;
2732 struct anv_vertex_binding vertex_bindings
[MAX_VBS
];
2734 struct anv_xfb_binding xfb_bindings
[MAX_XFB_BUFFERS
];
2735 VkShaderStageFlags push_constant_stages
;
2736 struct anv_push_constants push_constants
[MESA_SHADER_STAGES
];
2737 struct anv_state binding_tables
[MESA_SHADER_STAGES
];
2738 struct anv_state samplers
[MESA_SHADER_STAGES
];
2740 unsigned char sampler_sha1s
[MESA_SHADER_STAGES
][20];
2741 unsigned char surface_sha1s
[MESA_SHADER_STAGES
][20];
2742 unsigned char push_sha1s
[MESA_SHADER_STAGES
][20];
2745 * Whether or not the gen8 PMA fix is enabled. We ensure that, at the top
2746 * of any command buffer it is disabled by disabling it in EndCommandBuffer
2747 * and before invoking the secondary in ExecuteCommands.
2749 bool pma_fix_enabled
;
2752 * Whether or not we know for certain that HiZ is enabled for the current
2753 * subpass. If, for whatever reason, we are unsure as to whether HiZ is
2754 * enabled or not, this will be false.
2758 bool conditional_render_enabled
;
2761 * Last rendering scale argument provided to
2762 * genX(cmd_buffer_emit_hashing_mode)().
2764 unsigned current_hash_scale
;
2767 * Array length is anv_cmd_state::pass::attachment_count. Array content is
2768 * valid only when recording a render pass instance.
2770 struct anv_attachment_state
* attachments
;
2773 * Surface states for color render targets. These are stored in a single
2774 * flat array. For depth-stencil attachments, the surface state is simply
2777 struct anv_state attachment_states
;
2780 * A null surface state of the right size to match the framebuffer. This
2781 * is one of the states in attachment_states.
2783 struct anv_state null_surface_state
;
2786 struct anv_cmd_pool
{
2787 struct vk_object_base base
;
2788 VkAllocationCallbacks alloc
;
2789 struct list_head cmd_buffers
;
2792 #define ANV_CMD_BUFFER_BATCH_SIZE 8192
2794 enum anv_cmd_buffer_exec_mode
{
2795 ANV_CMD_BUFFER_EXEC_MODE_PRIMARY
,
2796 ANV_CMD_BUFFER_EXEC_MODE_EMIT
,
2797 ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT
,
2798 ANV_CMD_BUFFER_EXEC_MODE_CHAIN
,
2799 ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN
,
2802 struct anv_cmd_buffer
{
2803 struct vk_object_base base
;
2805 struct anv_device
* device
;
2807 struct anv_cmd_pool
* pool
;
2808 struct list_head pool_link
;
2810 struct anv_batch batch
;
2812 /* Fields required for the actual chain of anv_batch_bo's.
2814 * These fields are initialized by anv_cmd_buffer_init_batch_bo_chain().
2816 struct list_head batch_bos
;
2817 enum anv_cmd_buffer_exec_mode exec_mode
;
2819 /* A vector of anv_batch_bo pointers for every batch or surface buffer
2820 * referenced by this command buffer
2822 * initialized by anv_cmd_buffer_init_batch_bo_chain()
2824 struct u_vector seen_bbos
;
2826 /* A vector of int32_t's for every block of binding tables.
2828 * initialized by anv_cmd_buffer_init_batch_bo_chain()
2830 struct u_vector bt_block_states
;
2831 struct anv_state bt_next
;
2833 struct anv_reloc_list surface_relocs
;
2834 /** Last seen surface state block pool center bo offset */
2835 uint32_t last_ss_pool_center
;
2837 /* Serial for tracking buffer completion */
2840 /* Stream objects for storing temporary data */
2841 struct anv_state_stream surface_state_stream
;
2842 struct anv_state_stream dynamic_state_stream
;
2844 VkCommandBufferUsageFlags usage_flags
;
2845 VkCommandBufferLevel level
;
2847 struct anv_cmd_state state
;
2849 /* Set by SetPerformanceMarkerINTEL, written into queries by CmdBeginQuery */
2850 uint64_t intel_perf_marker
;
2853 VkResult
anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer
*cmd_buffer
);
2854 void anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer
*cmd_buffer
);
2855 void anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer
*cmd_buffer
);
2856 void anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer
*cmd_buffer
);
2857 void anv_cmd_buffer_add_secondary(struct anv_cmd_buffer
*primary
,
2858 struct anv_cmd_buffer
*secondary
);
2859 void anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer
*cmd_buffer
);
2860 VkResult
anv_cmd_buffer_execbuf(struct anv_queue
*queue
,
2861 struct anv_cmd_buffer
*cmd_buffer
,
2862 const VkSemaphore
*in_semaphores
,
2863 const uint64_t *in_wait_values
,
2864 uint32_t num_in_semaphores
,
2865 const VkSemaphore
*out_semaphores
,
2866 const uint64_t *out_signal_values
,
2867 uint32_t num_out_semaphores
,
2870 VkResult
anv_cmd_buffer_reset(struct anv_cmd_buffer
*cmd_buffer
);
2872 struct anv_state
anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer
*cmd_buffer
,
2873 const void *data
, uint32_t size
, uint32_t alignment
);
2874 struct anv_state
anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer
*cmd_buffer
,
2875 uint32_t *a
, uint32_t *b
,
2876 uint32_t dwords
, uint32_t alignment
);
2879 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer
*cmd_buffer
);
2881 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer
*cmd_buffer
,
2882 uint32_t entries
, uint32_t *state_offset
);
2884 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer
*cmd_buffer
);
2886 anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer
*cmd_buffer
,
2887 uint32_t size
, uint32_t alignment
);
2890 anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer
*cmd_buffer
);
2892 void gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer
*cmd_buffer
);
2893 void gen8_cmd_buffer_emit_depth_viewport(struct anv_cmd_buffer
*cmd_buffer
,
2894 bool depth_clamp_enable
);
2895 void gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer
*cmd_buffer
);
2897 void anv_cmd_buffer_setup_attachments(struct anv_cmd_buffer
*cmd_buffer
,
2898 struct anv_render_pass
*pass
,
2899 struct anv_framebuffer
*framebuffer
,
2900 const VkClearValue
*clear_values
);
2902 void anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer
*cmd_buffer
);
2905 anv_cmd_buffer_push_constants(struct anv_cmd_buffer
*cmd_buffer
,
2906 gl_shader_stage stage
);
2908 anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer
*cmd_buffer
);
2910 const struct anv_image_view
*
2911 anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer
*cmd_buffer
);
2914 anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer
*cmd_buffer
,
2915 uint32_t num_entries
,
2916 uint32_t *state_offset
,
2917 struct anv_state
*bt_state
);
2919 void anv_cmd_buffer_dump(struct anv_cmd_buffer
*cmd_buffer
);
2921 void anv_cmd_emit_conditional_render_predicate(struct anv_cmd_buffer
*cmd_buffer
);
2923 enum anv_fence_type
{
2924 ANV_FENCE_TYPE_NONE
= 0,
2926 ANV_FENCE_TYPE_WSI_BO
,
2927 ANV_FENCE_TYPE_SYNCOBJ
,
2931 enum anv_bo_fence_state
{
2932 /** Indicates that this is a new (or newly reset fence) */
2933 ANV_BO_FENCE_STATE_RESET
,
2935 /** Indicates that this fence has been submitted to the GPU but is still
2936 * (as far as we know) in use by the GPU.
2938 ANV_BO_FENCE_STATE_SUBMITTED
,
2940 ANV_BO_FENCE_STATE_SIGNALED
,
2943 struct anv_fence_impl
{
2944 enum anv_fence_type type
;
2947 /** Fence implementation for BO fences
2949 * These fences use a BO and a set of CPU-tracked state flags. The BO
2950 * is added to the object list of the last execbuf call in a QueueSubmit
2951 * and is marked EXEC_WRITE. The state flags track when the BO has been
2952 * submitted to the kernel. We need to do this because Vulkan lets you
2953 * wait on a fence that has not yet been submitted and I915_GEM_BUSY
2954 * will say it's idle in this case.
2958 enum anv_bo_fence_state state
;
2961 /** DRM syncobj handle for syncobj-based fences */
2965 struct wsi_fence
*fence_wsi
;
2970 struct vk_object_base base
;
2972 /* Permanent fence state. Every fence has some form of permanent state
2973 * (type != ANV_SEMAPHORE_TYPE_NONE). This may be a BO to fence on (for
2974 * cross-process fences) or it could just be a dummy for use internally.
2976 struct anv_fence_impl permanent
;
2978 /* Temporary fence state. A fence *may* have temporary state. That state
2979 * is added to the fence by an import operation and is reset back to
2980 * ANV_SEMAPHORE_TYPE_NONE when the fence is reset. A fence with temporary
2981 * state cannot be signaled because the fence must already be signaled
2982 * before the temporary state can be exported from the fence in the other
2983 * process and imported here.
2985 struct anv_fence_impl temporary
;
2988 void anv_fence_reset_temporary(struct anv_device
*device
,
2989 struct anv_fence
*fence
);
2992 struct vk_object_base base
;
2994 struct anv_state state
;
2997 enum anv_semaphore_type
{
2998 ANV_SEMAPHORE_TYPE_NONE
= 0,
2999 ANV_SEMAPHORE_TYPE_DUMMY
,
3000 ANV_SEMAPHORE_TYPE_BO
,
3001 ANV_SEMAPHORE_TYPE_WSI_BO
,
3002 ANV_SEMAPHORE_TYPE_SYNC_FILE
,
3003 ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ
,
3004 ANV_SEMAPHORE_TYPE_TIMELINE
,
3007 struct anv_timeline_point
{
3008 struct list_head link
;
/* Number of waiters on this point; when > 0 the point should not be garbage
 * collected.
 */
3017 /* BO used for synchronization. */
3021 struct anv_timeline
{
3022 pthread_mutex_t mutex
;
3023 pthread_cond_t cond
;
3025 uint64_t highest_past
;
3026 uint64_t highest_pending
;
3028 struct list_head points
;
3029 struct list_head free_points
;
3032 struct anv_semaphore_impl
{
3033 enum anv_semaphore_type type
;
3036 /* A BO representing this semaphore when type == ANV_SEMAPHORE_TYPE_BO
3037 * or type == ANV_SEMAPHORE_TYPE_WSI_BO. This BO will be added to the
3038 * object list on any execbuf2 calls for which this semaphore is used as
3039 * a wait or signal fence. When used as a signal fence or when type ==
3040 * ANV_SEMAPHORE_TYPE_WSI_BO, the EXEC_OBJECT_WRITE flag will be set.
3044 /* The sync file descriptor when type == ANV_SEMAPHORE_TYPE_SYNC_FILE.
3045 * If the semaphore is in the unsignaled state due to either just being
3046 * created or because it has been used for a wait, fd will be -1.
3050 /* Sync object handle when type == ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ.
3051 * Unlike GEM BOs, DRM sync objects aren't deduplicated by the kernel on
3052 * import so we don't need to bother with a userspace cache.
/* Non-shareable timeline semaphore
 *
 * Used when the kernel doesn't have support for timeline semaphores.
 */
3060 struct anv_timeline timeline
;
3064 struct anv_semaphore
{
3065 struct vk_object_base base
;
3069 /* Permanent semaphore state. Every semaphore has some form of permanent
3070 * state (type != ANV_SEMAPHORE_TYPE_NONE). This may be a BO to fence on
 * (for cross-process semaphores) or it could just be a dummy for use
3074 struct anv_semaphore_impl permanent
;
3076 /* Temporary semaphore state. A semaphore *may* have temporary state.
3077 * That state is added to the semaphore by an import operation and is reset
3078 * back to ANV_SEMAPHORE_TYPE_NONE when the semaphore is waited on. A
3079 * semaphore with temporary state cannot be signaled because the semaphore
3080 * must already be signaled before the temporary state can be exported from
3081 * the semaphore in the other process and imported here.
3083 struct anv_semaphore_impl temporary
;
3086 void anv_semaphore_reset_temporary(struct anv_device
*device
,
3087 struct anv_semaphore
*semaphore
);
3089 struct anv_shader_module
{
3090 struct vk_object_base base
;
3092 unsigned char sha1
[20];
static inline gl_shader_stage
vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
{
   assert(__builtin_popcount(vk_stage) == 1);
   return ffs(vk_stage) - 1;
}

static inline VkShaderStageFlagBits
mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
{
   return (1 << mesa_stage);
}
3110 #define ANV_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)
3112 #define anv_foreach_stage(stage, stage_bits) \
3113 for (gl_shader_stage stage, \
3114 __tmp = (gl_shader_stage)((stage_bits) & ANV_STAGE_MASK); \
3115 stage = __builtin_ffs(__tmp) - 1, __tmp; \
3116 __tmp &= ~(1 << (stage)))
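
/* Editor's note: small usage sketch (not in the original header) iterating
 * the stages present in a VkShaderStageFlags mask with anv_foreach_stage().
 */
static inline uint32_t
anv_example_count_stages(VkShaderStageFlags stage_bits)
{
   uint32_t count = 0;
   anv_foreach_stage(s, stage_bits)
      count++;
   return count;
}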
3118 struct anv_pipeline_bind_map
{
3119 unsigned char surface_sha1
[20];
3120 unsigned char sampler_sha1
[20];
3121 unsigned char push_sha1
[20];
3123 uint32_t surface_count
;
3124 uint32_t sampler_count
;
3126 struct anv_pipeline_binding
* surface_to_descriptor
;
3127 struct anv_pipeline_binding
* sampler_to_descriptor
;
3129 struct anv_push_range push_ranges
[4];
3132 struct anv_shader_bin_key
{
3137 struct anv_shader_bin
{
3140 gl_shader_stage stage
;
3142 const struct anv_shader_bin_key
*key
;
3144 struct anv_state kernel
;
3145 uint32_t kernel_size
;
3147 struct anv_state constant_data
;
3148 uint32_t constant_data_size
;
3150 const struct brw_stage_prog_data
*prog_data
;
3151 uint32_t prog_data_size
;
3153 struct brw_compile_stats stats
[3];
3156 struct nir_xfb_info
*xfb_info
;
3158 struct anv_pipeline_bind_map bind_map
;
3161 struct anv_shader_bin
*
3162 anv_shader_bin_create(struct anv_device
*device
,
3163 gl_shader_stage stage
,
3164 const void *key
, uint32_t key_size
,
3165 const void *kernel
, uint32_t kernel_size
,
3166 const void *constant_data
, uint32_t constant_data_size
,
3167 const struct brw_stage_prog_data
*prog_data
,
3168 uint32_t prog_data_size
,
3169 const struct brw_compile_stats
*stats
, uint32_t num_stats
,
3170 const struct nir_xfb_info
*xfb_info
,
3171 const struct anv_pipeline_bind_map
*bind_map
);
void
anv_shader_bin_destroy(struct anv_device *device, struct anv_shader_bin *shader);

static inline void
anv_shader_bin_ref(struct anv_shader_bin *shader)
{
   assert(shader && shader->ref_cnt >= 1);
   p_atomic_inc(&shader->ref_cnt);
}

static inline void
anv_shader_bin_unref(struct anv_device *device, struct anv_shader_bin *shader)
{
   assert(shader && shader->ref_cnt >= 1);
   if (p_atomic_dec_zero(&shader->ref_cnt))
      anv_shader_bin_destroy(device, shader);
}
3191 struct anv_pipeline_executable
{
3192 gl_shader_stage stage
;
3194 struct brw_compile_stats stats
;
3200 enum anv_pipeline_type
{
3201 ANV_PIPELINE_GRAPHICS
,
3202 ANV_PIPELINE_COMPUTE
,
3205 struct anv_pipeline
{
3206 struct vk_object_base base
;
3208 struct anv_device
* device
;
3210 struct anv_batch batch
;
3211 struct anv_reloc_list batch_relocs
;
3215 enum anv_pipeline_type type
;
3216 VkPipelineCreateFlags flags
;
3218 struct util_dynarray executables
;
3220 const struct gen_l3_config
* l3_config
;
3223 struct anv_graphics_pipeline
{
3224 struct anv_pipeline base
;
3226 uint32_t batch_data
[512];
3228 anv_cmd_dirty_mask_t dynamic_state_mask
;
3229 struct anv_dynamic_state dynamic_state
;
3233 struct anv_subpass
* subpass
;
3235 struct anv_shader_bin
* shaders
[MESA_SHADER_STAGES
];
3237 VkShaderStageFlags active_stages
;
3239 bool primitive_restart
;
3241 bool depth_test_enable
;
3242 bool writes_stencil
;
3243 bool stencil_test_enable
;
3244 bool depth_clamp_enable
;
3245 bool depth_clip_enable
;
3246 bool sample_shading_enable
;
3248 bool depth_bounds_test_enable
;
3250 /* When primitive replication is used, subpass->view_mask will describe what
3251 * views to replicate.
3253 bool use_primitive_replication
;
3255 struct anv_state blend_state
;
3258 struct anv_pipeline_vertex_binding
{
3261 uint32_t instance_divisor
;
3266 uint32_t depth_stencil_state
[3];
3272 uint32_t wm_depth_stencil
[3];
3276 uint32_t wm_depth_stencil
[4];
3280 struct anv_compute_pipeline
{
3281 struct anv_pipeline base
;
3283 struct anv_shader_bin
* cs
;
3284 uint32_t cs_right_mask
;
3285 uint32_t batch_data
[9];
3286 uint32_t interface_descriptor_data
[8];
3289 #define ANV_DECL_PIPELINE_DOWNCAST(pipe_type, pipe_enum) \
3290 static inline struct anv_##pipe_type##_pipeline * \
3291 anv_pipeline_to_##pipe_type(struct anv_pipeline *pipeline) \
3293 assert(pipeline->type == pipe_enum); \
3294 return (struct anv_##pipe_type##_pipeline *) pipeline; \
ANV_DECL_PIPELINE_DOWNCAST(graphics, ANV_PIPELINE_GRAPHICS)
ANV_DECL_PIPELINE_DOWNCAST(compute, ANV_PIPELINE_COMPUTE)
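
/* Editor's note: illustrative sketch (not in the original header) of the
 * downcast helpers generated above: check the pipeline type, then downcast to
 * reach pipeline-kind specific state.
 */
static inline struct anv_dynamic_state *
anv_example_pipeline_dynamic_state(struct anv_pipeline *pipeline)
{
   if (pipeline->type != ANV_PIPELINE_GRAPHICS)
      return NULL;
   return &anv_pipeline_to_graphics(pipeline)->dynamic_state;
}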
static inline bool
anv_pipeline_has_stage(const struct anv_graphics_pipeline *pipeline,
                       gl_shader_stage stage)
{
   return (pipeline->active_stages & mesa_to_vk_shader_stage(stage)) != 0;
}
3307 #define ANV_DECL_GET_GRAPHICS_PROG_DATA_FUNC(prefix, stage) \
3308 static inline const struct brw_##prefix##_prog_data * \
3309 get_##prefix##_prog_data(const struct anv_graphics_pipeline *pipeline) \
3311 if (anv_pipeline_has_stage(pipeline, stage)) { \
3312 return (const struct brw_##prefix##_prog_data *) \
3313 pipeline->shaders[stage]->prog_data; \
ANV_DECL_GET_GRAPHICS_PROG_DATA_FUNC(vs, MESA_SHADER_VERTEX)
ANV_DECL_GET_GRAPHICS_PROG_DATA_FUNC(tcs, MESA_SHADER_TESS_CTRL)
ANV_DECL_GET_GRAPHICS_PROG_DATA_FUNC(tes, MESA_SHADER_TESS_EVAL)
ANV_DECL_GET_GRAPHICS_PROG_DATA_FUNC(gs, MESA_SHADER_GEOMETRY)
ANV_DECL_GET_GRAPHICS_PROG_DATA_FUNC(wm, MESA_SHADER_FRAGMENT)
static inline const struct brw_cs_prog_data *
get_cs_prog_data(const struct anv_compute_pipeline *pipeline)
{
   assert(pipeline->cs);
   return (const struct brw_cs_prog_data *) pipeline->cs->prog_data;
}
static inline const struct brw_vue_prog_data *
anv_pipeline_get_last_vue_prog_data(const struct anv_graphics_pipeline *pipeline)
{
   if (anv_pipeline_has_stage(pipeline, MESA_SHADER_GEOMETRY))
      return &get_gs_prog_data(pipeline)->base;
   else if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
      return &get_tes_prog_data(pipeline)->base;
   else
      return &get_vs_prog_data(pipeline)->base;
}
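
/* Editor's note: small usage sketch (not in the original header). The last
 * pre-rasterization stage's VUE map is what downstream setup (e.g. SBE
 * programming) consumes, which is the typical reason to call the helper
 * above.
 */
static inline const struct brw_vue_map *
anv_example_last_vue_map(const struct anv_graphics_pipeline *pipeline)
{
   return &anv_pipeline_get_last_vue_prog_data(pipeline)->vue_map;
}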
3344 anv_pipeline_init(struct anv_graphics_pipeline
*pipeline
, struct anv_device
*device
,
3345 struct anv_pipeline_cache
*cache
,
3346 const VkGraphicsPipelineCreateInfo
*pCreateInfo
,
3347 const VkAllocationCallbacks
*alloc
);
3350 anv_pipeline_compile_cs(struct anv_compute_pipeline
*pipeline
,
3351 struct anv_pipeline_cache
*cache
,
3352 const VkComputePipelineCreateInfo
*info
,
3353 const struct anv_shader_module
*module
,
3354 const char *entrypoint
,
3355 const VkSpecializationInfo
*spec_info
);
3358 anv_cs_workgroup_size(const struct anv_compute_pipeline
*pipeline
);
3361 anv_cs_threads(const struct anv_compute_pipeline
*pipeline
);
3363 struct anv_format_plane
{
3364 enum isl_format isl_format
:16;
3365 struct isl_swizzle swizzle
;
3367 /* Whether this plane contains chroma channels */
3370 /* For downscaling of YUV planes */
3371 uint8_t denominator_scales
[2];
3373 /* How to map sampled ycbcr planes to a single 4 component element. */
3374 struct isl_swizzle ycbcr_swizzle
;
3376 /* What aspect is associated to this plane */
3377 VkImageAspectFlags aspect
;
3382 struct anv_format_plane planes
[3];
/**
 * Return the aspect's _format_ plane, not its _memory_ plane (using the
 * vocabulary of VK_EXT_image_drm_format_modifier). As a consequence, \a
 * aspect_mask may contain VK_IMAGE_ASPECT_PLANE_*, but must not contain
 * VK_IMAGE_ASPECT_MEMORY_PLANE_* .
 */
static inline uint32_t
anv_image_aspect_to_plane(VkImageAspectFlags image_aspects,
                          VkImageAspectFlags aspect_mask)
{
   switch (aspect_mask) {
   case VK_IMAGE_ASPECT_COLOR_BIT:
   case VK_IMAGE_ASPECT_DEPTH_BIT:
   case VK_IMAGE_ASPECT_PLANE_0_BIT:
      return 0;
   case VK_IMAGE_ASPECT_STENCIL_BIT:
      if ((image_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) == 0)
         return 0;
      return 1;
   case VK_IMAGE_ASPECT_PLANE_1_BIT:
      return 1;
   case VK_IMAGE_ASPECT_PLANE_2_BIT:
      return 2;
   default:
      /* Purposefully assert with depth/stencil aspects. */
      unreachable("invalid image aspect");
   }
}
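
/* Editor's note: illustrative sketch (not in the original header). For a
 * combined depth/stencil image, stencil lives in plane 1; for a stencil-only
 * image it lives in plane 0, exactly as the switch above implements.
 */
static inline uint32_t
anv_example_stencil_plane(VkImageAspectFlags image_aspects)
{
   return anv_image_aspect_to_plane(image_aspects, VK_IMAGE_ASPECT_STENCIL_BIT);
}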
static inline VkImageAspectFlags
anv_plane_to_aspect(VkImageAspectFlags image_aspects,
                    uint32_t plane)
{
   if (image_aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
      if (util_bitcount(image_aspects) > 1)
         return VK_IMAGE_ASPECT_PLANE_0_BIT << plane;
      return VK_IMAGE_ASPECT_COLOR_BIT;
   }
   if (image_aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
      return VK_IMAGE_ASPECT_DEPTH_BIT << plane;
   assert(image_aspects == VK_IMAGE_ASPECT_STENCIL_BIT);
   return VK_IMAGE_ASPECT_STENCIL_BIT;
}
3432 #define anv_foreach_image_aspect_bit(b, image, aspects) \
3433 for_each_bit(b, anv_image_expand_aspects(image, aspects))
const struct anv_format *
anv_get_format(VkFormat format);

static inline uint32_t
anv_get_format_planes(VkFormat vk_format)
{
   const struct anv_format *format = anv_get_format(vk_format);

   return format != NULL ? format->n_planes : 0;
}
struct anv_format_plane
anv_get_format_plane(const struct gen_device_info *devinfo, VkFormat vk_format,
                     VkImageAspectFlagBits aspect, VkImageTiling tiling);

static inline enum isl_format
anv_get_isl_format(const struct gen_device_info *devinfo, VkFormat vk_format,
                   VkImageAspectFlags aspect, VkImageTiling tiling)
{
   return anv_get_format_plane(devinfo, vk_format, aspect, tiling).isl_format;
}
3457 bool anv_formats_ccs_e_compatible(const struct gen_device_info
*devinfo
,
3458 VkImageCreateFlags create_flags
,
3460 VkImageTiling vk_tiling
,
3461 const VkImageFormatListCreateInfoKHR
*fmt_list
);
static inline struct isl_swizzle
anv_swizzle_for_render(struct isl_swizzle swizzle)
{
   /* Sometimes the swizzle will have alpha map to one. We do this to fake
    * RGB as RGBA for texturing
    */
   assert(swizzle.a == ISL_CHANNEL_SELECT_ONE ||
          swizzle.a == ISL_CHANNEL_SELECT_ALPHA);

   /* But it doesn't matter what we render to that channel */
   swizzle.a = ISL_CHANNEL_SELECT_ALPHA;

   return swizzle;
}
3479 anv_pipeline_setup_l3_config(struct anv_pipeline
*pipeline
, bool needs_slm
);
3482 * Subsurface of an anv_image.
3484 struct anv_surface
{
3485 /** Valid only if isl_surf::size_B > 0. */
3486 struct isl_surf isl
;
3489 * Offset from VkImage's base address, as bound by vkBindImageMemory().
3495 struct vk_object_base base
;
3497 VkImageType type
; /**< VkImageCreateInfo::imageType */
3498 /* The original VkFormat provided by the client. This may not match any
3499 * of the actual surface formats.
3502 const struct anv_format
*format
;
3504 VkImageAspectFlags aspects
;
3507 uint32_t array_size
;
3508 uint32_t samples
; /**< VkImageCreateInfo::samples */
3510 VkImageUsageFlags usage
; /**< VkImageCreateInfo::usage. */
3511 VkImageUsageFlags stencil_usage
;
3512 VkImageCreateFlags create_flags
; /* Flags used when creating image. */
3513 VkImageTiling tiling
; /** VkImageCreateInfo::tiling */
/** True if this needs to be bound to an appropriately tiled BO.
3517 * When not using modifiers, consumers such as X11, Wayland, and KMS need
3518 * the tiling passed via I915_GEM_SET_TILING. When exporting these buffers
3519 * we require a dedicated allocation so that we can know to allocate a
3522 bool needs_set_tiling
;
3525 * Must be DRM_FORMAT_MOD_INVALID unless tiling is
3526 * VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT.
3528 uint64_t drm_format_mod
;
/* Whether the image is made of several underlying buffer objects rather than a
3534 * single one with different offsets.
3538 /* Image was created with external format. */
3539 bool external_format
;
3544 * For each foo, anv_image::planes[x].surface is valid if and only if
3545 * anv_image::aspects has a x aspect. Refer to anv_image_aspect_to_plane()
3546 * to figure the number associated with a given aspect.
3548 * The hardware requires that the depth buffer and stencil buffer be
3549 * separate surfaces. From Vulkan's perspective, though, depth and stencil
3550 * reside in the same VkImage. To satisfy both the hardware and Vulkan, we
3551 * allocate the depth and stencil buffers as separate surfaces in the same
3556 * -----------------------
3558 * ----------------------- |
3559 * | shadow surface0 | |
3560 * ----------------------- | Plane 0
3561 * | aux surface0 | |
3562 * ----------------------- |
3563 * | fast clear colors0 | \|/
3564 * -----------------------
3566 * ----------------------- |
3567 * | shadow surface1 | |
3568 * ----------------------- | Plane 1
3569 * | aux surface1 | |
3570 * ----------------------- |
3571 * | fast clear colors1 | \|/
3572 * -----------------------
3575 * -----------------------
3579 * Offset of the entire plane (whenever the image is disjoint this is
3587 struct anv_surface surface
;
3590 * A surface which shadows the main surface and may have different
3591 * tiling. This is used for sampling using a tiling that isn't supported
3592 * for other operations.
3594 struct anv_surface shadow_surface
;
3597 * The base aux usage for this image. For color images, this can be
3598 * either CCS_E or CCS_D depending on whether or not we can reliably
3599 * leave CCS on all the time.
3601 enum isl_aux_usage aux_usage
;
3603 struct anv_surface aux_surface
;
3606 * Offset of the fast clear state (used to compute the
3607 * fast_clear_state_offset of the following planes).
3609 uint32_t fast_clear_state_offset
;
3612 * BO associated with this plane, set when bound.
3614 struct anv_address address
;
3617 * When destroying the image, also free the bo.
/* The ordering of this enum is important */
enum anv_fast_clear_type {
   /** Image does not have/support any fast-clear blocks */
   ANV_FAST_CLEAR_NONE = 0,
   /** Image has/supports fast-clear but only to the default value */
   ANV_FAST_CLEAR_DEFAULT_VALUE = 1,
   /** Image has/supports fast-clear with an arbitrary fast-clear value */
   ANV_FAST_CLEAR_ANY = 2,
};
/* Returns the number of auxiliary buffer levels attached to an image. */
static inline uint8_t
anv_image_aux_levels(const struct anv_image * const image,
                     VkImageAspectFlagBits aspect)
{
   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
   if (image->planes[plane].aux_usage == ISL_AUX_USAGE_NONE)
      return 0;

   /* The Gen12 CCS aux surface is represented with only one level. */
   return image->planes[plane].aux_surface.isl.tiling == ISL_TILING_GEN12_CCS ?
          image->planes[plane].surface.isl.levels :
          image->planes[plane].aux_surface.isl.levels;
}
/* Returns the number of auxiliary buffer layers attached to an image. */
static inline uint32_t
anv_image_aux_layers(const struct anv_image * const image,
                     VkImageAspectFlagBits aspect,
                     const uint8_t miplevel)
{
   /* The miplevel must exist in the main buffer. */
   assert(miplevel < image->levels);

   if (miplevel >= anv_image_aux_levels(image, aspect)) {
      /* There are no layers with auxiliary data because the miplevel has no
       * auxiliary data.
       */
      return 0;
   }

   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);

   /* The Gen12 CCS aux surface is represented with only one layer. */
   const struct isl_extent4d *aux_logical_level0_px =
      image->planes[plane].aux_surface.isl.tiling == ISL_TILING_GEN12_CCS ?
      &image->planes[plane].surface.isl.logical_level0_px :
      &image->planes[plane].aux_surface.isl.logical_level0_px;

   return MAX2(aux_logical_level0_px->array_len,
               aux_logical_level0_px->depth >> miplevel);
}
static inline struct anv_address
anv_image_get_clear_color_addr(const struct anv_device *device,
                               const struct anv_image *image,
                               VkImageAspectFlagBits aspect)
{
   assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);

   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
   return anv_address_add(image->planes[plane].address,
                          image->planes[plane].fast_clear_state_offset);
}
static inline struct anv_address
anv_image_get_fast_clear_type_addr(const struct anv_device *device,
                                   const struct anv_image *image,
                                   VkImageAspectFlagBits aspect)
{
   struct anv_address addr =
      anv_image_get_clear_color_addr(device, image, aspect);

   const unsigned clear_color_state_size = device->info.gen >= 10 ?
      device->isl_dev.ss.clear_color_state_size :
      device->isl_dev.ss.clear_value_size;
   return anv_address_add(addr, clear_color_state_size);
}
static inline struct anv_address
anv_image_get_compression_state_addr(const struct anv_device *device,
                                     const struct anv_image *image,
                                     VkImageAspectFlagBits aspect,
                                     uint32_t level, uint32_t array_layer)
{
   assert(level < anv_image_aux_levels(image, aspect));
   assert(array_layer < anv_image_aux_layers(image, aspect, level));
   UNUSED uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
   assert(image->planes[plane].aux_usage == ISL_AUX_USAGE_CCS_E);

   struct anv_address addr =
      anv_image_get_fast_clear_type_addr(device, image, aspect);
   addr.offset += 4; /* Go past the fast clear type */

   if (image->type == VK_IMAGE_TYPE_3D) {
      for (uint32_t l = 0; l < level; l++)
         addr.offset += anv_minify(image->extent.depth, l) * 4;
   } else {
      addr.offset += level * image->array_size * 4;
   }
   addr.offset += array_layer * 4;

   assert(addr.offset <
          image->planes[plane].address.offset + image->planes[plane].size);
   return addr;
}
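
/* Editor's note: illustrative helper (not in the original header) making the
 * compression-state addressing above explicit for non-3D images: 4 bytes per
 * (level, layer) slice, laid out level-major, immediately after the 4-byte
 * fast-clear type word.
 */
static inline uint32_t
anv_example_compression_state_dword(const struct anv_image *image,
                                    uint32_t level, uint32_t array_layer)
{
   assert(image->type != VK_IMAGE_TYPE_3D);
   return level * image->array_size + array_layer;
}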
/* Returns true if a HiZ-enabled depth buffer can be sampled from. */
static inline bool
anv_can_sample_with_hiz(const struct gen_device_info * const devinfo,
                        const struct anv_image *image)
{
   if (!(image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT))
      return false;

   /* For Gen8-11, there are some restrictions around sampling from HiZ.
    * The Skylake PRM docs for RENDER_SURFACE_STATE::AuxiliarySurfaceMode
    * say:
    *
    *    "If this field is set to AUX_HIZ, Number of Multisamples must
    *    be MULTISAMPLECOUNT_1, and Surface Type cannot be SURFTYPE_3D."
    */
   if (image->type == VK_IMAGE_TYPE_3D)
      return false;

   /* Allow this feature on BDW even though it is disabled in the BDW devinfo
    * struct. There's documentation which suggests that this feature actually
    * reduces performance on BDW, but it has only been observed to help so
    * far. Sampling fast-cleared blocks on BDW must also be handled with care
    * (see depth_stencil_attachment_compute_aux_usage() for more info).
    */
   if (devinfo->gen != 8 && !devinfo->has_sample_with_hiz)
      return false;

   return image->samples == 1;
}
static inline bool
anv_image_plane_uses_aux_map(const struct anv_device *device,
                             const struct anv_image *image,
                             uint32_t plane)
{
   return device->info.has_aux_map &&
          isl_aux_usage_has_ccs(image->planes[plane].aux_usage);
}

void
anv_cmd_buffer_mark_image_written(struct anv_cmd_buffer *cmd_buffer,
                                  const struct anv_image *image,
                                  VkImageAspectFlagBits aspect,
                                  enum isl_aux_usage aux_usage,
                                  uint32_t level,
                                  uint32_t base_layer,
                                  uint32_t layer_count);

void
anv_image_clear_color(struct anv_cmd_buffer *cmd_buffer,
                      const struct anv_image *image,
                      VkImageAspectFlagBits aspect,
                      enum isl_aux_usage aux_usage,
                      enum isl_format format, struct isl_swizzle swizzle,
                      uint32_t level, uint32_t base_layer, uint32_t layer_count,
                      VkRect2D area, union isl_color_value clear_color);

void
anv_image_clear_depth_stencil(struct anv_cmd_buffer *cmd_buffer,
                              const struct anv_image *image,
                              VkImageAspectFlags aspects,
                              enum isl_aux_usage depth_aux_usage,
                              uint32_t level,
                              uint32_t base_layer, uint32_t layer_count,
                              VkRect2D area,
                              float depth_value, uint8_t stencil_value);

void
anv_image_msaa_resolve(struct anv_cmd_buffer *cmd_buffer,
                       const struct anv_image *src_image,
                       enum isl_aux_usage src_aux_usage,
                       uint32_t src_level, uint32_t src_base_layer,
                       const struct anv_image *dst_image,
                       enum isl_aux_usage dst_aux_usage,
                       uint32_t dst_level, uint32_t dst_base_layer,
                       VkImageAspectFlagBits aspect,
                       uint32_t src_x, uint32_t src_y,
                       uint32_t dst_x, uint32_t dst_y,
                       uint32_t width, uint32_t height,
                       uint32_t layer_count,
                       enum blorp_filter filter);

void
anv_image_hiz_op(struct anv_cmd_buffer *cmd_buffer,
                 const struct anv_image *image,
                 VkImageAspectFlagBits aspect, uint32_t level,
                 uint32_t base_layer, uint32_t layer_count,
                 enum isl_aux_op hiz_op);

void
anv_image_hiz_clear(struct anv_cmd_buffer *cmd_buffer,
                    const struct anv_image *image,
                    VkImageAspectFlags aspects,
                    uint32_t level,
                    uint32_t base_layer, uint32_t layer_count,
                    VkRect2D area, uint8_t stencil_value);

void
anv_image_mcs_op(struct anv_cmd_buffer *cmd_buffer,
                 const struct anv_image *image,
                 enum isl_format format, struct isl_swizzle swizzle,
                 VkImageAspectFlagBits aspect,
                 uint32_t base_layer, uint32_t layer_count,
                 enum isl_aux_op mcs_op, union isl_color_value *clear_value,
                 bool predicate);

void
anv_image_ccs_op(struct anv_cmd_buffer *cmd_buffer,
                 const struct anv_image *image,
                 enum isl_format format, struct isl_swizzle swizzle,
                 VkImageAspectFlagBits aspect, uint32_t level,
                 uint32_t base_layer, uint32_t layer_count,
                 enum isl_aux_op ccs_op, union isl_color_value *clear_value,
                 bool predicate);

void
anv_image_copy_to_shadow(struct anv_cmd_buffer *cmd_buffer,
                         const struct anv_image *image,
                         VkImageAspectFlagBits aspect,
                         uint32_t base_level, uint32_t level_count,
                         uint32_t base_layer, uint32_t layer_count);

enum isl_aux_state
anv_layout_to_aux_state(const struct gen_device_info * const devinfo,
                        const struct anv_image *image,
                        const VkImageAspectFlagBits aspect,
                        const VkImageLayout layout);

enum isl_aux_usage
anv_layout_to_aux_usage(const struct gen_device_info * const devinfo,
                        const struct anv_image *image,
                        const VkImageAspectFlagBits aspect,
                        const VkImageUsageFlagBits usage,
                        const VkImageLayout layout);

enum anv_fast_clear_type
anv_layout_to_fast_clear_type(const struct gen_device_info * const devinfo,
                              const struct anv_image * const image,
                              const VkImageAspectFlagBits aspect,
                              const VkImageLayout layout);

/* This is defined as a macro so that it works for both
 * VkImageSubresourceRange and VkImageSubresourceLayers
 */
#define anv_get_layerCount(_image, _range) \
   ((_range)->layerCount == VK_REMAINING_ARRAY_LAYERS ? \
    (_image)->array_size - (_range)->baseArrayLayer : (_range)->layerCount)

static inline uint32_t
anv_get_levelCount(const struct anv_image *image,
                   const VkImageSubresourceRange *range)
{
   return range->levelCount == VK_REMAINING_MIP_LEVELS ?
          image->levels - range->baseMipLevel : range->levelCount;
}
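
/* Usage sketch (illustrative only, not part of the original header): the two
 * helpers above resolve VK_REMAINING_* wildcards against the image, e.g. for
 * an image with levels == 10 and array_size == 6:
 *
 *    VkImageSubresourceRange range = {
 *       .baseMipLevel = 2, .levelCount = VK_REMAINING_MIP_LEVELS,
 *       .baseArrayLayer = 1, .layerCount = 3,
 *    };
 *    anv_get_levelCount(image, &range);  // 10 - 2 = 8
 *    anv_get_layerCount(image, &range);  // 3 (explicit count passed through)
 *
 * Because anv_get_layerCount() is a macro, the same expression also works
 * with a VkImageSubresourceLayers argument.
 */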

static inline VkImageAspectFlags
anv_image_expand_aspects(const struct anv_image *image,
                         VkImageAspectFlags aspects)
{
   /* If the underlying image has color plane aspects and
    * VK_IMAGE_ASPECT_COLOR_BIT has been requested, then return the aspects of
    * the underlying image.
    */
   if ((image->aspects & VK_IMAGE_ASPECT_PLANES_BITS_ANV) != 0 &&
       aspects == VK_IMAGE_ASPECT_COLOR_BIT)
      return image->aspects;

   return aspects;
}
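
/* Example (illustrative): for a 2-plane YCbCr image whose ->aspects is
 * VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT, a request for
 * VK_IMAGE_ASPECT_COLOR_BIT expands to both plane bits; any other aspect
 * mask is returned unchanged.
 */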

static inline bool
anv_image_aspects_compatible(VkImageAspectFlags aspects1,
                             VkImageAspectFlags aspects2)
{
   if (aspects1 == aspects2)
      return true;

   /* Color aspects are compatible if they occupy the same number of planes. */
   if ((aspects1 & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) != 0 &&
       (aspects2 & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) != 0 &&
       util_bitcount(aspects1) == util_bitcount(aspects2))
      return true;

   return false;
}

struct anv_image_view {
   struct vk_object_base base;

   const struct anv_image *image; /**< VkImageViewCreateInfo::image */

   VkImageAspectFlags aspect_mask;
   VkFormat vk_format;
   VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */

   unsigned n_planes;
   struct {
      uint32_t image_plane;

      struct isl_view isl;

      /**
       * RENDER_SURFACE_STATE when using image as a sampler surface with an
       * image layout of SHADER_READ_ONLY_OPTIMAL or
       * DEPTH_STENCIL_READ_ONLY_OPTIMAL.
       */
      struct anv_surface_state optimal_sampler_surface_state;

      /**
       * RENDER_SURFACE_STATE when using image as a sampler surface with an
       * image layout of GENERAL.
       */
      struct anv_surface_state general_sampler_surface_state;

      /**
       * RENDER_SURFACE_STATE when using image as a storage image. Separate
       * states for write-only and readable, using the real format for
       * write-only and the lowered format for readable.
       */
      struct anv_surface_state storage_surface_state;
      struct anv_surface_state writeonly_storage_surface_state;

      struct brw_image_param storage_image_param;
   } planes[3];
};

enum anv_image_view_state_flags {
   ANV_IMAGE_VIEW_STATE_STORAGE_WRITE_ONLY   = (1 << 0),
   ANV_IMAGE_VIEW_STATE_TEXTURE_OPTIMAL      = (1 << 1),
};

void anv_image_fill_surface_state(struct anv_device *device,
                                  const struct anv_image *image,
                                  VkImageAspectFlagBits aspect,
                                  const struct isl_view *view,
                                  isl_surf_usage_flags_t view_usage,
                                  enum isl_aux_usage aux_usage,
                                  const union isl_color_value *clear_color,
                                  enum anv_image_view_state_flags flags,
                                  struct anv_surface_state *state_inout,
                                  struct brw_image_param *image_param_out);

struct anv_image_create_info {
   const VkImageCreateInfo *vk_info;

   /** An opt-in bitmask which filters an ISL-mapping of the Vulkan tiling. */
   isl_tiling_flags_t isl_tiling_flags;

   /** These flags will be added to any derived from VkImageCreateInfo. */
   isl_surf_usage_flags_t isl_extra_usage_flags;

   uint32_t stride;
   bool external_format;
};

VkResult anv_image_create(VkDevice _device,
                          const struct anv_image_create_info *info,
                          const VkAllocationCallbacks *alloc,
                          VkImage *pImage);

enum isl_format
anv_isl_format_for_descriptor_type(VkDescriptorType type);

static inline VkExtent3D
anv_sanitize_image_extent(const VkImageType imageType,
                          const VkExtent3D imageExtent)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkExtent3D) { imageExtent.width, 1, 1 };
   case VK_IMAGE_TYPE_2D:
      return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
   case VK_IMAGE_TYPE_3D:
      return imageExtent;
   default:
      unreachable("invalid image type");
   }
}

static inline VkOffset3D
anv_sanitize_image_offset(const VkImageType imageType,
                          const VkOffset3D imageOffset)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkOffset3D) { imageOffset.x, 0, 0 };
   case VK_IMAGE_TYPE_2D:
      return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
   case VK_IMAGE_TYPE_3D:
      return imageOffset;
   default:
      unreachable("invalid image type");
   }
}
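
/* Usage sketch (illustrative only): these sanitizers let copy/blit code treat
 * every image as 3D-shaped without reading unused components, e.g.:
 *
 *    VkExtent3D e = anv_sanitize_image_extent(VK_IMAGE_TYPE_1D,
 *                                             (VkExtent3D) { 640, 480, 7 });
 *    // e == { 640, 1, 1 }: height and depth are forced to 1 for 1D images.
 */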

VkFormatFeatureFlags
anv_get_image_format_features(const struct gen_device_info *devinfo,
                              VkFormat vk_format,
                              const struct anv_format *anv_format,
                              VkImageTiling vk_tiling);

void anv_fill_buffer_surface_state(struct anv_device *device,
                                   struct anv_state state,
                                   enum isl_format format,
                                   struct anv_address address,
                                   uint32_t range, uint32_t stride);

static inline void
anv_clear_color_from_att_state(union isl_color_value *clear_color,
                               const struct anv_attachment_state *att_state,
                               const struct anv_image_view *iview)
{
   const struct isl_format_layout *view_fmtl =
      isl_format_get_layout(iview->planes[0].isl.format);

#define COPY_CLEAR_COLOR_CHANNEL(c, i) \
   if (view_fmtl->channels.c.bits) \
      clear_color->u32[i] = att_state->clear_value.color.uint32[i]

   COPY_CLEAR_COLOR_CHANNEL(r, 0);
   COPY_CLEAR_COLOR_CHANNEL(g, 1);
   COPY_CLEAR_COLOR_CHANNEL(b, 2);
   COPY_CLEAR_COLOR_CHANNEL(a, 3);

#undef COPY_CLEAR_COLOR_CHANNEL
}
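
/* Example (illustrative): the per-channel guard above matters for formats
 * that lack some channels. With an R11G11B10 float view, view_fmtl reports
 * zero bits for alpha, so only u32[0..2] are copied from the attachment's
 * clear value and the alpha word of clear_color is left untouched.
 */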

struct anv_ycbcr_conversion {
   struct vk_object_base base;

   const struct anv_format *        format;
   VkSamplerYcbcrModelConversion    ycbcr_model;
   VkSamplerYcbcrRange              ycbcr_range;
   VkComponentSwizzle               mapping[4];
   VkChromaLocation                 chroma_offsets[2];
   VkFilter                         chroma_filter;
   bool                             chroma_reconstruction;
};

struct anv_sampler {
   struct vk_object_base        base;

   uint32_t                     state[3][4];
   struct anv_ycbcr_conversion *conversion;

   /* Blob of sampler state data which is guaranteed to be 32-byte aligned
    * and with a 32-byte stride for use as bindless samplers.
    */
   struct anv_state             bindless_state;
};

struct anv_framebuffer {
   struct vk_object_base                        base;

   uint32_t                                     width;
   uint32_t                                     height;
   uint32_t                                     layers;

   uint32_t                                     attachment_count;
   struct anv_image_view *                      attachments[0];
};

struct anv_subpass_attachment {
   VkImageUsageFlagBits usage;
   uint32_t attachment;
   VkImageLayout layout;

   /* Used only with attachment containing stencil data. */
   VkImageLayout stencil_layout;
};

struct anv_subpass {
   uint32_t                                     attachment_count;

   /**
    * A pointer to all attachment references used in this subpass.
    * Only valid if ::attachment_count > 0.
    */
   struct anv_subpass_attachment *              attachments;
   uint32_t                                     input_count;
   struct anv_subpass_attachment *              input_attachments;
   uint32_t                                     color_count;
   struct anv_subpass_attachment *              color_attachments;
   struct anv_subpass_attachment *              resolve_attachments;

   struct anv_subpass_attachment *              depth_stencil_attachment;
   struct anv_subpass_attachment *              ds_resolve_attachment;
   VkResolveModeFlagBitsKHR                     depth_resolve_mode;
   VkResolveModeFlagBitsKHR                     stencil_resolve_mode;

   uint32_t                                     view_mask;

   /** Subpass has a depth/stencil self-dependency */
   bool                                         has_ds_self_dep;

   /** Subpass has at least one color resolve attachment */
   bool                                         has_color_resolve;
};

static inline unsigned
anv_subpass_view_count(const struct anv_subpass *subpass)
{
   return MAX2(1, util_bitcount(subpass->view_mask));
}
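
/* Example (illustrative): with multiview enabled and a subpass view_mask of
 * 0b0101, anv_subpass_view_count() returns 2; without multiview the mask is
 * 0 and the MAX2() clamps the result to a single view.
 */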

struct anv_render_pass_attachment {
   /* TODO: Consider using VkAttachmentDescription instead of storing each of
    * its members individually.
    */
   VkFormat                                     format;
   uint32_t                                     samples;
   VkImageUsageFlags                            usage;
   VkAttachmentLoadOp                           load_op;
   VkAttachmentStoreOp                          store_op;
   VkAttachmentLoadOp                           stencil_load_op;
   VkImageLayout                                initial_layout;
   VkImageLayout                                final_layout;
   VkImageLayout                                first_subpass_layout;

   VkImageLayout                                stencil_initial_layout;
   VkImageLayout                                stencil_final_layout;

   /* The subpass id in which the attachment will be used last. */
   uint32_t                                     last_subpass_idx;
};

struct anv_render_pass {
   struct vk_object_base                        base;

   uint32_t                                     attachment_count;
   uint32_t                                     subpass_count;
   /* An array of subpass_count+1 flushes, one per subpass boundary */
   enum anv_pipe_bits *                         subpass_flushes;
   struct anv_render_pass_attachment *          attachments;
   struct anv_subpass                           subpasses[0];
};

#define ANV_PIPELINE_STATISTICS_MASK 0x000007ff

struct anv_query_pool {
   struct vk_object_base                        base;

   VkQueryType                                  type;
   VkQueryPipelineStatisticFlags                pipeline_statistics;
   /** Stride between slots, in bytes */
   uint32_t                                     stride;
   /** Number of slots in this query pool */
   uint32_t                                     slots;
   struct anv_bo *                              bo;
};

int anv_get_instance_entrypoint_index(const char *name);
int anv_get_device_entrypoint_index(const char *name);
int anv_get_physical_device_entrypoint_index(const char *name);

const char *anv_get_instance_entry_name(int index);
const char *anv_get_physical_device_entry_name(int index);
const char *anv_get_device_entry_name(int index);

bool
anv_instance_entrypoint_is_enabled(int index, uint32_t core_version,
                                   const struct anv_instance_extension_table *instance);
bool
anv_physical_device_entrypoint_is_enabled(int index, uint32_t core_version,
                                          const struct anv_instance_extension_table *instance);
bool
anv_device_entrypoint_is_enabled(int index, uint32_t core_version,
                                 const struct anv_instance_extension_table *instance,
                                 const struct anv_device_extension_table *device);

void *anv_lookup_entrypoint(const struct gen_device_info *devinfo,
                            const char *name);

void anv_dump_image_to_ppm(struct anv_device *device,
                           struct anv_image *image, unsigned miplevel,
                           unsigned array_layer, VkImageAspectFlagBits aspect,
                           const char *filename);

enum anv_dump_action {
   ANV_DUMP_FRAMEBUFFERS_BIT = 0x1,
};

void anv_dump_start(struct anv_device *device, enum anv_dump_action actions);
void anv_dump_finish(void);

void anv_dump_add_attachments(struct anv_cmd_buffer *cmd_buffer);

static inline uint32_t
anv_get_subpass_id(const struct anv_cmd_state * const cmd_state)
{
   /* This function must be called from within a subpass. */
   assert(cmd_state->pass && cmd_state->subpass);

   const uint32_t subpass_id = cmd_state->subpass - cmd_state->pass->subpasses;

   /* The id of this subpass shouldn't exceed the number of subpasses in this
    * render pass minus 1.
    */
   assert(subpass_id < cmd_state->pass->subpass_count);
   return subpass_id;
}
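
/* Note (illustrative): the id is simply the element index of
 * cmd_state->subpass within the pass's subpasses[] array, e.g. a command
 * buffer currently in the third subpass of a render pass yields
 * subpass_id == 2.
 */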

struct gen_perf_config *anv_get_perf(const struct gen_device_info *devinfo, int fd);
void anv_device_perf_init(struct anv_device *device);

#define ANV_FROM_HANDLE(__anv_type, __name, __handle) \
   VK_FROM_HANDLE(__anv_type, __name, __handle)

VK_DEFINE_HANDLE_CASTS(anv_cmd_buffer, base, VkCommandBuffer,
                       VK_OBJECT_TYPE_COMMAND_BUFFER)
VK_DEFINE_HANDLE_CASTS(anv_device, vk.base, VkDevice, VK_OBJECT_TYPE_DEVICE)
VK_DEFINE_HANDLE_CASTS(anv_instance, base, VkInstance, VK_OBJECT_TYPE_INSTANCE)
VK_DEFINE_HANDLE_CASTS(anv_physical_device, base, VkPhysicalDevice,
                       VK_OBJECT_TYPE_PHYSICAL_DEVICE)
VK_DEFINE_HANDLE_CASTS(anv_queue, base, VkQueue, VK_OBJECT_TYPE_QUEUE)

VK_DEFINE_NONDISP_HANDLE_CASTS(anv_cmd_pool, base, VkCommandPool,
                               VK_OBJECT_TYPE_COMMAND_POOL)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer, base, VkBuffer,
                               VK_OBJECT_TYPE_BUFFER)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer_view, base, VkBufferView,
                               VK_OBJECT_TYPE_BUFFER_VIEW)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_pool, base, VkDescriptorPool,
                               VK_OBJECT_TYPE_DESCRIPTOR_POOL)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set, base, VkDescriptorSet,
                               VK_OBJECT_TYPE_DESCRIPTOR_SET)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set_layout, base,
                               VkDescriptorSetLayout,
                               VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_update_template, base,
                               VkDescriptorUpdateTemplate,
                               VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_device_memory, base, VkDeviceMemory,
                               VK_OBJECT_TYPE_DEVICE_MEMORY)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_fence, base, VkFence, VK_OBJECT_TYPE_FENCE)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_event, base, VkEvent, VK_OBJECT_TYPE_EVENT)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_framebuffer, base, VkFramebuffer,
                               VK_OBJECT_TYPE_FRAMEBUFFER)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_image, base, VkImage, VK_OBJECT_TYPE_IMAGE)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_image_view, base, VkImageView,
                               VK_OBJECT_TYPE_IMAGE_VIEW)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_cache, base, VkPipelineCache,
                               VK_OBJECT_TYPE_PIPELINE_CACHE)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline, base, VkPipeline,
                               VK_OBJECT_TYPE_PIPELINE)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_layout, base, VkPipelineLayout,
                               VK_OBJECT_TYPE_PIPELINE_LAYOUT)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_query_pool, base, VkQueryPool,
                               VK_OBJECT_TYPE_QUERY_POOL)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_render_pass, base, VkRenderPass,
                               VK_OBJECT_TYPE_RENDER_PASS)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_sampler, base, VkSampler,
                               VK_OBJECT_TYPE_SAMPLER)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_semaphore, base, VkSemaphore,
                               VK_OBJECT_TYPE_SEMAPHORE)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_shader_module, base, VkShaderModule,
                               VK_OBJECT_TYPE_SHADER_MODULE)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_ycbcr_conversion, base,
                               VkSamplerYcbcrConversion,
                               VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION)

/* Gen-specific function declarations */
#ifdef genX
#  include "anv_genX.h"
#else
#  define genX(x) gen7_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen75_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen8_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen9_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen10_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen11_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen12_##x
#  include "anv_genX.h"
#  undef genX
#endif

#endif /* ANV_PRIVATE_H */