 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
#define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))

#include "common/gen_clflush.h"
#include "common/gen_gem.h"
#include "dev/gen_device_info.h"
#include "blorp/blorp.h"
#include "compiler/brw_compiler.h"
#include "util/macros.h"
#include "util/hash_table.h"
#include "util/list.h"
#include "util/u_atomic.h"
#include "util/u_vector.h"
#include "util/u_math.h"

#include "vk_debug_report.h"
/* Pre-declarations needed for WSI entrypoints */
typedef struct xcb_connection_t xcb_connection_t;
typedef uint32_t xcb_visualid_t;
typedef uint32_t xcb_window_t;

struct anv_buffer_view;
struct anv_image_view;
#include <vulkan/vulkan.h>
#include <vulkan/vulkan_intel.h>
#include <vulkan/vk_icd.h>

#include "anv_android.h"
#include "anv_entrypoints.h"
#include "anv_extensions.h"

#include "common/gen_debug.h"
#include "common/intel_log.h"
#include "wsi_common.h"
/* anv Virtual Memory Layout
 * =========================
 *
 * When the anv driver is determining the virtual graphics addresses of memory
 * objects itself using the softpin mechanism, the following memory ranges
 * will be assigned to the different object types.
 *
 * Three special considerations to notice:
 *
 * (1) the dynamic state pool is located within the same 4 GiB as the low
 * heap. This is to work around a VF cache issue described in a comment in
 * anv_physical_device_init_heaps.
 *
 * (2) the binding table pool is located at lower addresses than the surface
 * state pool, within a 4 GiB range. This allows surface state base addresses
 * to cover both binding tables (16 bit offsets) and surface states (32 bit
 * offsets).
 *
 * (3) the last 4 GiB of the address space is withheld from the high
 * heap. Various hardware units will read past the end of an object for
 * various reasons. This healthy margin prevents reads from wrapping around
 * 48-bit addresses.
 */
#define LOW_HEAP_MIN_ADDRESS               0x000000001000ULL /* 4 KiB */
#define LOW_HEAP_MAX_ADDRESS               0x0000bfffffffULL
#define DYNAMIC_STATE_POOL_MIN_ADDRESS     0x0000c0000000ULL /* 3 GiB */
#define DYNAMIC_STATE_POOL_MAX_ADDRESS     0x0000ffffffffULL
#define BINDING_TABLE_POOL_MIN_ADDRESS     0x000100000000ULL /* 4 GiB */
#define BINDING_TABLE_POOL_MAX_ADDRESS     0x00013fffffffULL
#define SURFACE_STATE_POOL_MIN_ADDRESS     0x000140000000ULL /* 5 GiB */
#define SURFACE_STATE_POOL_MAX_ADDRESS     0x00017fffffffULL
#define INSTRUCTION_STATE_POOL_MIN_ADDRESS 0x000180000000ULL /* 6 GiB */
#define INSTRUCTION_STATE_POOL_MAX_ADDRESS 0x0001bfffffffULL
#define HIGH_HEAP_MIN_ADDRESS              0x0001c0000000ULL /* 7 GiB */
#define HIGH_HEAP_MAX_ADDRESS              0xfffeffffffffULL

#define LOW_HEAP_SIZE               \
   (LOW_HEAP_MAX_ADDRESS - LOW_HEAP_MIN_ADDRESS + 1)
#define HIGH_HEAP_SIZE              \
   (HIGH_HEAP_MAX_ADDRESS - HIGH_HEAP_MIN_ADDRESS + 1)
#define DYNAMIC_STATE_POOL_SIZE     \
   (DYNAMIC_STATE_POOL_MAX_ADDRESS - DYNAMIC_STATE_POOL_MIN_ADDRESS + 1)
#define BINDING_TABLE_POOL_SIZE     \
   (BINDING_TABLE_POOL_MAX_ADDRESS - BINDING_TABLE_POOL_MIN_ADDRESS + 1)
#define SURFACE_STATE_POOL_SIZE     \
   (SURFACE_STATE_POOL_MAX_ADDRESS - SURFACE_STATE_POOL_MIN_ADDRESS + 1)
#define INSTRUCTION_STATE_POOL_SIZE \
   (INSTRUCTION_STATE_POOL_MAX_ADDRESS - INSTRUCTION_STATE_POOL_MIN_ADDRESS + 1)
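
/* Illustrative sanity checks, not in the original header: the layout comment
 * above relies on the dynamic state pool sharing the low 4 GiB with the low
 * heap, and on the binding table and surface state pools fitting together in
 * a single 4 GiB window. A hypothetical compile-time sketch of those
 * invariants:
 */
_Static_assert(DYNAMIC_STATE_POOL_MAX_ADDRESS < (1ull << 32),
               "dynamic state pool must stay within the low 4 GiB");
_Static_assert(BINDING_TABLE_POOL_MIN_ADDRESS < SURFACE_STATE_POOL_MIN_ADDRESS,
               "binding tables must sit below surface states");
_Static_assert(SURFACE_STATE_POOL_MAX_ADDRESS - BINDING_TABLE_POOL_MIN_ADDRESS + 1 <= (1ull << 32),
               "binding tables and surface states must share a 4 GiB range");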
/* Allowing different clear colors requires us to perform a depth resolve at
 * the end of certain render passes. This is because while slow clears store
 * the clear color in the HiZ buffer, fast clears (without a resolve) don't.
 * See the PRMs for examples describing when additional resolves would be
 * necessary. To enable fast clears without requiring extra resolves, we set
 * the clear value to a globally-defined one. We could allow different values
 * if the user doesn't expect coherent data during or after a render pass
 * (VK_ATTACHMENT_STORE_OP_DONT_CARE), but such users (aside from the CTS)
 * don't seem to exist yet. In almost all Vulkan applications tested thus far,
 * 1.0f seems to be the only value used. The only application that doesn't set
 * this value does so through the usage of a seemingly uninitialized clear
 * value.
 */
#define ANV_HZ_FC_VAL 1.0f
#define MAX_VIEWPORTS 16
#define MAX_SCISSORS  16
#define MAX_PUSH_CONSTANTS_SIZE 128
#define MAX_DYNAMIC_BUFFERS 16
#define MAX_IMAGES 64
#define MAX_GEN8_IMAGES 8
#define MAX_PUSH_DESCRIPTORS 32 /* Minimum requirement */
/* The kernel relocation API has a limitation of a 32-bit delta value
 * applied to the address before it is written which, in spite of it being
 * unsigned, is treated as signed. Because of the way that this maps to
 * the Vulkan API, we cannot handle an offset into a buffer that does not
 * fit into a signed 32 bits. The only mechanism we have for dealing with
 * this at the moment is to limit all VkDeviceMemory objects to a maximum
 * of 2GB each. The Vulkan spec allows us to do this:
 *
 *    "Some platforms may have a limit on the maximum size of a single
 *    allocation. For example, certain systems may fail to create
 *    allocations with a size greater than or equal to 4GB. Such a limit is
 *    implementation-dependent, and if such a failure occurs then the error
 *    VK_ERROR_OUT_OF_DEVICE_MEMORY should be returned."
 *
 * We don't use vk_error here because it's not an error so much as an
 * indication to the application that the allocation is too large.
 */
#define MAX_MEMORY_ALLOCATION_SIZE (1ull << 31)
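
/* Illustrative sketch, not part of the original header: how an allocation
 * path could enforce the limit above. anv_example_check_allocation_size()
 * is a hypothetical helper, not a real driver entry point.
 */
static inline VkResult
anv_example_check_allocation_size(VkDeviceSize size)
{
   if (size > MAX_MEMORY_ALLOCATION_SIZE)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   return VK_SUCCESS;
}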
#define ANV_SVGS_VB_INDEX    MAX_VBS
#define ANV_DRAWID_VB_INDEX (MAX_VBS + 1)

#define anv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
static inline uint32_t
align_down_npot_u32(uint32_t v, uint32_t a)
{
   return v - (v % a);
}

static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

static inline uint64_t
align_u64(uint64_t v, uint64_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

static inline int32_t
align_i32(int32_t v, int32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

/** Alignment must be a power of 2. */
static inline bool
anv_is_aligned(uintmax_t n, uintmax_t a)
{
   assert(a == (a & -a));
   return (n & (a - 1)) == 0;
}

static inline uint32_t
anv_minify(uint32_t n, uint32_t levels)
{
   if (unlikely(n == 0))
      return 0;
   else
      return MAX2(n >> levels, 1);
}
static inline float
anv_clamp_f(float f, float min, float max)
{
   assert(min < max);
   if (f < min)
      return min;
   else if (f > max)
      return max;
   else
      return f;
}

static inline bool
anv_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
{
   if (*inout_mask & clear_mask) {
      *inout_mask &= ~clear_mask;
      return true;
   } else {
      return false;
   }
}

static inline union isl_color_value
vk_to_isl_color(VkClearColorValue color)
{
   return (union isl_color_value) {
      .u32 = {
         color.uint32[0],
         color.uint32[1],
         color.uint32[2],
         color.uint32[3],
      },
   };
}
#define for_each_bit(b, dword)                          \
   for (uint32_t __dword = (dword);                     \
        (b) = __builtin_ffs(__dword) - 1, __dword;      \
        __dword &= ~(1 << (b)))

#define typed_memcpy(dest, src, count) ({ \
   STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
   memcpy((dest), (src), (count) * sizeof(*(src))); \
})
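
/* Illustrative usage sketch, not in the original header: for_each_bit()
 * walks the set bits of a 32-bit mask, which is how the pipe-flush helpers
 * further down decode VkAccessFlags. The helper name below is hypothetical.
 */
static inline uint32_t
anv_example_count_access_bits(VkAccessFlags flags)
{
   uint32_t count = 0;
   uint32_t b;
   for_each_bit(b, flags)
      count++;
   return count;
}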
/* Mapping from anv object to VkDebugReportObjectTypeEXT. New types need
 * to be added here in order to utilize mapping in debug/error/perf macros.
 */
282 #define REPORT_OBJECT_TYPE(o) \
283 __builtin_choose_expr ( \
284 __builtin_types_compatible_p (__typeof (o), struct anv_instance*), \
285 VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, \
286 __builtin_choose_expr ( \
287 __builtin_types_compatible_p (__typeof (o), struct anv_physical_device*), \
288 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, \
289 __builtin_choose_expr ( \
290 __builtin_types_compatible_p (__typeof (o), struct anv_device*), \
291 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, \
292 __builtin_choose_expr ( \
293 __builtin_types_compatible_p (__typeof (o), const struct anv_device*), \
294 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, \
295 __builtin_choose_expr ( \
296 __builtin_types_compatible_p (__typeof (o), struct anv_queue*), \
297 VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, \
298 __builtin_choose_expr ( \
299 __builtin_types_compatible_p (__typeof (o), struct anv_semaphore*), \
300 VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, \
301 __builtin_choose_expr ( \
302 __builtin_types_compatible_p (__typeof (o), struct anv_cmd_buffer*), \
303 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, \
304 __builtin_choose_expr ( \
305 __builtin_types_compatible_p (__typeof (o), struct anv_fence*), \
306 VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, \
307 __builtin_choose_expr ( \
308 __builtin_types_compatible_p (__typeof (o), struct anv_device_memory*), \
309 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, \
310 __builtin_choose_expr ( \
311 __builtin_types_compatible_p (__typeof (o), struct anv_buffer*), \
312 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, \
313 __builtin_choose_expr ( \
314 __builtin_types_compatible_p (__typeof (o), struct anv_image*), \
315 VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, \
316 __builtin_choose_expr ( \
317 __builtin_types_compatible_p (__typeof (o), const struct anv_image*), \
318 VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, \
319 __builtin_choose_expr ( \
320 __builtin_types_compatible_p (__typeof (o), struct anv_event*), \
321 VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT, \
322 __builtin_choose_expr ( \
323 __builtin_types_compatible_p (__typeof (o), struct anv_query_pool*), \
324 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, \
325 __builtin_choose_expr ( \
326 __builtin_types_compatible_p (__typeof (o), struct anv_buffer_view*), \
327 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT, \
328 __builtin_choose_expr ( \
329 __builtin_types_compatible_p (__typeof (o), struct anv_image_view*), \
330 VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, \
331 __builtin_choose_expr ( \
332 __builtin_types_compatible_p (__typeof (o), struct anv_shader_module*), \
333 VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, \
334 __builtin_choose_expr ( \
335 __builtin_types_compatible_p (__typeof (o), struct anv_pipeline_cache*), \
336 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT, \
337 __builtin_choose_expr ( \
338 __builtin_types_compatible_p (__typeof (o), struct anv_pipeline_layout*), \
339 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, \
340 __builtin_choose_expr ( \
341 __builtin_types_compatible_p (__typeof (o), struct anv_render_pass*), \
342 VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, \
343 __builtin_choose_expr ( \
344 __builtin_types_compatible_p (__typeof (o), struct anv_pipeline*), \
345 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, \
346 __builtin_choose_expr ( \
347 __builtin_types_compatible_p (__typeof (o), struct anv_descriptor_set_layout*), \
348 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, \
349 __builtin_choose_expr ( \
350 __builtin_types_compatible_p (__typeof (o), struct anv_sampler*), \
351 VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT, \
352 __builtin_choose_expr ( \
353 __builtin_types_compatible_p (__typeof (o), struct anv_descriptor_pool*), \
354 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, \
355 __builtin_choose_expr ( \
356 __builtin_types_compatible_p (__typeof (o), struct anv_descriptor_set*), \
357 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, \
358 __builtin_choose_expr ( \
359 __builtin_types_compatible_p (__typeof (o), struct anv_framebuffer*), \
360 VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT, \
361 __builtin_choose_expr ( \
362 __builtin_types_compatible_p (__typeof (o), struct anv_cmd_pool*), \
363 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, \
364 __builtin_choose_expr ( \
365 __builtin_types_compatible_p (__typeof (o), struct anv_surface*), \
366 VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT, \
367 __builtin_choose_expr ( \
368 __builtin_types_compatible_p (__typeof (o), struct wsi_swapchain*), \
369 VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, \
370 __builtin_choose_expr ( \
371 __builtin_types_compatible_p (__typeof (o), struct vk_debug_callback*), \
372 VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT, \
373 __builtin_choose_expr ( \
374 __builtin_types_compatible_p (__typeof (o), void*), \
375 VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, \
376 /* The void expression results in a compile-time error \
377 when assigning the result to something. */ \
378 (void)0)))))))))))))))))))))))))))))))
/* Whenever we generate an error, pass it through this function. Useful for
 * debugging, where we can break on it. Only call at error site, not when
 * propagating errors. Might be useful to plug in a stack trace here.
 */
VkResult __vk_errorv(struct anv_instance *instance, const void *object,
                     VkDebugReportObjectTypeEXT type, VkResult error,
                     const char *file, int line, const char *format,
                     va_list args);

VkResult __vk_errorf(struct anv_instance *instance, const void *object,
                     VkDebugReportObjectTypeEXT type, VkResult error,
                     const char *file, int line, const char *format, ...);

#ifdef DEBUG
#define vk_error(error) __vk_errorf(NULL, NULL,\
                                    VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,\
                                    error, __FILE__, __LINE__, NULL)
#define vk_errorv(instance, obj, error, format, args)\
    __vk_errorv(instance, obj, REPORT_OBJECT_TYPE(obj), error,\
                __FILE__, __LINE__, format, args)
#define vk_errorf(instance, obj, error, format, ...)\
    __vk_errorf(instance, obj, REPORT_OBJECT_TYPE(obj), error,\
                __FILE__, __LINE__, format, ## __VA_ARGS__)
#else
#define vk_error(error) error
#define vk_errorf(instance, obj, error, format, ...) error
#endif
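
/* Illustrative sketch, not in the original header: a failure path typically
 * funnels its VkResult through vk_error()/vk_errorf() so that, in debug
 * builds, the error is logged (and can be broken on) at the site where it
 * occurred. anv_example_propagate_error() is a hypothetical helper.
 */
static inline VkResult
anv_example_propagate_error(VkResult result)
{
   if (result == VK_ERROR_OUT_OF_DEVICE_MEMORY)
      return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);

   return result;
}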
/**
 * Warn on ignored extension structs.
 *
 * The Vulkan spec requires us to ignore unsupported or unknown structs in
 * a pNext chain. In debug mode, emitting warnings for ignored structs may
 * help us discover structs that we should not have ignored.
 *
 * From the Vulkan 1.0.38 spec:
 *
 *    Any component of the implementation (the loader, any enabled layers,
 *    and drivers) must skip over, without processing (other than reading the
 *    sType and pNext members) any chained structures with sType values not
 *    defined by extensions supported by that component.
 */
#define anv_debug_ignored_stype(sType) \
   intel_logd("%s: ignored VkStructureType %u\n", __func__, (sType))
void __anv_perf_warn(struct anv_instance *instance, const void *object,
                     VkDebugReportObjectTypeEXT type, const char *file,
                     int line, const char *format, ...)
   anv_printflike(6, 7);
void anv_loge(const char *format, ...) anv_printflike(1, 2);
void anv_loge_v(const char *format, va_list va);
/**
 * Print a FINISHME message, including its source location.
 */
#define anv_finishme(format, ...) \
   do { \
      static bool reported = false; \
      if (!reported) { \
         intel_logw("%s:%d: FINISHME: " format, __FILE__, __LINE__, ##__VA_ARGS__); \
         reported = true; \
      } \
   } while (0)

/**
 * Print a perf warning message. Set INTEL_DEBUG=perf to see these.
 */
#define anv_perf_warn(instance, obj, format, ...) \
   do { \
      static bool reported = false; \
      if (!reported && unlikely(INTEL_DEBUG & DEBUG_PERF)) { \
         __anv_perf_warn(instance, obj, REPORT_OBJECT_TYPE(obj), __FILE__, __LINE__,\
                         format, ##__VA_ARGS__); \
         reported = true; \
      } \
   } while (0)
/* A non-fatal assert. Useful for debugging. */
#ifdef DEBUG
#define anv_assert(x) ({ \
   if (unlikely(!(x))) \
      intel_loge("%s:%d ASSERT: %s", __FILE__, __LINE__, #x); \
})
#else
#define anv_assert(x)
#endif
/* A multi-pointer allocator
 *
 * When copying data structures from the user (such as a render pass), it's
 * common to need to allocate data for a bunch of different things. Instead
 * of doing several allocations and having to handle all of the error checking
 * that entails, it can be easier to do a single allocation. This struct
 * helps facilitate that. The intended usage looks like this:
 *
 *    ANV_MULTIALLOC(ma)
 *    anv_multialloc_add(&ma, &main_ptr, 1);
 *    anv_multialloc_add(&ma, &substruct1, substruct1Count);
 *    anv_multialloc_add(&ma, &substruct2, substruct2Count);
 *
 *    if (!anv_multialloc_alloc(&ma, pAllocator, VK_ALLOCATION_SCOPE_FOO))
 *       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 */
struct anv_multialloc {
   size_t size;
   size_t align;

   uint32_t ptr_count;
   void **ptrs[8];
};

#define ANV_MULTIALLOC_INIT \
   ((struct anv_multialloc) { 0, })
#define ANV_MULTIALLOC(_name) \
   struct anv_multialloc _name = ANV_MULTIALLOC_INIT
__attribute__((always_inline))
static inline void
_anv_multialloc_add(struct anv_multialloc *ma,
                    void **ptr, size_t size, size_t align)
{
   size_t offset = align_u64(ma->size, align);
   ma->size = offset + size;
   ma->align = MAX2(ma->align, align);

   /* Store the offset in the pointer. */
   *ptr = (void *)(uintptr_t)offset;

   assert(ma->ptr_count < ARRAY_SIZE(ma->ptrs));
   ma->ptrs[ma->ptr_count++] = ptr;
}

#define anv_multialloc_add_size(_ma, _ptr, _size) \
   _anv_multialloc_add((_ma), (void **)(_ptr), (_size), __alignof__(**(_ptr)))

#define anv_multialloc_add(_ma, _ptr, _count) \
   anv_multialloc_add_size(_ma, _ptr, (_count) * sizeof(**(_ptr)));
__attribute__((always_inline))
static inline void *
anv_multialloc_alloc(struct anv_multialloc *ma,
                     const VkAllocationCallbacks *alloc,
                     VkSystemAllocationScope scope)
{
   void *ptr = vk_alloc(alloc, ma->size, ma->align, scope);
   if (!ptr)
      return NULL;

   /* Fill out each of the pointers with their final value.
    *
    *   for (uint32_t i = 0; i < ma->ptr_count; i++)
    *      *ma->ptrs[i] = ptr + (uintptr_t)*ma->ptrs[i];
    *
    * Unfortunately, even though ma->ptr_count is basically guaranteed to be a
    * constant, GCC is incapable of figuring this out and unrolling the loop
    * so we have to give it a little help.
    */
   STATIC_ASSERT(ARRAY_SIZE(ma->ptrs) == 8);
#define _ANV_MULTIALLOC_UPDATE_POINTER(_i) \
   if ((_i) < ma->ptr_count) \
      *ma->ptrs[_i] = ptr + (uintptr_t)*ma->ptrs[_i]
   _ANV_MULTIALLOC_UPDATE_POINTER(0);
   _ANV_MULTIALLOC_UPDATE_POINTER(1);
   _ANV_MULTIALLOC_UPDATE_POINTER(2);
   _ANV_MULTIALLOC_UPDATE_POINTER(3);
   _ANV_MULTIALLOC_UPDATE_POINTER(4);
   _ANV_MULTIALLOC_UPDATE_POINTER(5);
   _ANV_MULTIALLOC_UPDATE_POINTER(6);
   _ANV_MULTIALLOC_UPDATE_POINTER(7);
#undef _ANV_MULTIALLOC_UPDATE_POINTER

   return ptr;
}
__attribute__((always_inline))
static inline void *
anv_multialloc_alloc2(struct anv_multialloc *ma,
                      const VkAllocationCallbacks *parent_alloc,
                      const VkAllocationCallbacks *alloc,
                      VkSystemAllocationScope scope)
{
   return anv_multialloc_alloc(ma, alloc ? alloc : parent_alloc, scope);
}
/* Extra ANV-defined BO flags which won't be passed to the kernel */
#define ANV_BO_EXTERNAL    (1ull << 31)
#define ANV_BO_FLAG_MASK   (1ull << 31)

   /* Index into the current validation list. This is used by the
    * validation list building algorithm to track which buffers are already
    * in the validation list so that we can ensure uniqueness.
    */

   /* Last known offset. This value is provided by the kernel when we
    * execbuf and is used as the presumed offset for the next bunch of
    * relocations.
    */

   /** Flags to pass to the kernel through drm_i915_exec_object2::flags */
static inline void
anv_bo_init(struct anv_bo *bo, uint32_t gem_handle, uint64_t size)
{
   bo->gem_handle = gem_handle;
/* Represents a lock-free linked list of "free" things. This is used by
 * both the block pool and the state pools. Unfortunately, in order to
 * solve the ABA problem, we can't use a single uint32_t head.
 */
union anv_free_list {
   /* A simple count that is incremented every time the head changes. */

#define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { UINT32_MAX, 0 } })
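
/* Generic illustration, not the driver's actual layout: the ABA problem is
 * avoided by pairing the list head with a change counter and swapping both
 * with one 64-bit atomic compare-and-exchange. Every name below is
 * hypothetical.
 */
union anv_example_free_list {
   struct {
      uint32_t head;   /* offset/index of the first free element */
      uint32_t count;  /* incremented on every head change */
   };
   uint64_t u64;       /* the unit actually exchanged atomically */
};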
struct anv_block_state {

#define anv_block_pool_foreach_bo(bo, pool)  \
   for (bo = (pool)->bos; bo != &(pool)->bos[(pool)->nbos]; bo++)

#define ANV_MAX_BLOCK_POOL_BOS 20

struct anv_block_pool {
   struct anv_device *device;

   struct anv_bo bos[ANV_MAX_BLOCK_POOL_BOS];

   /* The address where the start of the pool is pinned. The various bos that
    * are created as the pool grows will have addresses in the range
    * [start_address, start_address + BLOCK_POOL_MEMFD_SIZE).
    */
   uint64_t start_address;

   /* The offset from the start of the bo to the "center" of the block
    * pool. Pointers to allocated blocks are given by
    * bo.map + center_bo_offset + offsets.
    */
   uint32_t center_bo_offset;

   /**
    * Array of mmaps and gem handles owned by the block pool, reclaimed when
    * the block pool is destroyed.
    */
   struct u_vector mmap_cleanups;

   struct anv_block_state state;

   struct anv_block_state back_state;
};
/* Block pools are backed by a fixed-size 1GB memfd */
#define BLOCK_POOL_MEMFD_SIZE (1ul << 30)

/* The center of the block pool is also the middle of the memfd. This may
 * change in the future if we decide differently for some reason.
 */
#define BLOCK_POOL_MEMFD_CENTER (BLOCK_POOL_MEMFD_SIZE / 2)

static inline uint32_t
anv_block_pool_size(struct anv_block_pool *pool)
{
   return pool->state.end + pool->back_state.end;
}
693 #define ANV_STATE_NULL ((struct anv_state) { .alloc_size = 0 })
695 struct anv_fixed_size_state_pool
{
696 union anv_free_list free_list
;
697 struct anv_block_state block
;
700 #define ANV_MIN_STATE_SIZE_LOG2 6
701 #define ANV_MAX_STATE_SIZE_LOG2 20
703 #define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2 + 1)
705 struct anv_free_entry
{
707 struct anv_state state
;
710 struct anv_state_table
{
711 struct anv_device
*device
;
713 struct anv_free_entry
*map
;
715 struct anv_block_state state
;
716 struct u_vector mmap_cleanups
;
719 struct anv_state_pool
{
720 struct anv_block_pool block_pool
;
722 struct anv_state_table table
;
724 /* The size of blocks which will be allocated from the block pool */
727 /** Free list for "back" allocations */
728 union anv_free_list back_alloc_free_list
;
730 struct anv_fixed_size_state_pool buckets
[ANV_STATE_BUCKETS
];
733 struct anv_state_stream_block
;
735 struct anv_state_stream
{
736 struct anv_state_pool
*state_pool
;
738 /* The size of blocks to allocate from the state pool */
741 /* Current block we're allocating from */
742 struct anv_state block
;
744 /* Offset into the current block at which to allocate the next state */
747 /* List of all blocks allocated from this pool */
748 struct anv_state_stream_block
*block_list
;
751 /* The block_pool functions exported for testing only. The block pool should
752 * only be used via a state pool (see below).
754 VkResult
anv_block_pool_init(struct anv_block_pool
*pool
,
755 struct anv_device
*device
,
756 uint64_t start_address
,
757 uint32_t initial_size
,
759 void anv_block_pool_finish(struct anv_block_pool
*pool
);
760 int32_t anv_block_pool_alloc(struct anv_block_pool
*pool
,
761 uint32_t block_size
);
762 int32_t anv_block_pool_alloc_back(struct anv_block_pool
*pool
,
763 uint32_t block_size
);
764 void* anv_block_pool_map(struct anv_block_pool
*pool
, int32_t offset
);
766 VkResult
anv_state_pool_init(struct anv_state_pool
*pool
,
767 struct anv_device
*device
,
768 uint64_t start_address
,
771 void anv_state_pool_finish(struct anv_state_pool
*pool
);
772 struct anv_state
anv_state_pool_alloc(struct anv_state_pool
*pool
,
773 uint32_t state_size
, uint32_t alignment
);
774 struct anv_state
anv_state_pool_alloc_back(struct anv_state_pool
*pool
);
775 void anv_state_pool_free(struct anv_state_pool
*pool
, struct anv_state state
);
776 void anv_state_stream_init(struct anv_state_stream
*stream
,
777 struct anv_state_pool
*state_pool
,
778 uint32_t block_size
);
779 void anv_state_stream_finish(struct anv_state_stream
*stream
);
780 struct anv_state
anv_state_stream_alloc(struct anv_state_stream
*stream
,
781 uint32_t size
, uint32_t alignment
);
783 VkResult
anv_state_table_init(struct anv_state_table
*table
,
784 struct anv_device
*device
,
785 uint32_t initial_entries
);
786 void anv_state_table_finish(struct anv_state_table
*table
);
787 VkResult
anv_state_table_add(struct anv_state_table
*table
, uint32_t *idx
,
789 void anv_free_list_push(union anv_free_list
*list
,
790 struct anv_state_table
*table
,
791 uint32_t idx
, uint32_t count
);
792 struct anv_state
* anv_free_list_pop(union anv_free_list
*list
,
793 struct anv_state_table
*table
);
796 static inline struct anv_state
*
797 anv_state_table_get(struct anv_state_table
*table
, uint32_t idx
)
799 return &table
->map
[idx
].state
;
802 * Implements a pool of re-usable BOs. The interface is identical to that
803 * of block_pool except that each block is its own BO.
806 struct anv_device
*device
;
813 void anv_bo_pool_init(struct anv_bo_pool
*pool
, struct anv_device
*device
,
815 void anv_bo_pool_finish(struct anv_bo_pool
*pool
);
816 VkResult
anv_bo_pool_alloc(struct anv_bo_pool
*pool
, struct anv_bo
*bo
,
818 void anv_bo_pool_free(struct anv_bo_pool
*pool
, const struct anv_bo
*bo
);
820 struct anv_scratch_bo
{
825 struct anv_scratch_pool
{
826 /* Indexed by Per-Thread Scratch Space number (the hardware value) and stage */
827 struct anv_scratch_bo bos
[16][MESA_SHADER_STAGES
];
830 void anv_scratch_pool_init(struct anv_device
*device
,
831 struct anv_scratch_pool
*pool
);
832 void anv_scratch_pool_finish(struct anv_device
*device
,
833 struct anv_scratch_pool
*pool
);
834 struct anv_bo
*anv_scratch_pool_alloc(struct anv_device
*device
,
835 struct anv_scratch_pool
*pool
,
836 gl_shader_stage stage
,
837 unsigned per_thread_scratch
);
839 /** Implements a BO cache that ensures a 1-1 mapping of GEM BOs to anv_bos */
840 struct anv_bo_cache
{
841 struct hash_table
*bo_map
;
842 pthread_mutex_t mutex
;
845 VkResult
anv_bo_cache_init(struct anv_bo_cache
*cache
);
846 void anv_bo_cache_finish(struct anv_bo_cache
*cache
);
847 VkResult
anv_bo_cache_alloc(struct anv_device
*device
,
848 struct anv_bo_cache
*cache
,
849 uint64_t size
, uint64_t bo_flags
,
851 VkResult
anv_bo_cache_import(struct anv_device
*device
,
852 struct anv_bo_cache
*cache
,
853 int fd
, uint64_t bo_flags
,
855 VkResult
anv_bo_cache_export(struct anv_device
*device
,
856 struct anv_bo_cache
*cache
,
857 struct anv_bo
*bo_in
, int *fd_out
);
858 void anv_bo_cache_release(struct anv_device
*device
,
859 struct anv_bo_cache
*cache
,
862 struct anv_memory_type
{
863 /* Standard bits passed on to the client */
864 VkMemoryPropertyFlags propertyFlags
;
867 /* Driver-internal book-keeping */
868 VkBufferUsageFlags valid_buffer_usage
;
871 struct anv_memory_heap
{
872 /* Standard bits passed on to the client */
874 VkMemoryHeapFlags flags
;
876 /* Driver-internal book-keeping */
877 bool supports_48bit_addresses
;
880 struct anv_physical_device
{
881 VK_LOADER_DATA _loader_data
;
883 struct anv_instance
* instance
;
894 struct gen_device_info info
;
   /** Amount of "GPU memory" we want to advertise
    *
    * Clearly, this value is bogus since Intel is a UMA architecture. On
    * gen7 platforms, we are limited by GTT size unless we want to implement
    * fine-grained tracking and GTT splitting. On Broadwell and above we are
    * practically unlimited. However, we will never report more than 3/4 of
    * the total system RAM to try to avoid running out of RAM.
    */
   bool supports_48bit_addresses;
904 struct brw_compiler
* compiler
;
905 struct isl_device isl_dev
;
906 int cmd_parser_version
;
908 bool has_exec_capture
;
911 bool has_syncobj_wait
;
912 bool has_context_priority
;
914 bool has_context_isolation
;
916 struct anv_device_extension_table supported_extensions
;
919 uint32_t subslice_total
;
923 struct anv_memory_type types
[VK_MAX_MEMORY_TYPES
];
925 struct anv_memory_heap heaps
[VK_MAX_MEMORY_HEAPS
];
928 uint8_t driver_build_sha1
[20];
929 uint8_t pipeline_cache_uuid
[VK_UUID_SIZE
];
930 uint8_t driver_uuid
[VK_UUID_SIZE
];
931 uint8_t device_uuid
[VK_UUID_SIZE
];
933 struct disk_cache
* disk_cache
;
935 struct wsi_device wsi_device
;
940 struct anv_app_info
{
941 const char* app_name
;
942 uint32_t app_version
;
943 const char* engine_name
;
944 uint32_t engine_version
;
945 uint32_t api_version
;
948 struct anv_instance
{
949 VK_LOADER_DATA _loader_data
;
951 VkAllocationCallbacks alloc
;
953 struct anv_app_info app_info
;
955 struct anv_instance_extension_table enabled_extensions
;
956 struct anv_instance_dispatch_table dispatch
;
957 struct anv_device_dispatch_table device_dispatch
;
959 int physicalDeviceCount
;
960 struct anv_physical_device physicalDevice
;
962 bool pipeline_cache_enabled
;
964 struct vk_debug_report_instance debug_report_callbacks
;
967 VkResult
anv_init_wsi(struct anv_physical_device
*physical_device
);
968 void anv_finish_wsi(struct anv_physical_device
*physical_device
);
970 uint32_t anv_physical_device_api_version(struct anv_physical_device
*dev
);
971 bool anv_physical_device_extension_supported(struct anv_physical_device
*dev
,
975 VK_LOADER_DATA _loader_data
;
977 struct anv_device
* device
;
979 VkDeviceQueueCreateFlags flags
;
982 struct anv_pipeline_cache
{
983 struct anv_device
* device
;
984 pthread_mutex_t mutex
;
986 struct hash_table
* nir_cache
;
988 struct hash_table
* cache
;
991 struct anv_pipeline_bind_map
;
993 void anv_pipeline_cache_init(struct anv_pipeline_cache
*cache
,
994 struct anv_device
*device
,
996 void anv_pipeline_cache_finish(struct anv_pipeline_cache
*cache
);
998 struct anv_shader_bin
*
999 anv_pipeline_cache_search(struct anv_pipeline_cache
*cache
,
1000 const void *key
, uint32_t key_size
);
1001 struct anv_shader_bin
*
1002 anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache
*cache
,
1003 const void *key_data
, uint32_t key_size
,
1004 const void *kernel_data
, uint32_t kernel_size
,
1005 const void *constant_data
,
1006 uint32_t constant_data_size
,
1007 const struct brw_stage_prog_data
*prog_data
,
1008 uint32_t prog_data_size
,
1009 const struct anv_pipeline_bind_map
*bind_map
);
1011 struct anv_shader_bin
*
1012 anv_device_search_for_kernel(struct anv_device
*device
,
1013 struct anv_pipeline_cache
*cache
,
1014 const void *key_data
, uint32_t key_size
);
1016 struct anv_shader_bin
*
1017 anv_device_upload_kernel(struct anv_device
*device
,
1018 struct anv_pipeline_cache
*cache
,
1019 const void *key_data
, uint32_t key_size
,
1020 const void *kernel_data
, uint32_t kernel_size
,
1021 const void *constant_data
,
1022 uint32_t constant_data_size
,
1023 const struct brw_stage_prog_data
*prog_data
,
1024 uint32_t prog_data_size
,
1025 const struct anv_pipeline_bind_map
*bind_map
);
1028 struct nir_shader_compiler_options
;
1031 anv_device_search_for_nir(struct anv_device
*device
,
1032 struct anv_pipeline_cache
*cache
,
1033 const struct nir_shader_compiler_options
*nir_options
,
1034 unsigned char sha1_key
[20],
1038 anv_device_upload_nir(struct anv_device
*device
,
1039 struct anv_pipeline_cache
*cache
,
1040 const struct nir_shader
*nir
,
1041 unsigned char sha1_key
[20]);
1044 VK_LOADER_DATA _loader_data
;
1046 VkAllocationCallbacks alloc
;
1048 struct anv_instance
* instance
;
1049 uint32_t chipset_id
;
1051 struct gen_device_info info
;
1052 struct isl_device isl_dev
;
1055 bool can_chain_batches
;
1056 bool robust_buffer_access
;
1057 struct anv_device_extension_table enabled_extensions
;
1058 struct anv_device_dispatch_table dispatch
;
1060 pthread_mutex_t vma_mutex
;
1061 struct util_vma_heap vma_lo
;
1062 struct util_vma_heap vma_hi
;
1063 uint64_t vma_lo_available
;
1064 uint64_t vma_hi_available
;
1066 struct anv_bo_pool batch_bo_pool
;
1068 struct anv_bo_cache bo_cache
;
1070 struct anv_state_pool dynamic_state_pool
;
1071 struct anv_state_pool instruction_state_pool
;
1072 struct anv_state_pool binding_table_pool
;
1073 struct anv_state_pool surface_state_pool
;
1075 struct anv_bo workaround_bo
;
1076 struct anv_bo trivial_batch_bo
;
1077 struct anv_bo hiz_clear_bo
;
1079 struct anv_pipeline_cache default_pipeline_cache
;
1080 struct blorp_context blorp
;
1082 struct anv_state border_colors
;
1084 struct anv_queue queue
;
1086 struct anv_scratch_pool scratch_pool
;
1088 uint32_t default_mocs
;
1089 uint32_t external_mocs
;
1091 pthread_mutex_t mutex
;
1092 pthread_cond_t queue_submit
;
1096 static inline struct anv_state_pool
*
1097 anv_binding_table_pool(struct anv_device
*device
)
1099 if (device
->instance
->physicalDevice
.use_softpin
)
1100 return &device
->binding_table_pool
;
1102 return &device
->surface_state_pool
;
1105 static inline struct anv_state
1106 anv_binding_table_pool_alloc(struct anv_device
*device
) {
1107 if (device
->instance
->physicalDevice
.use_softpin
)
1108 return anv_state_pool_alloc(&device
->binding_table_pool
,
1109 device
->binding_table_pool
.block_size
, 0);
1111 return anv_state_pool_alloc_back(&device
->surface_state_pool
);
1115 anv_binding_table_pool_free(struct anv_device
*device
, struct anv_state state
) {
1116 anv_state_pool_free(anv_binding_table_pool(device
), state
);
1119 static inline uint32_t
1120 anv_mocs_for_bo(const struct anv_device
*device
, const struct anv_bo
*bo
)
1122 if (bo
->flags
& ANV_BO_EXTERNAL
)
1123 return device
->external_mocs
;
1125 return device
->default_mocs
;
1129 anv_state_flush(struct anv_device
*device
, struct anv_state state
)
1131 if (device
->info
.has_llc
)
1134 gen_flush_range(state
.map
, state
.alloc_size
);
1137 void anv_device_init_blorp(struct anv_device
*device
);
1138 void anv_device_finish_blorp(struct anv_device
*device
);
1140 VkResult
_anv_device_set_lost(struct anv_device
*device
,
1141 const char *file
, int line
,
1142 const char *msg
, ...);
1143 #define anv_device_set_lost(dev, ...) \
1144 _anv_device_set_lost(dev, __FILE__, __LINE__, __VA_ARGS__)
1147 anv_device_is_lost(struct anv_device
*device
)
1149 return unlikely(device
->_lost
);
1152 VkResult
anv_device_execbuf(struct anv_device
*device
,
1153 struct drm_i915_gem_execbuffer2
*execbuf
,
1154 struct anv_bo
**execbuf_bos
);
1155 VkResult
anv_device_query_status(struct anv_device
*device
);
1156 VkResult
anv_device_bo_busy(struct anv_device
*device
, struct anv_bo
*bo
);
1157 VkResult
anv_device_wait(struct anv_device
*device
, struct anv_bo
*bo
,
1160 void* anv_gem_mmap(struct anv_device
*device
,
1161 uint32_t gem_handle
, uint64_t offset
, uint64_t size
, uint32_t flags
);
1162 void anv_gem_munmap(void *p
, uint64_t size
);
1163 uint32_t anv_gem_create(struct anv_device
*device
, uint64_t size
);
1164 void anv_gem_close(struct anv_device
*device
, uint32_t gem_handle
);
1165 uint32_t anv_gem_userptr(struct anv_device
*device
, void *mem
, size_t size
);
1166 int anv_gem_busy(struct anv_device
*device
, uint32_t gem_handle
);
1167 int anv_gem_wait(struct anv_device
*device
, uint32_t gem_handle
, int64_t *timeout_ns
);
1168 int anv_gem_execbuffer(struct anv_device
*device
,
1169 struct drm_i915_gem_execbuffer2
*execbuf
);
1170 int anv_gem_set_tiling(struct anv_device
*device
, uint32_t gem_handle
,
1171 uint32_t stride
, uint32_t tiling
);
1172 int anv_gem_create_context(struct anv_device
*device
);
1173 bool anv_gem_has_context_priority(int fd
);
1174 int anv_gem_destroy_context(struct anv_device
*device
, int context
);
1175 int anv_gem_set_context_param(int fd
, int context
, uint32_t param
,
1177 int anv_gem_get_context_param(int fd
, int context
, uint32_t param
,
1179 int anv_gem_get_param(int fd
, uint32_t param
);
1180 int anv_gem_get_tiling(struct anv_device
*device
, uint32_t gem_handle
);
1181 bool anv_gem_get_bit6_swizzle(int fd
, uint32_t tiling
);
1182 int anv_gem_get_aperture(int fd
, uint64_t *size
);
1183 int anv_gem_gpu_get_reset_stats(struct anv_device
*device
,
1184 uint32_t *active
, uint32_t *pending
);
1185 int anv_gem_handle_to_fd(struct anv_device
*device
, uint32_t gem_handle
);
1186 int anv_gem_reg_read(struct anv_device
*device
,
1187 uint32_t offset
, uint64_t *result
);
1188 uint32_t anv_gem_fd_to_handle(struct anv_device
*device
, int fd
);
1189 int anv_gem_set_caching(struct anv_device
*device
, uint32_t gem_handle
, uint32_t caching
);
1190 int anv_gem_set_domain(struct anv_device
*device
, uint32_t gem_handle
,
1191 uint32_t read_domains
, uint32_t write_domain
);
1192 int anv_gem_sync_file_merge(struct anv_device
*device
, int fd1
, int fd2
);
1193 uint32_t anv_gem_syncobj_create(struct anv_device
*device
, uint32_t flags
);
1194 void anv_gem_syncobj_destroy(struct anv_device
*device
, uint32_t handle
);
1195 int anv_gem_syncobj_handle_to_fd(struct anv_device
*device
, uint32_t handle
);
1196 uint32_t anv_gem_syncobj_fd_to_handle(struct anv_device
*device
, int fd
);
1197 int anv_gem_syncobj_export_sync_file(struct anv_device
*device
,
1199 int anv_gem_syncobj_import_sync_file(struct anv_device
*device
,
1200 uint32_t handle
, int fd
);
1201 void anv_gem_syncobj_reset(struct anv_device
*device
, uint32_t handle
);
1202 bool anv_gem_supports_syncobj_wait(int fd
);
1203 int anv_gem_syncobj_wait(struct anv_device
*device
,
1204 uint32_t *handles
, uint32_t num_handles
,
1205 int64_t abs_timeout_ns
, bool wait_all
);
1207 bool anv_vma_alloc(struct anv_device
*device
, struct anv_bo
*bo
);
1208 void anv_vma_free(struct anv_device
*device
, struct anv_bo
*bo
);
1210 VkResult
anv_bo_init_new(struct anv_bo
*bo
, struct anv_device
*device
, uint64_t size
);
1212 struct anv_reloc_list
{
1213 uint32_t num_relocs
;
1214 uint32_t array_length
;
1215 struct drm_i915_gem_relocation_entry
* relocs
;
1216 struct anv_bo
** reloc_bos
;
1220 VkResult
anv_reloc_list_init(struct anv_reloc_list
*list
,
1221 const VkAllocationCallbacks
*alloc
);
1222 void anv_reloc_list_finish(struct anv_reloc_list
*list
,
1223 const VkAllocationCallbacks
*alloc
);
1225 VkResult
anv_reloc_list_add(struct anv_reloc_list
*list
,
1226 const VkAllocationCallbacks
*alloc
,
1227 uint32_t offset
, struct anv_bo
*target_bo
,
1230 struct anv_batch_bo
{
1231 /* Link in the anv_cmd_buffer.owned_batch_bos list */
1232 struct list_head link
;
1236 /* Bytes actually consumed in this batch BO */
1239 struct anv_reloc_list relocs
;
1243 const VkAllocationCallbacks
* alloc
;
1249 struct anv_reloc_list
* relocs
;
   /* This callback is called (with the associated user data) in the event
    * that the batch runs out of space.
    */
   VkResult (*extend_cb)(struct anv_batch *, void *);

   /**
    * Current error status of the command buffer. Used to track inconsistent
    * or incomplete command buffer states that are the consequence of run-time
    * errors such as out of memory scenarios. We want to track this in the
    * batch because the command buffer object is not visible to some parts
    * of the driver.
    */
1267 void *anv_batch_emit_dwords(struct anv_batch
*batch
, int num_dwords
);
1268 void anv_batch_emit_batch(struct anv_batch
*batch
, struct anv_batch
*other
);
1269 uint64_t anv_batch_emit_reloc(struct anv_batch
*batch
,
1270 void *location
, struct anv_bo
*bo
, uint32_t offset
);
1271 VkResult
anv_device_submit_simple_batch(struct anv_device
*device
,
1272 struct anv_batch
*batch
);
1274 static inline VkResult
1275 anv_batch_set_error(struct anv_batch
*batch
, VkResult error
)
1277 assert(error
!= VK_SUCCESS
);
1278 if (batch
->status
== VK_SUCCESS
)
1279 batch
->status
= error
;
1280 return batch
->status
;
1284 anv_batch_has_error(struct anv_batch
*batch
)
1286 return batch
->status
!= VK_SUCCESS
;
1289 struct anv_address
{
1294 #define ANV_NULL_ADDRESS ((struct anv_address) { NULL, 0 })
1297 anv_address_is_null(struct anv_address addr
)
1299 return addr
.bo
== NULL
&& addr
.offset
== 0;
1302 static inline uint64_t
1303 anv_address_physical(struct anv_address addr
)
1305 if (addr
.bo
&& (addr
.bo
->flags
& EXEC_OBJECT_PINNED
))
1306 return gen_canonical_address(addr
.bo
->offset
+ addr
.offset
);
1308 return gen_canonical_address(addr
.offset
);
1311 static inline struct anv_address
1312 anv_address_add(struct anv_address addr
, uint64_t offset
)
1314 addr
.offset
+= offset
;
1319 write_reloc(const struct anv_device
*device
, void *p
, uint64_t v
, bool flush
)
1321 unsigned reloc_size
= 0;
1322 if (device
->info
.gen
>= 8) {
1323 reloc_size
= sizeof(uint64_t);
1324 *(uint64_t *)p
= gen_canonical_address(v
);
1326 reloc_size
= sizeof(uint32_t);
1330 if (flush
&& !device
->info
.has_llc
)
1331 gen_flush_range(p
, reloc_size
);
1334 static inline uint64_t
1335 _anv_combine_address(struct anv_batch
*batch
, void *location
,
1336 const struct anv_address address
, uint32_t delta
)
1338 if (address
.bo
== NULL
) {
1339 return address
.offset
+ delta
;
1341 assert(batch
->start
<= location
&& location
< batch
->end
);
1343 return anv_batch_emit_reloc(batch
, location
, address
.bo
, address
.offset
+ delta
);
1347 #define __gen_address_type struct anv_address
1348 #define __gen_user_data struct anv_batch
1349 #define __gen_combine_address _anv_combine_address
1351 /* Wrapper macros needed to work around preprocessor argument issues. In
1352 * particular, arguments don't get pre-evaluated if they are concatenated.
1353 * This means that, if you pass GENX(3DSTATE_PS) into the emit macro, the
1354 * GENX macro won't get evaluated if the emit macro contains "cmd ## foo".
1355 * We can work around this easily enough with these helpers.
1357 #define __anv_cmd_length(cmd) cmd ## _length
1358 #define __anv_cmd_length_bias(cmd) cmd ## _length_bias
1359 #define __anv_cmd_header(cmd) cmd ## _header
1360 #define __anv_cmd_pack(cmd) cmd ## _pack
1361 #define __anv_reg_num(reg) reg ## _num
1363 #define anv_pack_struct(dst, struc, ...) do { \
1364 struct struc __template = { \
1367 __anv_cmd_pack(struc)(NULL, dst, &__template); \
1368 VG(VALGRIND_CHECK_MEM_IS_DEFINED(dst, __anv_cmd_length(struc) * 4)); \
1371 #define anv_batch_emitn(batch, n, cmd, ...) ({ \
1372 void *__dst = anv_batch_emit_dwords(batch, n); \
1374 struct cmd __template = { \
1375 __anv_cmd_header(cmd), \
1376 .DWordLength = n - __anv_cmd_length_bias(cmd), \
1379 __anv_cmd_pack(cmd)(batch, __dst, &__template); \
1384 #define anv_batch_emit_merge(batch, dwords0, dwords1) \
1388 STATIC_ASSERT(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1)); \
1389 dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0)); \
1392 for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++) \
1393 dw[i] = (dwords0)[i] | (dwords1)[i]; \
1394 VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, ARRAY_SIZE(dwords0) * 4));\
1397 #define anv_batch_emit(batch, cmd, name) \
1398 for (struct cmd name = { __anv_cmd_header(cmd) }, \
1399 *_dst = anv_batch_emit_dwords(batch, __anv_cmd_length(cmd)); \
1400 __builtin_expect(_dst != NULL, 1); \
1401 ({ __anv_cmd_pack(cmd)(batch, _dst, &name); \
1402 VG(VALGRIND_CHECK_MEM_IS_DEFINED(_dst, __anv_cmd_length(cmd) * 4)); \
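
/* Illustrative usage, not in the original header: anv_batch_emit() packs a
 * command inline and lets the body set fields first. The exact command and
 * field below are only an example of the idiom used in gen-specific code:
 *
 *    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 *       pc.CommandStreamerStallEnable = true;
 *    }
 */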
1406 /* MEMORY_OBJECT_CONTROL_STATE:
1407 * .GraphicsDataTypeGFDT = 0,
1408 * .LLCCacheabilityControlLLCCC = 0,
1409 * .L3CacheabilityControlL3CC = 1,
1413 /* MEMORY_OBJECT_CONTROL_STATE:
1414 * .LLCeLLCCacheabilityControlLLCCC = 0,
1415 * .L3CacheabilityControlL3CC = 1,
1417 #define GEN75_MOCS 1
1419 /* MEMORY_OBJECT_CONTROL_STATE:
1420 * .MemoryTypeLLCeLLCCacheabilityControl = WB,
1421 * .TargetCache = L3DefertoPATforLLCeLLCselection,
1422 * .AgeforQUADLRU = 0
1424 #define GEN8_MOCS 0x78
1426 /* MEMORY_OBJECT_CONTROL_STATE:
1427 * .MemoryTypeLLCeLLCCacheabilityControl = UCwithFenceifcoherentcycle,
1428 * .TargetCache = L3DefertoPATforLLCeLLCselection,
1429 * .AgeforQUADLRU = 0
1431 #define GEN8_EXTERNAL_MOCS 0x18
1433 /* Skylake: MOCS is now an index into an array of 62 different caching
1434 * configurations programmed by the kernel.
1437 /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */
1440 /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */
1441 #define GEN9_EXTERNAL_MOCS 1
1443 /* Cannonlake MOCS defines are duplicates of Skylake MOCS defines. */
1444 #define GEN10_MOCS GEN9_MOCS
1445 #define GEN10_EXTERNAL_MOCS GEN9_EXTERNAL_MOCS
1447 /* Ice Lake MOCS defines are duplicates of Skylake MOCS defines. */
1448 #define GEN11_MOCS GEN9_MOCS
1449 #define GEN11_EXTERNAL_MOCS GEN9_EXTERNAL_MOCS
1451 struct anv_device_memory
{
1453 struct anv_memory_type
* type
;
1454 VkDeviceSize map_size
;
1457 /* If set, we are holding reference to AHardwareBuffer
1458 * which we must release when memory is freed.
1460 struct AHardwareBuffer
* ahw
;
1464 * Header for Vertex URB Entry (VUE)
1466 struct anv_vue_header
{
1468 uint32_t RTAIndex
; /* RenderTargetArrayIndex */
1469 uint32_t ViewportIndex
;
1473 struct anv_descriptor_set_binding_layout
{
1475 /* The type of the descriptors in this binding */
1476 VkDescriptorType type
;
1479 /* Number of array elements in this binding */
1480 uint16_t array_size
;
   /* Index into the flattened descriptor set */
1483 uint16_t descriptor_index
;
1485 /* Index into the dynamic state array for a dynamic buffer */
1486 int16_t dynamic_offset_index
;
1488 /* Index into the descriptor set buffer views */
1489 int16_t buffer_index
;
1492 /* Index into the binding table for the associated surface */
1493 int16_t surface_index
;
1495 /* Index into the sampler table for the associated sampler */
1496 int16_t sampler_index
;
1498 /* Index into the image table for the associated image */
1499 int16_t image_index
;
1500 } stage
[MESA_SHADER_STAGES
];
1502 /* Immutable samplers (or NULL if no immutable samplers) */
1503 struct anv_sampler
**immutable_samplers
;
1506 struct anv_descriptor_set_layout
{
1507 /* Descriptor set layouts can be destroyed at almost any time */
1510 /* Number of bindings in this descriptor set */
1511 uint16_t binding_count
;
1513 /* Total size of the descriptor set with room for all array entries */
1516 /* Shader stages affected by this descriptor set */
1517 uint16_t shader_stages
;
1519 /* Number of buffers in this descriptor set */
1520 uint16_t buffer_count
;
1522 /* Number of dynamic offsets used by this descriptor set */
1523 uint16_t dynamic_offset_count
;
1525 /* Bindings in this descriptor set */
1526 struct anv_descriptor_set_binding_layout binding
[0];
1530 anv_descriptor_set_layout_ref(struct anv_descriptor_set_layout
*layout
)
1532 assert(layout
&& layout
->ref_cnt
>= 1);
1533 p_atomic_inc(&layout
->ref_cnt
);
1537 anv_descriptor_set_layout_unref(struct anv_device
*device
,
1538 struct anv_descriptor_set_layout
*layout
)
1540 assert(layout
&& layout
->ref_cnt
>= 1);
1541 if (p_atomic_dec_zero(&layout
->ref_cnt
))
1542 vk_free(&device
->alloc
, layout
);
1545 struct anv_descriptor
{
1546 VkDescriptorType type
;
1550 VkImageLayout layout
;
1551 struct anv_image_view
*image_view
;
1552 struct anv_sampler
*sampler
;
1556 struct anv_buffer
*buffer
;
1561 struct anv_buffer_view
*buffer_view
;
1565 struct anv_descriptor_set
{
1566 struct anv_descriptor_set_layout
*layout
;
1568 uint32_t buffer_count
;
1569 struct anv_buffer_view
*buffer_views
;
1570 struct anv_descriptor descriptors
[0];
1573 struct anv_buffer_view
{
1574 enum isl_format format
; /**< VkBufferViewCreateInfo::format */
1575 uint64_t range
; /**< VkBufferViewCreateInfo::range */
1577 struct anv_address address
;
1579 struct anv_state surface_state
;
1580 struct anv_state storage_surface_state
;
1581 struct anv_state writeonly_storage_surface_state
;
1583 struct brw_image_param storage_image_param
;
1586 struct anv_push_descriptor_set
{
1587 struct anv_descriptor_set set
;
1589 /* Put this field right behind anv_descriptor_set so it fills up the
1590 * descriptors[0] field. */
1591 struct anv_descriptor descriptors
[MAX_PUSH_DESCRIPTORS
];
1592 struct anv_buffer_view buffer_views
[MAX_PUSH_DESCRIPTORS
];
1595 struct anv_descriptor_pool
{
1600 struct anv_state_stream surface_state_stream
;
1601 void *surface_state_free_list
;
1606 enum anv_descriptor_template_entry_type
{
1607 ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_IMAGE
,
1608 ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_BUFFER
,
1609 ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_BUFFER_VIEW
1612 struct anv_descriptor_template_entry
{
1613 /* The type of descriptor in this entry */
1614 VkDescriptorType type
;
1616 /* Binding in the descriptor set */
1619 /* Offset at which to write into the descriptor set binding */
1620 uint32_t array_element
;
1622 /* Number of elements to write into the descriptor set binding */
1623 uint32_t array_count
;
1625 /* Offset into the user provided data */
1628 /* Stride between elements into the user provided data */
1632 struct anv_descriptor_update_template
{
1633 VkPipelineBindPoint bind_point
;
1635 /* The descriptor set this template corresponds to. This value is only
1636 * valid if the template was created with the templateType
1637 * VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET.
1641 /* Number of entries in this template */
1642 uint32_t entry_count
;
1644 /* Entries of the template */
1645 struct anv_descriptor_template_entry entries
[0];
1649 anv_descriptor_set_layout_size(const struct anv_descriptor_set_layout
*layout
);
1652 anv_descriptor_set_write_image_view(struct anv_descriptor_set
*set
,
1653 const struct gen_device_info
* const devinfo
,
1654 const VkDescriptorImageInfo
* const info
,
1655 VkDescriptorType type
,
1660 anv_descriptor_set_write_buffer_view(struct anv_descriptor_set
*set
,
1661 VkDescriptorType type
,
1662 struct anv_buffer_view
*buffer_view
,
1667 anv_descriptor_set_write_buffer(struct anv_descriptor_set
*set
,
1668 struct anv_device
*device
,
1669 struct anv_state_stream
*alloc_stream
,
1670 VkDescriptorType type
,
1671 struct anv_buffer
*buffer
,
1674 VkDeviceSize offset
,
1675 VkDeviceSize range
);
1678 anv_descriptor_set_write_template(struct anv_descriptor_set
*set
,
1679 struct anv_device
*device
,
1680 struct anv_state_stream
*alloc_stream
,
1681 const struct anv_descriptor_update_template
*template,
1685 anv_descriptor_set_create(struct anv_device
*device
,
1686 struct anv_descriptor_pool
*pool
,
1687 struct anv_descriptor_set_layout
*layout
,
1688 struct anv_descriptor_set
**out_set
);
1691 anv_descriptor_set_destroy(struct anv_device
*device
,
1692 struct anv_descriptor_pool
*pool
,
1693 struct anv_descriptor_set
*set
);
1695 #define ANV_DESCRIPTOR_SET_SHADER_CONSTANTS (UINT8_MAX - 1)
1696 #define ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS UINT8_MAX
1698 struct anv_pipeline_binding
{
1699 /* The descriptor set this surface corresponds to. The special value of
1700 * ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS indicates that the offset refers
1701 * to a color attachment and not a regular descriptor.
1705 /* Binding in the descriptor set */
1708 /* Index in the binding */
1711 /* Plane in the binding index */
1714 /* Input attachment index (relative to the subpass) */
1715 uint8_t input_attachment_index
;
1717 /* For a storage image, whether it is write-only */
1721 struct anv_pipeline_layout
{
1723 struct anv_descriptor_set_layout
*layout
;
1724 uint32_t dynamic_offset_start
;
1730 bool has_dynamic_offsets
;
1731 } stage
[MESA_SHADER_STAGES
];
1733 unsigned char sha1
[20];
1737 struct anv_device
* device
;
1740 VkBufferUsageFlags usage
;
1742 /* Set when bound */
1743 struct anv_address address
;
1746 static inline uint64_t
1747 anv_buffer_get_range(struct anv_buffer
*buffer
, uint64_t offset
, uint64_t range
)
1749 assert(offset
<= buffer
->size
);
1750 if (range
== VK_WHOLE_SIZE
) {
1751 return buffer
->size
- offset
;
1753 assert(range
+ offset
>= range
);
1754 assert(range
+ offset
<= buffer
->size
);
1759 enum anv_cmd_dirty_bits
{
1760 ANV_CMD_DIRTY_DYNAMIC_VIEWPORT
= 1 << 0, /* VK_DYNAMIC_STATE_VIEWPORT */
1761 ANV_CMD_DIRTY_DYNAMIC_SCISSOR
= 1 << 1, /* VK_DYNAMIC_STATE_SCISSOR */
1762 ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH
= 1 << 2, /* VK_DYNAMIC_STATE_LINE_WIDTH */
1763 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS
= 1 << 3, /* VK_DYNAMIC_STATE_DEPTH_BIAS */
1764 ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS
= 1 << 4, /* VK_DYNAMIC_STATE_BLEND_CONSTANTS */
1765 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS
= 1 << 5, /* VK_DYNAMIC_STATE_DEPTH_BOUNDS */
1766 ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK
= 1 << 6, /* VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK */
1767 ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK
= 1 << 7, /* VK_DYNAMIC_STATE_STENCIL_WRITE_MASK */
1768 ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE
= 1 << 8, /* VK_DYNAMIC_STATE_STENCIL_REFERENCE */
1769 ANV_CMD_DIRTY_DYNAMIC_ALL
= (1 << 9) - 1,
1770 ANV_CMD_DIRTY_PIPELINE
= 1 << 9,
1771 ANV_CMD_DIRTY_INDEX_BUFFER
= 1 << 10,
1772 ANV_CMD_DIRTY_RENDER_TARGETS
= 1 << 11,
1774 typedef uint32_t anv_cmd_dirty_mask_t
;
1776 enum anv_pipe_bits
{
1777 ANV_PIPE_DEPTH_CACHE_FLUSH_BIT
= (1 << 0),
1778 ANV_PIPE_STALL_AT_SCOREBOARD_BIT
= (1 << 1),
1779 ANV_PIPE_STATE_CACHE_INVALIDATE_BIT
= (1 << 2),
1780 ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT
= (1 << 3),
1781 ANV_PIPE_VF_CACHE_INVALIDATE_BIT
= (1 << 4),
1782 ANV_PIPE_DATA_CACHE_FLUSH_BIT
= (1 << 5),
1783 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT
= (1 << 10),
1784 ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT
= (1 << 11),
1785 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT
= (1 << 12),
1786 ANV_PIPE_DEPTH_STALL_BIT
= (1 << 13),
1787 ANV_PIPE_CS_STALL_BIT
= (1 << 20),
1789 /* This bit does not exist directly in PIPE_CONTROL. Instead it means that
1790 * a flush has happened but not a CS stall. The next time we do any sort
1791 * of invalidation we need to insert a CS stall at that time. Otherwise,
1792 * we would have to CS stall on every flush which could be bad.
1794 ANV_PIPE_NEEDS_CS_STALL_BIT
= (1 << 21),
1796 /* This bit does not exist directly in PIPE_CONTROL. It means that render
1797 * target operations are ongoing. Some operations like copies on the
1798 * command streamer might need to be aware of this to trigger the
1799 * appropriate stall before they can proceed with the copy.
1801 ANV_PIPE_RENDER_TARGET_WRITES
= (1 << 22),
1804 #define ANV_PIPE_FLUSH_BITS ( \
1805 ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | \
1806 ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
1807 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT)
1809 #define ANV_PIPE_STALL_BITS ( \
1810 ANV_PIPE_STALL_AT_SCOREBOARD_BIT | \
1811 ANV_PIPE_DEPTH_STALL_BIT | \
1812 ANV_PIPE_CS_STALL_BIT)
1814 #define ANV_PIPE_INVALIDATE_BITS ( \
1815 ANV_PIPE_STATE_CACHE_INVALIDATE_BIT | \
1816 ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT | \
1817 ANV_PIPE_VF_CACHE_INVALIDATE_BIT | \
1818 ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
1819 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT | \
1820 ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT)
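
/* Illustrative sketch, not in the original header, of the rule described
 * above for ANV_PIPE_NEEDS_CS_STALL_BIT: any invalidation issued after a
 * prior flush must first perform a CS stall. The helper name is
 * hypothetical.
 */
static inline enum anv_pipe_bits
anv_example_resolve_pending_cs_stall(enum anv_pipe_bits bits)
{
   if ((bits & ANV_PIPE_INVALIDATE_BITS) &&
       (bits & ANV_PIPE_NEEDS_CS_STALL_BIT)) {
      bits |= ANV_PIPE_CS_STALL_BIT;
      bits &= ~ANV_PIPE_NEEDS_CS_STALL_BIT;
   }
   return bits;
}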
1822 static inline enum anv_pipe_bits
1823 anv_pipe_flush_bits_for_access_flags(VkAccessFlags flags
)
1825 enum anv_pipe_bits pipe_bits
= 0;
1828 for_each_bit(b
, flags
) {
1829 switch ((VkAccessFlagBits
)(1 << b
)) {
1830 case VK_ACCESS_SHADER_WRITE_BIT
:
1831 pipe_bits
|= ANV_PIPE_DATA_CACHE_FLUSH_BIT
;
1833 case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT
:
1834 pipe_bits
|= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT
;
1836 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT
:
1837 pipe_bits
|= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT
;
1839 case VK_ACCESS_TRANSFER_WRITE_BIT
:
1840 pipe_bits
|= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT
;
1841 pipe_bits
|= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT
;
1843 case VK_ACCESS_MEMORY_WRITE_BIT
:
1844 pipe_bits
|= ANV_PIPE_FLUSH_BITS
;
1847 break; /* Nothing to do */
1854 static inline enum anv_pipe_bits
1855 anv_pipe_invalidate_bits_for_access_flags(VkAccessFlags flags
)
1857 enum anv_pipe_bits pipe_bits
= 0;
1860 for_each_bit(b
, flags
) {
1861 switch ((VkAccessFlagBits
)(1 << b
)) {
1862 case VK_ACCESS_INDIRECT_COMMAND_READ_BIT
:
1863 case VK_ACCESS_INDEX_READ_BIT
:
1864 case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT
:
1865 pipe_bits
|= ANV_PIPE_VF_CACHE_INVALIDATE_BIT
;
1867 case VK_ACCESS_UNIFORM_READ_BIT
:
1868 pipe_bits
|= ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT
;
1869 pipe_bits
|= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT
;
1871 case VK_ACCESS_SHADER_READ_BIT
:
1872 case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT
:
1873 case VK_ACCESS_TRANSFER_READ_BIT
:
1874 pipe_bits
|= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT
;
1876 case VK_ACCESS_MEMORY_READ_BIT
:
1877 pipe_bits
|= ANV_PIPE_INVALIDATE_BITS
;
1879 case VK_ACCESS_MEMORY_WRITE_BIT
:
1880 pipe_bits
|= ANV_PIPE_FLUSH_BITS
;
1883 break; /* Nothing to do */
#define VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV ( \
   VK_IMAGE_ASPECT_COLOR_BIT | \
   VK_IMAGE_ASPECT_PLANE_0_BIT | \
   VK_IMAGE_ASPECT_PLANE_1_BIT | \
   VK_IMAGE_ASPECT_PLANE_2_BIT)
#define VK_IMAGE_ASPECT_PLANES_BITS_ANV ( \
   VK_IMAGE_ASPECT_PLANE_0_BIT | \
   VK_IMAGE_ASPECT_PLANE_1_BIT | \
   VK_IMAGE_ASPECT_PLANE_2_BIT)

struct anv_vertex_binding {
   struct anv_buffer *buffer;
   VkDeviceSize offset;
};

#define ANV_PARAM_PUSH(offset)         ((1 << 16) | (uint32_t)(offset))
#define ANV_PARAM_PUSH_OFFSET(param)   ((param) & 0xffff)
struct anv_push_constants {
   /* Current allocated size of this push constants data structure.
    * Because a decent chunk of it may not be used (images on SKL, for
    * instance), we won't actually allocate the entire structure up-front.
    */
   uint32_t size;

   /* Push constant data provided by the client through vkPushConstants */
   uint8_t client_data[MAX_PUSH_CONSTANTS_SIZE];

   /* Used for vkCmdDispatchBase */
   uint32_t base_work_group_id[3];

   /* Image data for image_load_store on pre-SKL */
   struct brw_image_param images[MAX_GEN8_IMAGES];
};

struct anv_dynamic_state {
   struct {
      uint32_t count;
      VkViewport viewports[MAX_VIEWPORTS];
   } viewport;

   struct {
      uint32_t count;
      VkRect2D scissors[MAX_SCISSORS];
   } scissor;

   float blend_constants[4];

   struct {
      uint32_t front;
      uint32_t back;
   } stencil_compare_mask;

   struct {
      uint32_t front;
      uint32_t back;
   } stencil_write_mask;

   struct {
      uint32_t front;
      uint32_t back;
   } stencil_reference;
};
extern const struct anv_dynamic_state default_dynamic_state;

void anv_dynamic_state_copy(struct anv_dynamic_state *dest,
                            const struct anv_dynamic_state *src,
                            uint32_t copy_mask);

struct anv_surface_state {
   struct anv_state state;
   /** Address of the surface referred to by this state
    *
    * This address is relative to the start of the BO.
    */
   struct anv_address address;
   /* Address of the aux surface, if any
    *
    * This field is ANV_NULL_ADDRESS if and only if no aux surface exists.
    *
    * With the exception of gen8, the bottom 12 bits of this address' offset
    * include extra aux information.
    */
   struct anv_address aux_address;
   /* Address of the clear color, if any
    *
    * This address is relative to the start of the BO.
    */
   struct anv_address clear_address;
};

/**
 * Attachment state when recording a renderpass instance.
 *
 * The clear value is valid only if there exists a pending clear.
 */
struct anv_attachment_state {
   enum isl_aux_usage aux_usage;
   enum isl_aux_usage input_aux_usage;
   struct anv_surface_state color;
   struct anv_surface_state input;

   VkImageLayout current_layout;
   VkImageAspectFlags pending_clear_aspects;
   VkImageAspectFlags pending_load_aspects;

   VkClearValue clear_value;
   bool clear_color_is_zero_one;
   bool clear_color_is_zero;

   /* When multiview is active, attachments with a renderpass clear
    * operation have their respective layers cleared on the first
    * subpass that uses them, and only in that subpass. We keep track
    * of this using a bitfield to indicate which layers of an attachment
    * have not been cleared yet when multiview is active.
    */
   uint32_t pending_clear_views;
};
/** State tracking for a particular pipeline bind point
 *
 * This struct is the base struct for anv_cmd_graphics_state and
 * anv_cmd_compute_state. These are used to track state which is bound to a
 * particular type of pipeline. Generic state that applies per-stage such as
 * binding table offsets and push constants is tracked generically with a
 * per-stage array in anv_cmd_state.
 */
struct anv_cmd_pipeline_state {
   struct anv_pipeline *pipeline;
   struct anv_pipeline_layout *layout;

   struct anv_descriptor_set *descriptors[MAX_SETS];
   uint32_t dynamic_offsets[MAX_DYNAMIC_BUFFERS];

   struct anv_push_descriptor_set *push_descriptors[MAX_SETS];
};

/** State tracking for graphics pipeline
 *
 * This has anv_cmd_pipeline_state as a base struct to track things which get
 * bound to a graphics pipeline. Along with general pipeline bind point state
 * which is in the anv_cmd_pipeline_state base struct, it also contains other
 * state which is graphics-specific.
 */
struct anv_cmd_graphics_state {
   struct anv_cmd_pipeline_state base;

   anv_cmd_dirty_mask_t dirty;

   struct anv_dynamic_state dynamic;

   struct anv_buffer *index_buffer;
   uint32_t index_type; /**< 3DSTATE_INDEX_BUFFER.IndexFormat */
   uint32_t index_offset;
};

/** State tracking for compute pipeline
 *
 * This has anv_cmd_pipeline_state as a base struct to track things which get
 * bound to a compute pipeline. Along with general pipeline bind point state
 * which is in the anv_cmd_pipeline_state base struct, it also contains other
 * state which is compute-specific.
 */
struct anv_cmd_compute_state {
   struct anv_cmd_pipeline_state base;

   bool pipeline_dirty;

   struct anv_address num_workgroups;
};
/** State required while building cmd buffer */
struct anv_cmd_state {
   /* PIPELINE_SELECT.PipelineSelection */
   uint32_t current_pipeline;
   const struct gen_l3_config *current_l3_config;

   struct anv_cmd_graphics_state gfx;
   struct anv_cmd_compute_state compute;

   enum anv_pipe_bits pending_pipe_bits;
   VkShaderStageFlags descriptors_dirty;
   VkShaderStageFlags push_constants_dirty;

   struct anv_framebuffer *framebuffer;
   struct anv_render_pass *pass;
   struct anv_subpass *subpass;
   VkRect2D render_area;
   uint32_t restart_index;
   struct anv_vertex_binding vertex_bindings[MAX_VBS];
   VkShaderStageFlags push_constant_stages;
   struct anv_push_constants *push_constants[MESA_SHADER_STAGES];
   struct anv_state binding_tables[MESA_SHADER_STAGES];
   struct anv_state samplers[MESA_SHADER_STAGES];

   /**
    * Whether or not the gen8 PMA fix is enabled. We ensure that, at the top
    * of any command buffer, it is disabled by disabling it in EndCommandBuffer
    * and before invoking the secondary in ExecuteCommands.
    */
   bool pma_fix_enabled;

   /**
    * Whether or not we know for certain that HiZ is enabled for the current
    * subpass. If, for whatever reason, we are unsure as to whether HiZ is
    * enabled or not, this will be false.
    */
   bool hiz_enabled;

   /**
    * Array length is anv_cmd_state::pass::attachment_count. Array content is
    * valid only when recording a render pass instance.
    */
   struct anv_attachment_state *attachments;

   /**
    * Surface states for color render targets. These are stored in a single
    * flat array. For depth-stencil attachments, the surface state is simply
    * left blank.
    */
   struct anv_state render_pass_states;

   /**
    * A null surface state of the right size to match the framebuffer. This
    * is one of the states in render_pass_states.
    */
   struct anv_state null_surface_state;
};
struct anv_cmd_pool {
   VkAllocationCallbacks alloc;
   struct list_head cmd_buffers;
};

#define ANV_CMD_BUFFER_BATCH_SIZE 8192

enum anv_cmd_buffer_exec_mode {
   ANV_CMD_BUFFER_EXEC_MODE_PRIMARY,
   ANV_CMD_BUFFER_EXEC_MODE_EMIT,
   ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT,
   ANV_CMD_BUFFER_EXEC_MODE_CHAIN,
   ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN,
};

struct anv_cmd_buffer {
   VK_LOADER_DATA _loader_data;

   struct anv_device *device;

   struct anv_cmd_pool *pool;
   struct list_head pool_link;

   struct anv_batch batch;

   /* Fields required for the actual chain of anv_batch_bo's.
    *
    * These fields are initialized by anv_cmd_buffer_init_batch_bo_chain().
    */
   struct list_head batch_bos;
   enum anv_cmd_buffer_exec_mode exec_mode;

   /* A vector of anv_batch_bo pointers for every batch or surface buffer
    * referenced by this command buffer
    *
    * initialized by anv_cmd_buffer_init_batch_bo_chain()
    */
   struct u_vector seen_bbos;

   /* A vector of int32_t's for every block of binding tables.
    *
    * initialized by anv_cmd_buffer_init_batch_bo_chain()
    */
   struct u_vector bt_block_states;

   struct anv_reloc_list surface_relocs;
   /** Last seen surface state block pool center bo offset */
   uint32_t last_ss_pool_center;

   /* Serial for tracking buffer completion */
   uint32_t serial;

   /* Stream objects for storing temporary data */
   struct anv_state_stream surface_state_stream;
   struct anv_state_stream dynamic_state_stream;

   VkCommandBufferUsageFlags usage_flags;
   VkCommandBufferLevel level;

   struct anv_cmd_state state;
};
VkResult anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
                                  struct anv_cmd_buffer *secondary);
void anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer);
VkResult anv_cmd_buffer_execbuf(struct anv_device *device,
                                struct anv_cmd_buffer *cmd_buffer,
                                const VkSemaphore *in_semaphores,
                                uint32_t num_in_semaphores,
                                const VkSemaphore *out_semaphores,
                                uint32_t num_out_semaphores,
                                VkFence fence);

VkResult anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer);

VkResult
anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
                                          gl_shader_stage stage, uint32_t size);
#define anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, field) \
   anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, \
      (offsetof(struct anv_push_constants, field) + \
       sizeof(cmd_buffer->state.push_constants[0]->field)))
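
/* Illustrative example (not part of this header): before writing a field of
 * anv_push_constants for a given stage, a caller is expected to make sure the
 * per-stage allocation is large enough to hold it, roughly like:
 *
 *    VkResult result =
 *       anv_cmd_buffer_ensure_push_constant_field(cmd_buffer,
 *                                                 MESA_SHADER_COMPUTE,
 *                                                 base_work_group_id);
 *    if (result == VK_SUCCESS) {
 *       memcpy(cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE]
 *                 ->base_work_group_id,
 *              groups, sizeof(groups));
 *    }
 *
 * "groups" here is a hypothetical uint32_t[3] holding the base workgroup IDs.
 */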
struct anv_state anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
                                             const void *data, uint32_t size,
                                             uint32_t alignment);
struct anv_state anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
                                              uint32_t *a, uint32_t *b,
                                              uint32_t dwords, uint32_t alignment);

struct anv_address
anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer);
struct anv_state
anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t entries, uint32_t *state_offset);
struct anv_state
anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer);
struct anv_state
anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t size, uint32_t alignment);

VkResult
anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer);

void gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer);
void gen8_cmd_buffer_emit_depth_viewport(struct anv_cmd_buffer *cmd_buffer,
                                         bool depth_clamp_enable);
void gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer);

void anv_cmd_buffer_setup_attachments(struct anv_cmd_buffer *cmd_buffer,
                                      struct anv_render_pass *pass,
                                      struct anv_framebuffer *framebuffer,
                                      const VkClearValue *clear_values);

void anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer);

struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
                              gl_shader_stage stage);
struct anv_state
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer);

const struct anv_image_view *
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer);

VkResult
anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                         uint32_t num_entries,
                                         uint32_t *state_offset,
                                         struct anv_state *bt_state);

void anv_cmd_buffer_dump(struct anv_cmd_buffer *cmd_buffer);
enum anv_fence_type {
   ANV_FENCE_TYPE_NONE = 0,
   ANV_FENCE_TYPE_BO,
   ANV_FENCE_TYPE_SYNCOBJ,
   ANV_FENCE_TYPE_WSI,
};

enum anv_bo_fence_state {
   /** Indicates that this is a new (or newly reset fence) */
   ANV_BO_FENCE_STATE_RESET,

   /** Indicates that this fence has been submitted to the GPU but is still
    * (as far as we know) in use by the GPU.
    */
   ANV_BO_FENCE_STATE_SUBMITTED,

   ANV_BO_FENCE_STATE_SIGNALED,
};

struct anv_fence_impl {
   enum anv_fence_type type;

   union {
      /** Fence implementation for BO fences
       *
       * These fences use a BO and a set of CPU-tracked state flags. The BO
       * is added to the object list of the last execbuf call in a QueueSubmit
       * and is marked EXEC_WRITE. The state flags track when the BO has been
       * submitted to the kernel. We need to do this because Vulkan lets you
       * wait on a fence that has not yet been submitted and I915_GEM_BUSY
       * will say it's idle in this case.
       */
      struct {
         struct anv_bo bo;
         enum anv_bo_fence_state state;
      } bo;

      /** DRM syncobj handle for syncobj-based fences */
      uint32_t syncobj;

      /** WSI fence */
      struct wsi_fence *fence_wsi;
   };
};

struct anv_fence {
   /* Permanent fence state. Every fence has some form of permanent state
    * (type != ANV_FENCE_TYPE_NONE). This may be a BO to fence on (for
    * cross-process fences) or it could just be a dummy for use internally.
    */
   struct anv_fence_impl permanent;

   /* Temporary fence state. A fence *may* have temporary state. That state
    * is added to the fence by an import operation and is reset back to
    * ANV_FENCE_TYPE_NONE when the fence is reset. A fence with temporary
    * state cannot be signaled because the fence must already be signaled
    * before the temporary state can be exported from the fence in the other
    * process and imported here.
    */
   struct anv_fence_impl temporary;
};
struct anv_event {
   uint64_t semaphore;
   struct anv_state state;
};
enum anv_semaphore_type {
   ANV_SEMAPHORE_TYPE_NONE = 0,
   ANV_SEMAPHORE_TYPE_DUMMY,
   ANV_SEMAPHORE_TYPE_BO,
   ANV_SEMAPHORE_TYPE_SYNC_FILE,
   ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ,
};

struct anv_semaphore_impl {
   enum anv_semaphore_type type;

   union {
      /* A BO representing this semaphore when type == ANV_SEMAPHORE_TYPE_BO.
       * This BO will be added to the object list on any execbuf2 calls for
       * which this semaphore is used as a wait or signal fence. When used as
       * a signal fence, the EXEC_OBJECT_WRITE flag will be set.
       */
      struct anv_bo *bo;

      /* The sync file descriptor when type == ANV_SEMAPHORE_TYPE_SYNC_FILE.
       * If the semaphore is in the unsignaled state due to either just being
       * created or because it has been used for a wait, fd will be -1.
       */
      int fd;

      /* Sync object handle when type == ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ.
       * Unlike GEM BOs, DRM sync objects aren't deduplicated by the kernel on
       * import so we don't need to bother with a userspace cache.
       */
      uint32_t syncobj;
   };
};

struct anv_semaphore {
   /* Permanent semaphore state. Every semaphore has some form of permanent
    * state (type != ANV_SEMAPHORE_TYPE_NONE). This may be a BO to fence on
    * (for cross-process semaphores) or it could just be a dummy for use
    * internally.
    */
   struct anv_semaphore_impl permanent;

   /* Temporary semaphore state. A semaphore *may* have temporary state.
    * That state is added to the semaphore by an import operation and is reset
    * back to ANV_SEMAPHORE_TYPE_NONE when the semaphore is waited on. A
    * semaphore with temporary state cannot be signaled because the semaphore
    * must already be signaled before the temporary state can be exported from
    * the semaphore in the other process and imported here.
    */
   struct anv_semaphore_impl temporary;
};

void anv_semaphore_reset_temporary(struct anv_device *device,
                                   struct anv_semaphore *semaphore);

struct anv_shader_module {
   unsigned char sha1[20];
   uint32_t size;
   char data[0];
};
static inline gl_shader_stage
vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
{
   assert(__builtin_popcount(vk_stage) == 1);
   return ffs(vk_stage) - 1;
}

static inline VkShaderStageFlagBits
mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
{
   return (1 << mesa_stage);
}

#define ANV_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)

#define anv_foreach_stage(stage, stage_bits) \
   for (gl_shader_stage stage, \
        __tmp = (gl_shader_stage)((stage_bits) & ANV_STAGE_MASK); \
        stage = __builtin_ffs(__tmp) - 1, __tmp; \
        __tmp &= ~(1 << (stage)))
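
/* Example (illustrative): anv_foreach_stage iterates the Mesa stage indices
 * present in a VkShaderStageFlags mask, so a dirty-stage loop can be written
 * as:
 *
 *    anv_foreach_stage(s, cmd_buffer->state.descriptors_dirty) {
 *       // s is a gl_shader_stage; flush the binding table for stage s here
 *    }
 *
 * The mask is ANDed with ANV_STAGE_MASK first, so stray high bits in the
 * Vulkan flags are ignored.
 */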
struct anv_pipeline_bind_map {
   uint32_t surface_count;
   uint32_t sampler_count;
   uint32_t image_count;

   struct anv_pipeline_binding *surface_to_descriptor;
   struct anv_pipeline_binding *sampler_to_descriptor;
};

struct anv_shader_bin_key {
   uint32_t size;
   uint8_t data[0];
};

struct anv_shader_bin {
   uint32_t ref_cnt;

   const struct anv_shader_bin_key *key;

   struct anv_state kernel;
   uint32_t kernel_size;

   struct anv_state constant_data;
   uint32_t constant_data_size;

   const struct brw_stage_prog_data *prog_data;
   uint32_t prog_data_size;

   struct anv_pipeline_bind_map bind_map;
};

struct anv_shader_bin *
anv_shader_bin_create(struct anv_device *device,
                      const void *key, uint32_t key_size,
                      const void *kernel, uint32_t kernel_size,
                      const void *constant_data, uint32_t constant_data_size,
                      const struct brw_stage_prog_data *prog_data,
                      uint32_t prog_data_size, const void *prog_data_param,
                      const struct anv_pipeline_bind_map *bind_map);

void
anv_shader_bin_destroy(struct anv_device *device, struct anv_shader_bin *shader);

static inline void
anv_shader_bin_ref(struct anv_shader_bin *shader)
{
   assert(shader && shader->ref_cnt >= 1);
   p_atomic_inc(&shader->ref_cnt);
}

static inline void
anv_shader_bin_unref(struct anv_device *device, struct anv_shader_bin *shader)
{
   assert(shader && shader->ref_cnt >= 1);
   if (p_atomic_dec_zero(&shader->ref_cnt))
      anv_shader_bin_destroy(device, shader);
}
struct anv_pipeline {
   struct anv_device *device;
   struct anv_batch batch;
   uint32_t batch_data[512];
   struct anv_reloc_list batch_relocs;
   uint32_t dynamic_state_mask;
   struct anv_dynamic_state dynamic_state;

   struct anv_subpass *subpass;

   bool needs_data_cache;

   struct anv_shader_bin *shaders[MESA_SHADER_STAGES];

   struct {
      const struct gen_l3_config *l3_config;
      uint32_t total_size;
   } urb;

   VkShaderStageFlags active_stages;
   struct anv_state blend_state;

   struct anv_pipeline_vertex_binding {
      uint32_t stride;
      bool instanced;
      uint32_t instance_divisor;
   } vb[MAX_VBS];

   bool primitive_restart;

   uint32_t cs_right_mask;

   bool depth_test_enable;
   bool writes_stencil;
   bool stencil_test_enable;
   bool depth_clamp_enable;
   bool sample_shading_enable;

   struct {
      uint32_t depth_stencil_state[3];
   } gen7;

   struct {
      uint32_t wm_depth_stencil[3];
   } gen8;

   struct {
      uint32_t wm_depth_stencil[4];
   } gen9;

   uint32_t interface_descriptor_data[8];
};
static inline bool
anv_pipeline_has_stage(const struct anv_pipeline *pipeline,
                       gl_shader_stage stage)
{
   return (pipeline->active_stages & mesa_to_vk_shader_stage(stage)) != 0;
}

#define ANV_DECL_GET_PROG_DATA_FUNC(prefix, stage)              \
static inline const struct brw_##prefix##_prog_data *           \
get_##prefix##_prog_data(const struct anv_pipeline *pipeline)   \
{                                                                \
   if (anv_pipeline_has_stage(pipeline, stage)) {               \
      return (const struct brw_##prefix##_prog_data *)          \
             pipeline->shaders[stage]->prog_data;                \
   } else {                                                      \
      return NULL;                                               \
   }                                                             \
}

ANV_DECL_GET_PROG_DATA_FUNC(vs,  MESA_SHADER_VERTEX)
ANV_DECL_GET_PROG_DATA_FUNC(tcs, MESA_SHADER_TESS_CTRL)
ANV_DECL_GET_PROG_DATA_FUNC(tes, MESA_SHADER_TESS_EVAL)
ANV_DECL_GET_PROG_DATA_FUNC(gs,  MESA_SHADER_GEOMETRY)
ANV_DECL_GET_PROG_DATA_FUNC(wm,  MESA_SHADER_FRAGMENT)
ANV_DECL_GET_PROG_DATA_FUNC(cs,  MESA_SHADER_COMPUTE)

static inline const struct brw_vue_prog_data *
anv_pipeline_get_last_vue_prog_data(const struct anv_pipeline *pipeline)
{
   if (anv_pipeline_has_stage(pipeline, MESA_SHADER_GEOMETRY))
      return &get_gs_prog_data(pipeline)->base;
   else if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
      return &get_tes_prog_data(pipeline)->base;
   else
      return &get_vs_prog_data(pipeline)->base;
}
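
/* Example (illustrative): the generated getters return NULL when the stage is
 * not present, so callers either check anv_pipeline_has_stage() first or test
 * the returned pointer:
 *
 *    const struct brw_wm_prog_data *wm = get_wm_prog_data(pipeline);
 *    if (wm != NULL) {
 *       // fragment shading is enabled; wm describes the compiled FS
 *    }
 *
 * anv_pipeline_get_last_vue_prog_data() follows the same pattern but picks
 * whichever of GS, TES, or VS is the last enabled geometry stage.
 */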
VkResult
anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
                  struct anv_pipeline_cache *cache,
                  const VkGraphicsPipelineCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *alloc);

VkResult
anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkComputePipelineCreateInfo *info,
                        const struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info);

struct anv_format_plane {
   enum isl_format isl_format:16;
   struct isl_swizzle swizzle;

   /* Whether this plane contains chroma channels */
   bool has_chroma;

   /* For downscaling of YUV planes */
   uint8_t denominator_scales[2];

   /* How to map sampled ycbcr planes to a single 4 component element. */
   struct isl_swizzle ycbcr_swizzle;

   /* What aspect is associated to this plane */
   VkImageAspectFlags aspect;
};

struct anv_format {
   struct anv_format_plane planes[3];
   VkFormat vk_format;
   uint8_t n_planes;
   bool can_ycbcr;
};
static inline uint32_t
anv_image_aspect_to_plane(VkImageAspectFlags image_aspects,
                          VkImageAspectFlags aspect_mask)
{
   switch (aspect_mask) {
   case VK_IMAGE_ASPECT_COLOR_BIT:
   case VK_IMAGE_ASPECT_DEPTH_BIT:
   case VK_IMAGE_ASPECT_PLANE_0_BIT:
      return 0;

   case VK_IMAGE_ASPECT_STENCIL_BIT:
      if ((image_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) == 0)
         return 0;
      /* Fall-through */
   case VK_IMAGE_ASPECT_PLANE_1_BIT:
      return 1;

   case VK_IMAGE_ASPECT_PLANE_2_BIT:
      return 2;

   default:
      /* Purposefully assert with depth/stencil aspects. */
      unreachable("invalid image aspect");
   }
}

static inline VkImageAspectFlags
anv_plane_to_aspect(VkImageAspectFlags image_aspects,
                    uint32_t plane)
{
   if (image_aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
      if (util_bitcount(image_aspects) > 1)
         return VK_IMAGE_ASPECT_PLANE_0_BIT << plane;
      return VK_IMAGE_ASPECT_COLOR_BIT;
   }
   if (image_aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
      return VK_IMAGE_ASPECT_DEPTH_BIT << plane;
   assert(image_aspects == VK_IMAGE_ASPECT_STENCIL_BIT);
   return VK_IMAGE_ASPECT_STENCIL_BIT;
}

#define anv_foreach_image_aspect_bit(b, image, aspects) \
   for_each_bit(b, anv_image_expand_aspects(image, aspects))
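
/* Example (illustrative): for a 2-plane YCbCr image (aspects =
 * PLANE_0 | PLANE_1), anv_image_aspect_to_plane() maps PLANE_1 to plane 1 and
 * anv_plane_to_aspect() maps plane 1 back to VK_IMAGE_ASPECT_PLANE_1_BIT; for
 * a single-plane color image both helpers collapse onto plane 0 and
 * VK_IMAGE_ASPECT_COLOR_BIT. Per-plane loops are usually written with the
 * macro above:
 *
 *    uint32_t b;
 *    anv_foreach_image_aspect_bit(b, image, range->aspectMask) {
 *       VkImageAspectFlagBits aspect = 1U << b;
 *       uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
 *       // operate on image->planes[plane] here
 *    }
 *
 * "range" stands in for a VkImageSubresourceRange provided by the caller.
 */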
const struct anv_format *
anv_get_format(VkFormat format);

static inline uint32_t
anv_get_format_planes(VkFormat vk_format)
{
   const struct anv_format *format = anv_get_format(vk_format);

   return format != NULL ? format->n_planes : 0;
}

struct anv_format_plane
anv_get_format_plane(const struct gen_device_info *devinfo, VkFormat vk_format,
                     VkImageAspectFlagBits aspect, VkImageTiling tiling);

static inline enum isl_format
anv_get_isl_format(const struct gen_device_info *devinfo, VkFormat vk_format,
                   VkImageAspectFlags aspect, VkImageTiling tiling)
{
   return anv_get_format_plane(devinfo, vk_format, aspect, tiling).isl_format;
}

static inline struct isl_swizzle
anv_swizzle_for_render(struct isl_swizzle swizzle)
{
   /* Sometimes the swizzle will have alpha map to one. We do this to fake
    * RGB as RGBA for texturing.
    */
   assert(swizzle.a == ISL_CHANNEL_SELECT_ONE ||
          swizzle.a == ISL_CHANNEL_SELECT_ALPHA);

   /* But it doesn't matter what we render to that channel. */
   swizzle.a = ISL_CHANNEL_SELECT_ALPHA;

   return swizzle;
}

void
anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm);
/**
 * Subsurface of an anv_image.
 */
struct anv_surface {
   /** Valid only if isl_surf::size_B > 0. */
   struct isl_surf isl;

   /**
    * Offset from VkImage's base address, as bound by vkBindImageMemory().
    */
   uint32_t offset;
};

struct anv_image {
   VkImageType type; /**< VkImageCreateInfo::imageType */
   /* The original VkFormat provided by the client. This may not match any
    * of the actual surface formats.
    */
   VkFormat vk_format;
   const struct anv_format *format;

   VkImageAspectFlags aspects;
   VkExtent3D extent;
   uint32_t levels;
   uint32_t array_size;
   uint32_t samples; /**< VkImageCreateInfo::samples */
   uint32_t n_planes;
   VkImageUsageFlags usage; /**< Superset of VkImageCreateInfo::usage. */
   VkImageCreateFlags create_flags; /* Flags used when creating image. */
   VkImageTiling tiling; /** VkImageCreateInfo::tiling */

   /** True if this image needs to be bound to an appropriately tiled BO.
    *
    * When not using modifiers, consumers such as X11, Wayland, and KMS need
    * the tiling passed via I915_GEM_SET_TILING. When exporting these buffers
    * we require a dedicated allocation so that we can know to allocate a
    * tiled buffer.
    */
   bool needs_set_tiling;

   /**
    * Must be DRM_FORMAT_MOD_INVALID unless tiling is
    * VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT.
    */
   uint64_t drm_format_mod;

   /* Whether the image is made of several underlying buffer objects rather
    * than a single one with different offsets.
    */
   bool disjoint;

   /* All the formats that can be used when creating views of this image
    * are CCS_E compatible.
    */
   bool ccs_e_compatible;

   /* Image was created with external format. */
   bool external_format;

   /**
    * Image subsurfaces
    *
    * For each plane x, anv_image::planes[x].surface is valid if and only if
    * anv_image::aspects has an x aspect. Refer to anv_image_aspect_to_plane()
    * to figure the number associated with a given aspect.
    *
    * The hardware requires that the depth buffer and stencil buffer be
    * separate surfaces. From Vulkan's perspective, though, depth and stencil
    * reside in the same VkImage. To satisfy both the hardware and Vulkan, we
    * allocate the depth and stencil buffers as separate surfaces in the same
    * bo.
    *
    * Memory layout:
    *
    *    -----------------------
    *    |     surface0        |   /|\
    *    -----------------------    |
    *    |   shadow surface0   |    |
    *    -----------------------    | Plane 0
    *    |    aux surface0     |    |
    *    -----------------------    |
    *    | fast clear colors0  |   \|/
    *    -----------------------
    *    |     surface1        |   /|\
    *    -----------------------    |
    *    |   shadow surface1   |    |
    *    -----------------------    | Plane 1
    *    |    aux surface1     |    |
    *    -----------------------    |
    *    | fast clear colors1  |   \|/
    *    -----------------------
    *    |        ...          |
    *    |                     |
    *    -----------------------
    */
   struct {
      /**
       * Offset of the entire plane (whenever the image is disjoint this is
       * set to 0).
       */
      uint32_t offset;

      struct anv_surface surface;

      /**
       * A surface which shadows the main surface and may have different
       * tiling. This is used for sampling using a tiling that isn't supported
       * for other operations.
       */
      struct anv_surface shadow_surface;

      /**
       * For color images, this is the aux usage for this image when not used
       * as a color attachment.
       *
       * For depth/stencil images, this is set to ISL_AUX_USAGE_HIZ if the
       * image has a HiZ buffer.
       */
      enum isl_aux_usage aux_usage;

      struct anv_surface aux_surface;

      /**
       * Offset of the fast clear state (used to compute the
       * fast_clear_state_offset of the following planes).
       */
      uint32_t fast_clear_state_offset;

      /**
       * BO associated with this plane, set when bound.
       */
      struct anv_address address;

      /**
       * When destroying the image, also free the bo.
       */
      bool bo_is_owned;
   } planes[3];
};
/* The ordering of this enum is important */
enum anv_fast_clear_type {
   /** Image does not have/support any fast-clear blocks */
   ANV_FAST_CLEAR_NONE = 0,
   /** Image has/supports fast-clear but only to the default value */
   ANV_FAST_CLEAR_DEFAULT_VALUE = 1,
   /** Image has/supports fast-clear with an arbitrary fast-clear value */
   ANV_FAST_CLEAR_ANY = 2,
};

/* Returns the number of auxiliary buffer levels attached to an image. */
static inline uint8_t
anv_image_aux_levels(const struct anv_image * const image,
                     VkImageAspectFlagBits aspect)
{
   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
   return image->planes[plane].aux_surface.isl.size_B > 0 ?
          image->planes[plane].aux_surface.isl.levels : 0;
}

/* Returns the number of auxiliary buffer layers attached to an image. */
static inline uint32_t
anv_image_aux_layers(const struct anv_image * const image,
                     VkImageAspectFlagBits aspect,
                     const uint8_t miplevel)
{
   assert(image);

   /* The miplevel must exist in the main buffer. */
   assert(miplevel < image->levels);

   if (miplevel >= anv_image_aux_levels(image, aspect)) {
      /* There are no layers with auxiliary data because the miplevel has no
       * auxiliary data.
       */
      return 0;
   } else {
      uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
      return MAX2(image->planes[plane].aux_surface.isl.logical_level0_px.array_len,
                  image->planes[plane].aux_surface.isl.logical_level0_px.depth >> miplevel);
   }
}
static inline struct anv_address
anv_image_get_clear_color_addr(const struct anv_device *device,
                               const struct anv_image *image,
                               VkImageAspectFlagBits aspect)
{
   assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);

   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
   return anv_address_add(image->planes[plane].address,
                          image->planes[plane].fast_clear_state_offset);
}

static inline struct anv_address
anv_image_get_fast_clear_type_addr(const struct anv_device *device,
                                   const struct anv_image *image,
                                   VkImageAspectFlagBits aspect)
{
   struct anv_address addr =
      anv_image_get_clear_color_addr(device, image, aspect);

   const unsigned clear_color_state_size = device->info.gen >= 10 ?
      device->isl_dev.ss.clear_color_state_size :
      device->isl_dev.ss.clear_value_size;
   return anv_address_add(addr, clear_color_state_size);
}

static inline struct anv_address
anv_image_get_compression_state_addr(const struct anv_device *device,
                                     const struct anv_image *image,
                                     VkImageAspectFlagBits aspect,
                                     uint32_t level, uint32_t array_layer)
{
   assert(level < anv_image_aux_levels(image, aspect));
   assert(array_layer < anv_image_aux_layers(image, aspect, level));
   UNUSED uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
   assert(image->planes[plane].aux_usage == ISL_AUX_USAGE_CCS_E);

   struct anv_address addr =
      anv_image_get_fast_clear_type_addr(device, image, aspect);
   addr.offset += 4; /* Go past the fast clear type */

   if (image->type == VK_IMAGE_TYPE_3D) {
      for (uint32_t l = 0; l < level; l++)
         addr.offset += anv_minify(image->extent.depth, l) * 4;
   } else {
      addr.offset += level * image->array_size * 4;
   }
   addr.offset += array_layer * 4;

   return addr;
}

/* Returns true if a HiZ-enabled depth buffer can be sampled from. */
static inline bool
anv_can_sample_with_hiz(const struct gen_device_info * const devinfo,
                        const struct anv_image *image)
{
   if (!(image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT))
      return false;

   if (devinfo->gen < 8)
      return false;

   return image->samples == 1;
}
void
anv_cmd_buffer_mark_image_written(struct anv_cmd_buffer *cmd_buffer,
                                  const struct anv_image *image,
                                  VkImageAspectFlagBits aspect,
                                  enum isl_aux_usage aux_usage,
                                  uint32_t level,
                                  uint32_t base_layer,
                                  uint32_t layer_count);

void
anv_image_clear_color(struct anv_cmd_buffer *cmd_buffer,
                      const struct anv_image *image,
                      VkImageAspectFlagBits aspect,
                      enum isl_aux_usage aux_usage,
                      enum isl_format format, struct isl_swizzle swizzle,
                      uint32_t level, uint32_t base_layer, uint32_t layer_count,
                      VkRect2D area, union isl_color_value clear_color);
void
anv_image_clear_depth_stencil(struct anv_cmd_buffer *cmd_buffer,
                              const struct anv_image *image,
                              VkImageAspectFlags aspects,
                              enum isl_aux_usage depth_aux_usage,
                              uint32_t level,
                              uint32_t base_layer, uint32_t layer_count,
                              VkRect2D area,
                              float depth_value, uint8_t stencil_value);
void
anv_image_msaa_resolve(struct anv_cmd_buffer *cmd_buffer,
                       const struct anv_image *src_image,
                       enum isl_aux_usage src_aux_usage,
                       uint32_t src_level, uint32_t src_base_layer,
                       const struct anv_image *dst_image,
                       enum isl_aux_usage dst_aux_usage,
                       uint32_t dst_level, uint32_t dst_base_layer,
                       VkImageAspectFlagBits aspect,
                       uint32_t src_x, uint32_t src_y,
                       uint32_t dst_x, uint32_t dst_y,
                       uint32_t width, uint32_t height,
                       uint32_t layer_count,
                       enum blorp_filter filter);
void
anv_image_hiz_op(struct anv_cmd_buffer *cmd_buffer,
                 const struct anv_image *image,
                 VkImageAspectFlagBits aspect, uint32_t level,
                 uint32_t base_layer, uint32_t layer_count,
                 enum isl_aux_op hiz_op);
void
anv_image_hiz_clear(struct anv_cmd_buffer *cmd_buffer,
                    const struct anv_image *image,
                    VkImageAspectFlags aspects,
                    uint32_t level,
                    uint32_t base_layer, uint32_t layer_count,
                    VkRect2D area, uint8_t stencil_value);
void
anv_image_mcs_op(struct anv_cmd_buffer *cmd_buffer,
                 const struct anv_image *image,
                 enum isl_format format,
                 VkImageAspectFlagBits aspect,
                 uint32_t base_layer, uint32_t layer_count,
                 enum isl_aux_op mcs_op, union isl_color_value *clear_value,
                 bool predicate);
void
anv_image_ccs_op(struct anv_cmd_buffer *cmd_buffer,
                 const struct anv_image *image,
                 enum isl_format format,
                 VkImageAspectFlagBits aspect, uint32_t level,
                 uint32_t base_layer, uint32_t layer_count,
                 enum isl_aux_op ccs_op, union isl_color_value *clear_value,
                 bool predicate);

void
anv_image_copy_to_shadow(struct anv_cmd_buffer *cmd_buffer,
                         const struct anv_image *image,
                         uint32_t base_level, uint32_t level_count,
                         uint32_t base_layer, uint32_t layer_count);

enum isl_aux_usage
anv_layout_to_aux_usage(const struct gen_device_info * const devinfo,
                        const struct anv_image *image,
                        const VkImageAspectFlagBits aspect,
                        const VkImageLayout layout);

enum anv_fast_clear_type
anv_layout_to_fast_clear_type(const struct gen_device_info * const devinfo,
                              const struct anv_image * const image,
                              const VkImageAspectFlagBits aspect,
                              const VkImageLayout layout);
/* This is defined as a macro so that it works for both
 * VkImageSubresourceRange and VkImageSubresourceLayers
 */
#define anv_get_layerCount(_image, _range) \
   ((_range)->layerCount == VK_REMAINING_ARRAY_LAYERS ? \
    (_image)->array_size - (_range)->baseArrayLayer : (_range)->layerCount)

static inline uint32_t
anv_get_levelCount(const struct anv_image *image,
                   const VkImageSubresourceRange *range)
{
   return range->levelCount == VK_REMAINING_MIP_LEVELS ?
          image->levels - range->baseMipLevel : range->levelCount;
}
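
/* Example (illustrative): these helpers exist so that
 * VK_REMAINING_MIP_LEVELS and VK_REMAINING_ARRAY_LAYERS never leak into
 * internal loops. A typical per-subresource walk looks like:
 *
 *    uint32_t level_count = anv_get_levelCount(image, range);
 *    uint32_t layer_count = anv_get_layerCount(image, range);
 *    for (uint32_t l = 0; l < level_count; l++) {
 *       for (uint32_t a = 0; a < layer_count; a++) {
 *          // process mip (range->baseMipLevel + l),
 *          //         layer (range->baseArrayLayer + a)
 *       }
 *    }
 *
 * with "range" standing in for the caller's VkImageSubresourceRange.
 */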
static inline VkImageAspectFlags
anv_image_expand_aspects(const struct anv_image *image,
                         VkImageAspectFlags aspects)
{
   /* If the underlying image has color plane aspects and
    * VK_IMAGE_ASPECT_COLOR_BIT has been requested, then return the aspects of
    * the underlying image.
    */
   if ((image->aspects & VK_IMAGE_ASPECT_PLANES_BITS_ANV) != 0 &&
       aspects == VK_IMAGE_ASPECT_COLOR_BIT)
      return image->aspects;

   return aspects;
}

static inline bool
anv_image_aspects_compatible(VkImageAspectFlags aspects1,
                             VkImageAspectFlags aspects2)
{
   if (aspects1 == aspects2)
      return true;

   /* Color aspects are compatible with one another as long as they refer to
    * the same number of planes.
    */
   if ((aspects1 & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) != 0 &&
       (aspects2 & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) != 0 &&
       util_bitcount(aspects1) == util_bitcount(aspects2))
      return true;

   return false;
}

struct anv_image_view {
   const struct anv_image *image; /**< VkImageViewCreateInfo::image */

   VkImageAspectFlags aspect_mask;
   VkFormat vk_format;
   VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */

   unsigned n_planes;
   struct {
      uint32_t image_plane;

      struct isl_view isl;

      /**
       * RENDER_SURFACE_STATE when using image as a sampler surface with an
       * image layout of SHADER_READ_ONLY_OPTIMAL or
       * DEPTH_STENCIL_READ_ONLY_OPTIMAL.
       */
      struct anv_surface_state optimal_sampler_surface_state;

      /**
       * RENDER_SURFACE_STATE when using image as a sampler surface with an
       * image layout of GENERAL.
       */
      struct anv_surface_state general_sampler_surface_state;

      /**
       * RENDER_SURFACE_STATE when using image as a storage image. Separate
       * states for write-only and readable, using the real format for
       * write-only and the lowered format for readable.
       */
      struct anv_surface_state storage_surface_state;
      struct anv_surface_state writeonly_storage_surface_state;

      struct brw_image_param storage_image_param;
   } planes[3];
};

enum anv_image_view_state_flags {
   ANV_IMAGE_VIEW_STATE_STORAGE_WRITE_ONLY   = (1 << 0),
   ANV_IMAGE_VIEW_STATE_TEXTURE_OPTIMAL      = (1 << 1),
};

void anv_image_fill_surface_state(struct anv_device *device,
                                  const struct anv_image *image,
                                  VkImageAspectFlagBits aspect,
                                  const struct isl_view *view,
                                  isl_surf_usage_flags_t view_usage,
                                  enum isl_aux_usage aux_usage,
                                  const union isl_color_value *clear_color,
                                  enum anv_image_view_state_flags flags,
                                  struct anv_surface_state *state_inout,
                                  struct brw_image_param *image_param_out);
struct anv_image_create_info {
   const VkImageCreateInfo *vk_info;

   /** An opt-in bitmask which filters an ISL-mapping of the Vulkan tiling. */
   isl_tiling_flags_t isl_tiling_flags;

   /** These flags will be added to any derived from VkImageCreateInfo. */
   isl_surf_usage_flags_t isl_extra_usage_flags;

   uint32_t stride;
   bool external_format;
};

VkResult anv_image_create(VkDevice _device,
                          const struct anv_image_create_info *info,
                          const VkAllocationCallbacks *alloc,
                          VkImage *pImage);

const struct anv_surface *
anv_image_get_surface_for_aspect_mask(const struct anv_image *image,
                                      VkImageAspectFlags aspect_mask);

enum isl_format
anv_isl_format_for_descriptor_type(VkDescriptorType type);
static inline struct VkExtent3D
anv_sanitize_image_extent(const VkImageType imageType,
                          const struct VkExtent3D imageExtent)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkExtent3D) { imageExtent.width, 1, 1 };
   case VK_IMAGE_TYPE_2D:
      return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
   case VK_IMAGE_TYPE_3D:
      return imageExtent;
   default:
      unreachable("invalid image type");
   }
}

static inline struct VkOffset3D
anv_sanitize_image_offset(const VkImageType imageType,
                          const struct VkOffset3D imageOffset)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkOffset3D) { imageOffset.x, 0, 0 };
   case VK_IMAGE_TYPE_2D:
      return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
   case VK_IMAGE_TYPE_3D:
      return imageOffset;
   default:
      unreachable("invalid image type");
   }
}

VkFormatFeatureFlags
anv_get_image_format_features(const struct gen_device_info *devinfo,
                              VkFormat vk_format,
                              const struct anv_format *anv_format,
                              VkImageTiling vk_tiling);

void anv_fill_buffer_surface_state(struct anv_device *device,
                                   struct anv_state state,
                                   enum isl_format format,
                                   struct anv_address address,
                                   uint32_t range, uint32_t stride);

static inline void
anv_clear_color_from_att_state(union isl_color_value *clear_color,
                               const struct anv_attachment_state *att_state,
                               const struct anv_image_view *iview)
{
   const struct isl_format_layout *view_fmtl =
      isl_format_get_layout(iview->planes[0].isl.format);

#define COPY_CLEAR_COLOR_CHANNEL(c, i) \
   if (view_fmtl->channels.c.bits) \
      clear_color->u32[i] = att_state->clear_value.color.uint32[i]

   COPY_CLEAR_COLOR_CHANNEL(r, 0);
   COPY_CLEAR_COLOR_CHANNEL(g, 1);
   COPY_CLEAR_COLOR_CHANNEL(b, 2);
   COPY_CLEAR_COLOR_CHANNEL(a, 3);

#undef COPY_CLEAR_COLOR_CHANNEL
}
struct anv_ycbcr_conversion {
   const struct anv_format *format;
   VkSamplerYcbcrModelConversion ycbcr_model;
   VkSamplerYcbcrRange ycbcr_range;
   VkComponentSwizzle mapping[4];
   VkChromaLocation chroma_offsets[2];
   VkFilter chroma_filter;
   bool chroma_reconstruction;
};

struct anv_sampler {
   uint32_t state[3][4];
   uint32_t n_planes;
   struct anv_ycbcr_conversion *conversion;
};

struct anv_framebuffer {
   uint32_t width;
   uint32_t height;
   uint32_t layers;

   uint32_t attachment_count;
   struct anv_image_view *attachments[0];
};

struct anv_subpass_attachment {
   VkImageUsageFlagBits usage;
   uint32_t attachment;
   VkImageLayout layout;
};
struct anv_subpass {
   uint32_t attachment_count;

   /**
    * A pointer to all attachment references used in this subpass.
    * Only valid if ::attachment_count > 0.
    */
   struct anv_subpass_attachment *attachments;
   uint32_t input_count;
   struct anv_subpass_attachment *input_attachments;
   uint32_t color_count;
   struct anv_subpass_attachment *color_attachments;
   struct anv_subpass_attachment *resolve_attachments;

   struct anv_subpass_attachment *depth_stencil_attachment;
   struct anv_subpass_attachment *ds_resolve_attachment;
   VkResolveModeFlagBitsKHR depth_resolve_mode;
   VkResolveModeFlagBitsKHR stencil_resolve_mode;

   uint32_t view_mask;

   /** Subpass has a depth/stencil self-dependency */
   bool has_ds_self_dep;

   /** Subpass has at least one color resolve attachment */
   bool has_color_resolve;
};

static inline unsigned
anv_subpass_view_count(const struct anv_subpass *subpass)
{
   return MAX2(1, util_bitcount(subpass->view_mask));
}

struct anv_render_pass_attachment {
   /* TODO: Consider using VkAttachmentDescription instead of storing each of
    * its members individually.
    */
   VkFormat format;
   uint32_t samples;
   VkImageUsageFlags usage;
   VkAttachmentLoadOp load_op;
   VkAttachmentStoreOp store_op;
   VkAttachmentLoadOp stencil_load_op;
   VkImageLayout initial_layout;
   VkImageLayout final_layout;
   VkImageLayout first_subpass_layout;

   /* The subpass id in which the attachment will be used last. */
   uint32_t last_subpass_idx;
};

struct anv_render_pass {
   uint32_t attachment_count;
   uint32_t subpass_count;
   /* An array of subpass_count+1 flushes, one per subpass boundary */
   enum anv_pipe_bits *subpass_flushes;
   struct anv_render_pass_attachment *attachments;
   struct anv_subpass subpasses[0];
};

#define ANV_PIPELINE_STATISTICS_MASK 0x000007ff

struct anv_query_pool {
   VkQueryType type;
   VkQueryPipelineStatisticFlags pipeline_statistics;
   /** Stride between slots, in bytes */
   uint32_t stride;
   /** Number of slots in this query pool */
   uint32_t slots;
   struct anv_bo bo;
};
int anv_get_instance_entrypoint_index(const char *name);
int anv_get_device_entrypoint_index(const char *name);

bool
anv_instance_entrypoint_is_enabled(int index, uint32_t core_version,
                                   const struct anv_instance_extension_table *instance);

bool
anv_device_entrypoint_is_enabled(int index, uint32_t core_version,
                                 const struct anv_instance_extension_table *instance,
                                 const struct anv_device_extension_table *device);

void *anv_lookup_entrypoint(const struct gen_device_info *devinfo,
                            const char *name);

void anv_dump_image_to_ppm(struct anv_device *device,
                           struct anv_image *image, unsigned miplevel,
                           unsigned array_layer, VkImageAspectFlagBits aspect,
                           const char *filename);

enum anv_dump_action {
   ANV_DUMP_FRAMEBUFFERS_BIT = 0x1,
};

void anv_dump_start(struct anv_device *device, enum anv_dump_action actions);
void anv_dump_finish(void);

void anv_dump_add_framebuffer(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_framebuffer *fb);

static inline uint32_t
anv_get_subpass_id(const struct anv_cmd_state * const cmd_state)
{
   /* This function must be called from within a subpass. */
   assert(cmd_state->pass && cmd_state->subpass);

   const uint32_t subpass_id = cmd_state->subpass - cmd_state->pass->subpasses;

   /* The id of this subpass shouldn't exceed the number of subpasses in this
    * render pass minus 1.
    */
   assert(subpass_id < cmd_state->pass->subpass_count);
   return subpass_id;
}
#define ANV_DEFINE_HANDLE_CASTS(__anv_type, __VkType)              \
                                                                   \
   static inline struct __anv_type *                               \
   __anv_type ## _from_handle(__VkType _handle)                    \
   {                                                               \
      return (struct __anv_type *) _handle;                        \
   }                                                               \
                                                                   \
   static inline __VkType                                          \
   __anv_type ## _to_handle(struct __anv_type *_obj)               \
   {                                                               \
      return (__VkType) _obj;                                      \
   }

#define ANV_DEFINE_NONDISP_HANDLE_CASTS(__anv_type, __VkType)      \
                                                                   \
   static inline struct __anv_type *                               \
   __anv_type ## _from_handle(__VkType _handle)                    \
   {                                                               \
      return (struct __anv_type *)(uintptr_t) _handle;             \
   }                                                               \
                                                                   \
   static inline __VkType                                          \
   __anv_type ## _to_handle(struct __anv_type *_obj)               \
   {                                                               \
      return (__VkType)(uintptr_t) _obj;                           \
   }

#define ANV_FROM_HANDLE(__anv_type, __name, __handle) \
   struct __anv_type *__name = __anv_type ## _from_handle(__handle)
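
/* Example (illustrative): the cast macros above give each Vulkan handle type
 * a pair of converters, and ANV_FROM_HANDLE is the usual way an entrypoint
 * recovers the driver object at the top of its body:
 *
 *    VkResult anv_SomeEntrypoint(VkDevice _device, VkImage _image)
 *    {
 *       ANV_FROM_HANDLE(anv_device, device, _device);
 *       ANV_FROM_HANDLE(anv_image, image, _image);
 *       // device and image now point at the driver-private structs
 *       return VK_SUCCESS;
 *    }
 *
 * anv_SomeEntrypoint is a made-up name used only for this sketch.
 */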
ANV_DEFINE_HANDLE_CASTS(anv_cmd_buffer, VkCommandBuffer)
ANV_DEFINE_HANDLE_CASTS(anv_device, VkDevice)
ANV_DEFINE_HANDLE_CASTS(anv_instance, VkInstance)
ANV_DEFINE_HANDLE_CASTS(anv_physical_device, VkPhysicalDevice)
ANV_DEFINE_HANDLE_CASTS(anv_queue, VkQueue)

ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_cmd_pool, VkCommandPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer, VkBuffer)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer_view, VkBufferView)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_pool, VkDescriptorPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set, VkDescriptorSet)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set_layout, VkDescriptorSetLayout)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_update_template, VkDescriptorUpdateTemplate)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_device_memory, VkDeviceMemory)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_fence, VkFence)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_event, VkEvent)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_framebuffer, VkFramebuffer)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image, VkImage)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image_view, VkImageView)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_cache, VkPipelineCache)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline, VkPipeline)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_layout, VkPipelineLayout)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_query_pool, VkQueryPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_render_pass, VkRenderPass)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_sampler, VkSampler)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_semaphore, VkSemaphore)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_shader_module, VkShaderModule)
ANV_DEFINE_NONDISP_HANDLE_CASTS(vk_debug_report_callback, VkDebugReportCallbackEXT)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_ycbcr_conversion, VkSamplerYcbcrConversion)

/* Gen-specific function declarations */
#ifdef genX
#  include "anv_genX.h"
#else
#  define genX(x) gen7_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen75_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen8_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen9_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen10_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen11_##x
#  include "anv_genX.h"
#  undef genX
#endif

#endif /* ANV_PRIVATE_H */