2 * Copyright © 2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
39 #define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
44 #include "common/gen_clflush.h"
45 #include "common/gen_device_info.h"
46 #include "blorp/blorp.h"
47 #include "compiler/brw_compiler.h"
48 #include "util/macros.h"
49 #include "util/list.h"
50 #include "util/u_atomic.h"
51 #include "util/u_vector.h"
54 /* Pre-declarations needed for WSI entrypoints */
57 typedef struct xcb_connection_t xcb_connection_t
;
58 typedef uint32_t xcb_visualid_t
;
59 typedef uint32_t xcb_window_t
;
62 struct anv_buffer_view
;
63 struct anv_image_view
;
65 struct anv_debug_report_callback
;
69 #include <vulkan/vulkan.h>
70 #include <vulkan/vulkan_intel.h>
71 #include <vulkan/vk_icd.h>
72 #include <vulkan/vk_android_native_buffer.h>
74 #include "anv_entrypoints.h"
77 #include "common/gen_debug.h"
78 #include "common/intel_log.h"
79 #include "wsi_common.h"
81 /* Allowing different clear colors requires us to perform a depth resolve at
82 * the end of certain render passes. This is because while slow clears store
83 * the clear color in the HiZ buffer, fast clears (without a resolve) don't.
84 * See the PRMs for examples describing when additional resolves would be
85 * necessary. To enable fast clears without requiring extra resolves, we set
86 * the clear value to a globally-defined one. We could allow different values
87 * if the user doesn't expect coherent data during or after a render passes
88 * (VK_ATTACHMENT_STORE_OP_DONT_CARE), but such users (aside from the CTS)
89 * don't seem to exist yet. In almost all Vulkan applications tested thus far,
90 * 1.0f seems to be the only value used. The only application that doesn't set
91 * this value does so through the usage of an seemingly uninitialized clear
94 #define ANV_HZ_FC_VAL 1.0f
99 #define MAX_VIEWPORTS 16
100 #define MAX_SCISSORS 16
101 #define MAX_PUSH_CONSTANTS_SIZE 128
102 #define MAX_DYNAMIC_BUFFERS 16
104 #define MAX_PUSH_DESCRIPTORS 32 /* Minimum requirement */
106 #define ANV_SVGS_VB_INDEX MAX_VBS
107 #define ANV_DRAWID_VB_INDEX (MAX_VBS + 1)
109 #define anv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
111 static inline uint32_t
112 align_down_npot_u32(uint32_t v
, uint32_t a
)
117 static inline uint32_t
118 align_u32(uint32_t v
, uint32_t a
)
120 assert(a
!= 0 && a
== (a
& -a
));
121 return (v
+ a
- 1) & ~(a
- 1);
124 static inline uint64_t
125 align_u64(uint64_t v
, uint64_t a
)
127 assert(a
!= 0 && a
== (a
& -a
));
128 return (v
+ a
- 1) & ~(a
- 1);
131 static inline int32_t
132 align_i32(int32_t v
, int32_t a
)
134 assert(a
!= 0 && a
== (a
& -a
));
135 return (v
+ a
- 1) & ~(a
- 1);
138 /** Alignment must be a power of 2. */
140 anv_is_aligned(uintmax_t n
, uintmax_t a
)
142 assert(a
== (a
& -a
));
143 return (n
& (a
- 1)) == 0;
146 static inline uint32_t
147 anv_minify(uint32_t n
, uint32_t levels
)
149 if (unlikely(n
== 0))
152 return MAX2(n
>> levels
, 1);
156 anv_clamp_f(float f
, float min
, float max
)
169 anv_clear_mask(uint32_t *inout_mask
, uint32_t clear_mask
)
171 if (*inout_mask
& clear_mask
) {
172 *inout_mask
&= ~clear_mask
;
179 static inline union isl_color_value
180 vk_to_isl_color(VkClearColorValue color
)
182 return (union isl_color_value
) {
192 #define for_each_bit(b, dword) \
193 for (uint32_t __dword = (dword); \
194 (b) = __builtin_ffs(__dword) - 1, __dword; \
195 __dword &= ~(1 << (b)))
197 #define typed_memcpy(dest, src, count) ({ \
198 STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
199 memcpy((dest), (src), (count) * sizeof(*(src))); \
202 /* Mapping from anv object to VkDebugReportObjectTypeEXT. New types need
203 * to be added here in order to utilize mapping in debug/error/perf macros.
205 #define REPORT_OBJECT_TYPE(o) \
206 __builtin_choose_expr ( \
207 __builtin_types_compatible_p (__typeof (o), struct anv_instance*), \
208 VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, \
209 __builtin_choose_expr ( \
210 __builtin_types_compatible_p (__typeof (o), struct anv_physical_device*), \
211 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, \
212 __builtin_choose_expr ( \
213 __builtin_types_compatible_p (__typeof (o), struct anv_device*), \
214 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, \
215 __builtin_choose_expr ( \
216 __builtin_types_compatible_p (__typeof (o), const struct anv_device*), \
217 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, \
218 __builtin_choose_expr ( \
219 __builtin_types_compatible_p (__typeof (o), struct anv_queue*), \
220 VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, \
221 __builtin_choose_expr ( \
222 __builtin_types_compatible_p (__typeof (o), struct anv_semaphore*), \
223 VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, \
224 __builtin_choose_expr ( \
225 __builtin_types_compatible_p (__typeof (o), struct anv_cmd_buffer*), \
226 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, \
227 __builtin_choose_expr ( \
228 __builtin_types_compatible_p (__typeof (o), struct anv_fence*), \
229 VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, \
230 __builtin_choose_expr ( \
231 __builtin_types_compatible_p (__typeof (o), struct anv_device_memory*), \
232 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, \
233 __builtin_choose_expr ( \
234 __builtin_types_compatible_p (__typeof (o), struct anv_buffer*), \
235 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, \
236 __builtin_choose_expr ( \
237 __builtin_types_compatible_p (__typeof (o), struct anv_image*), \
238 VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, \
239 __builtin_choose_expr ( \
240 __builtin_types_compatible_p (__typeof (o), const struct anv_image*), \
241 VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, \
242 __builtin_choose_expr ( \
243 __builtin_types_compatible_p (__typeof (o), struct anv_event*), \
244 VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT, \
245 __builtin_choose_expr ( \
246 __builtin_types_compatible_p (__typeof (o), struct anv_query_pool*), \
247 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, \
248 __builtin_choose_expr ( \
249 __builtin_types_compatible_p (__typeof (o), struct anv_buffer_view*), \
250 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT, \
251 __builtin_choose_expr ( \
252 __builtin_types_compatible_p (__typeof (o), struct anv_image_view*), \
253 VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, \
254 __builtin_choose_expr ( \
255 __builtin_types_compatible_p (__typeof (o), struct anv_shader_module*), \
256 VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, \
257 __builtin_choose_expr ( \
258 __builtin_types_compatible_p (__typeof (o), struct anv_pipeline_cache*), \
259 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT, \
260 __builtin_choose_expr ( \
261 __builtin_types_compatible_p (__typeof (o), struct anv_pipeline_layout*), \
262 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, \
263 __builtin_choose_expr ( \
264 __builtin_types_compatible_p (__typeof (o), struct anv_render_pass*), \
265 VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, \
266 __builtin_choose_expr ( \
267 __builtin_types_compatible_p (__typeof (o), struct anv_pipeline*), \
268 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, \
269 __builtin_choose_expr ( \
270 __builtin_types_compatible_p (__typeof (o), struct anv_descriptor_set_layout*), \
271 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, \
272 __builtin_choose_expr ( \
273 __builtin_types_compatible_p (__typeof (o), struct anv_sampler*), \
274 VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT, \
275 __builtin_choose_expr ( \
276 __builtin_types_compatible_p (__typeof (o), struct anv_descriptor_pool*), \
277 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, \
278 __builtin_choose_expr ( \
279 __builtin_types_compatible_p (__typeof (o), struct anv_descriptor_set*), \
280 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, \
281 __builtin_choose_expr ( \
282 __builtin_types_compatible_p (__typeof (o), struct anv_framebuffer*), \
283 VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT, \
284 __builtin_choose_expr ( \
285 __builtin_types_compatible_p (__typeof (o), struct anv_cmd_pool*), \
286 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, \
287 __builtin_choose_expr ( \
288 __builtin_types_compatible_p (__typeof (o), struct anv_surface*), \
289 VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT, \
290 __builtin_choose_expr ( \
291 __builtin_types_compatible_p (__typeof (o), struct wsi_swapchain*), \
292 VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, \
293 __builtin_choose_expr ( \
294 __builtin_types_compatible_p (__typeof (o), struct anv_debug_callback*), \
295 VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT, \
296 __builtin_choose_expr ( \
297 __builtin_types_compatible_p (__typeof (o), void*), \
298 VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, \
299 /* The void expression results in a compile-time error \
300 when assigning the result to something. */ \
301 (void)0)))))))))))))))))))))))))))))))
303 /* Whenever we generate an error, pass it through this function. Useful for
304 * debugging, where we can break on it. Only call at error site, not when
305 * propagating errors. Might be useful to plug in a stack trace here.
308 VkResult
__vk_errorf(struct anv_instance
*instance
, const void *object
,
309 VkDebugReportObjectTypeEXT type
, VkResult error
,
310 const char *file
, int line
, const char *format
, ...);
313 #define vk_error(error) __vk_errorf(NULL, NULL,\
314 VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,\
315 error, __FILE__, __LINE__, NULL);
316 #define vk_errorf(instance, obj, error, format, ...)\
317 __vk_errorf(instance, obj, REPORT_OBJECT_TYPE(obj), error,\
318 __FILE__, __LINE__, format, ## __VA_ARGS__);
320 #define vk_error(error) error
321 #define vk_errorf(instance, obj, error, format, ...) error
325 * Warn on ignored extension structs.
327 * The Vulkan spec requires us to ignore unsupported or unknown structs in
328 * a pNext chain. In debug mode, emitting warnings for ignored structs may
329 * help us discover structs that we should not have ignored.
332 * From the Vulkan 1.0.38 spec:
334 * Any component of the implementation (the loader, any enabled layers,
335 * and drivers) must skip over, without processing (other than reading the
336 * sType and pNext members) any chained structures with sType values not
337 * defined by extensions supported by that component.
339 #define anv_debug_ignored_stype(sType) \
340 intel_logd("%s: ignored VkStructureType %u\n", __func__, (sType))
342 void __anv_perf_warn(struct anv_instance
*instance
, const void *object
,
343 VkDebugReportObjectTypeEXT type
, const char *file
,
344 int line
, const char *format
, ...)
345 anv_printflike(6, 7);
346 void anv_loge(const char *format
, ...) anv_printflike(1, 2);
347 void anv_loge_v(const char *format
, va_list va
);
349 void anv_debug_report(struct anv_instance
*instance
,
350 VkDebugReportFlagsEXT flags
,
351 VkDebugReportObjectTypeEXT object_type
,
355 const char* pLayerPrefix
,
356 const char *pMessage
);
359 * Print a FINISHME message, including its source location.
361 #define anv_finishme(format, ...) \
363 static bool reported = false; \
365 intel_logw("%s:%d: FINISHME: " format, __FILE__, __LINE__, \
372 * Print a perf warning message. Set INTEL_DEBUG=perf to see these.
374 #define anv_perf_warn(instance, obj, format, ...) \
376 static bool reported = false; \
377 if (!reported && unlikely(INTEL_DEBUG & DEBUG_PERF)) { \
378 __anv_perf_warn(instance, obj, REPORT_OBJECT_TYPE(obj), __FILE__, __LINE__,\
379 format, ##__VA_ARGS__); \
384 /* A non-fatal assert. Useful for debugging. */
386 #define anv_assert(x) ({ \
387 if (unlikely(!(x))) \
388 intel_loge("%s:%d ASSERT: %s", __FILE__, __LINE__, #x); \
391 #define anv_assert(x)
394 /* A multi-pointer allocator
396 * When copying data structures from the user (such as a render pass), it's
397 * common to need to allocate data for a bunch of different things. Instead
398 * of doing several allocations and having to handle all of the error checking
399 * that entails, it can be easier to do a single allocation. This struct
400 * helps facilitate that. The intended usage looks like this:
403 * anv_multialloc_add(&ma, &main_ptr, 1);
404 * anv_multialloc_add(&ma, &substruct1, substruct1Count);
405 * anv_multialloc_add(&ma, &substruct2, substruct2Count);
407 * if (!anv_multialloc_alloc(&ma, pAllocator, VK_ALLOCATION_SCOPE_FOO))
408 * return vk_error(VK_ERROR_OUT_OF_HOST_MEORY);
410 struct anv_multialloc
{
418 #define ANV_MULTIALLOC_INIT \
419 ((struct anv_multialloc) { 0, })
421 #define ANV_MULTIALLOC(_name) \
422 struct anv_multialloc _name = ANV_MULTIALLOC_INIT
424 __attribute__((always_inline
))
426 _anv_multialloc_add(struct anv_multialloc
*ma
,
427 void **ptr
, size_t size
, size_t align
)
429 size_t offset
= align_u64(ma
->size
, align
);
430 ma
->size
= offset
+ size
;
431 ma
->align
= MAX2(ma
->align
, align
);
433 /* Store the offset in the pointer. */
434 *ptr
= (void *)(uintptr_t)offset
;
436 assert(ma
->ptr_count
< ARRAY_SIZE(ma
->ptrs
));
437 ma
->ptrs
[ma
->ptr_count
++] = ptr
;
440 #define anv_multialloc_add_size(_ma, _ptr, _size) \
441 _anv_multialloc_add((_ma), (void **)(_ptr), (_size), __alignof__(**(_ptr)))
443 #define anv_multialloc_add(_ma, _ptr, _count) \
444 anv_multialloc_add_size(_ma, _ptr, (_count) * sizeof(**(_ptr)));
446 __attribute__((always_inline
))
448 anv_multialloc_alloc(struct anv_multialloc
*ma
,
449 const VkAllocationCallbacks
*alloc
,
450 VkSystemAllocationScope scope
)
452 void *ptr
= vk_alloc(alloc
, ma
->size
, ma
->align
, scope
);
456 /* Fill out each of the pointers with their final value.
458 * for (uint32_t i = 0; i < ma->ptr_count; i++)
459 * *ma->ptrs[i] = ptr + (uintptr_t)*ma->ptrs[i];
461 * Unfortunately, even though ma->ptr_count is basically guaranteed to be a
462 * constant, GCC is incapable of figuring this out and unrolling the loop
463 * so we have to give it a little help.
465 STATIC_ASSERT(ARRAY_SIZE(ma
->ptrs
) == 8);
466 #define _ANV_MULTIALLOC_UPDATE_POINTER(_i) \
467 if ((_i) < ma->ptr_count) \
468 *ma->ptrs[_i] = ptr + (uintptr_t)*ma->ptrs[_i]
469 _ANV_MULTIALLOC_UPDATE_POINTER(0);
470 _ANV_MULTIALLOC_UPDATE_POINTER(1);
471 _ANV_MULTIALLOC_UPDATE_POINTER(2);
472 _ANV_MULTIALLOC_UPDATE_POINTER(3);
473 _ANV_MULTIALLOC_UPDATE_POINTER(4);
474 _ANV_MULTIALLOC_UPDATE_POINTER(5);
475 _ANV_MULTIALLOC_UPDATE_POINTER(6);
476 _ANV_MULTIALLOC_UPDATE_POINTER(7);
477 #undef _ANV_MULTIALLOC_UPDATE_POINTER
482 __attribute__((always_inline
))
484 anv_multialloc_alloc2(struct anv_multialloc
*ma
,
485 const VkAllocationCallbacks
*parent_alloc
,
486 const VkAllocationCallbacks
*alloc
,
487 VkSystemAllocationScope scope
)
489 return anv_multialloc_alloc(ma
, alloc
? alloc
: parent_alloc
, scope
);
495 /* Index into the current validation list. This is used by the
496 * validation list building alrogithm to track which buffers are already
497 * in the validation list so that we can ensure uniqueness.
501 /* Last known offset. This value is provided by the kernel when we
502 * execbuf and is used as the presumed offset for the next bunch of
510 /** Flags to pass to the kernel through drm_i915_exec_object2::flags */
515 anv_bo_init(struct anv_bo
*bo
, uint32_t gem_handle
, uint64_t size
)
517 bo
->gem_handle
= gem_handle
;
525 /* Represents a lock-free linked list of "free" things. This is used by
526 * both the block pool and the state pools. Unfortunately, in order to
527 * solve the ABA problem, we can't use a single uint32_t head.
529 union anv_free_list
{
533 /* A simple count that is incremented every time the head changes. */
539 #define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { 1, 0 } })
541 struct anv_block_state
{
551 struct anv_block_pool
{
552 struct anv_device
*device
;
558 /* The offset from the start of the bo to the "center" of the block
559 * pool. Pointers to allocated blocks are given by
560 * bo.map + center_bo_offset + offsets.
562 uint32_t center_bo_offset
;
564 /* Current memory map of the block pool. This pointer may or may not
565 * point to the actual beginning of the block pool memory. If
566 * anv_block_pool_alloc_back has ever been called, then this pointer
567 * will point to the "center" position of the buffer and all offsets
568 * (negative or positive) given out by the block pool alloc functions
569 * will be valid relative to this pointer.
571 * In particular, map == bo.map + center_offset
577 * Array of mmaps and gem handles owned by the block pool, reclaimed when
578 * the block pool is destroyed.
580 struct u_vector mmap_cleanups
;
582 struct anv_block_state state
;
584 struct anv_block_state back_state
;
587 /* Block pools are backed by a fixed-size 1GB memfd */
588 #define BLOCK_POOL_MEMFD_SIZE (1ul << 30)
590 /* The center of the block pool is also the middle of the memfd. This may
591 * change in the future if we decide differently for some reason.
593 #define BLOCK_POOL_MEMFD_CENTER (BLOCK_POOL_MEMFD_SIZE / 2)
595 static inline uint32_t
596 anv_block_pool_size(struct anv_block_pool
*pool
)
598 return pool
->state
.end
+ pool
->back_state
.end
;
607 #define ANV_STATE_NULL ((struct anv_state) { .alloc_size = 0 })
609 struct anv_fixed_size_state_pool
{
610 union anv_free_list free_list
;
611 struct anv_block_state block
;
614 #define ANV_MIN_STATE_SIZE_LOG2 6
615 #define ANV_MAX_STATE_SIZE_LOG2 20
617 #define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2 + 1)
619 struct anv_state_pool
{
620 struct anv_block_pool block_pool
;
622 /* The size of blocks which will be allocated from the block pool */
625 /** Free list for "back" allocations */
626 union anv_free_list back_alloc_free_list
;
628 struct anv_fixed_size_state_pool buckets
[ANV_STATE_BUCKETS
];
631 struct anv_state_stream_block
;
633 struct anv_state_stream
{
634 struct anv_state_pool
*state_pool
;
636 /* The size of blocks to allocate from the state pool */
639 /* Current block we're allocating from */
640 struct anv_state block
;
642 /* Offset into the current block at which to allocate the next state */
645 /* List of all blocks allocated from this pool */
646 struct anv_state_stream_block
*block_list
;
649 /* The block_pool functions exported for testing only. The block pool should
650 * only be used via a state pool (see below).
652 VkResult
anv_block_pool_init(struct anv_block_pool
*pool
,
653 struct anv_device
*device
,
654 uint32_t initial_size
,
656 void anv_block_pool_finish(struct anv_block_pool
*pool
);
657 int32_t anv_block_pool_alloc(struct anv_block_pool
*pool
,
658 uint32_t block_size
);
659 int32_t anv_block_pool_alloc_back(struct anv_block_pool
*pool
,
660 uint32_t block_size
);
662 VkResult
anv_state_pool_init(struct anv_state_pool
*pool
,
663 struct anv_device
*device
,
666 void anv_state_pool_finish(struct anv_state_pool
*pool
);
667 struct anv_state
anv_state_pool_alloc(struct anv_state_pool
*pool
,
668 uint32_t state_size
, uint32_t alignment
);
669 struct anv_state
anv_state_pool_alloc_back(struct anv_state_pool
*pool
);
670 void anv_state_pool_free(struct anv_state_pool
*pool
, struct anv_state state
);
671 void anv_state_stream_init(struct anv_state_stream
*stream
,
672 struct anv_state_pool
*state_pool
,
673 uint32_t block_size
);
674 void anv_state_stream_finish(struct anv_state_stream
*stream
);
675 struct anv_state
anv_state_stream_alloc(struct anv_state_stream
*stream
,
676 uint32_t size
, uint32_t alignment
);
679 * Implements a pool of re-usable BOs. The interface is identical to that
680 * of block_pool except that each block is its own BO.
683 struct anv_device
*device
;
690 void anv_bo_pool_init(struct anv_bo_pool
*pool
, struct anv_device
*device
,
692 void anv_bo_pool_finish(struct anv_bo_pool
*pool
);
693 VkResult
anv_bo_pool_alloc(struct anv_bo_pool
*pool
, struct anv_bo
*bo
,
695 void anv_bo_pool_free(struct anv_bo_pool
*pool
, const struct anv_bo
*bo
);
697 struct anv_scratch_bo
{
702 struct anv_scratch_pool
{
703 /* Indexed by Per-Thread Scratch Space number (the hardware value) and stage */
704 struct anv_scratch_bo bos
[16][MESA_SHADER_STAGES
];
707 void anv_scratch_pool_init(struct anv_device
*device
,
708 struct anv_scratch_pool
*pool
);
709 void anv_scratch_pool_finish(struct anv_device
*device
,
710 struct anv_scratch_pool
*pool
);
711 struct anv_bo
*anv_scratch_pool_alloc(struct anv_device
*device
,
712 struct anv_scratch_pool
*pool
,
713 gl_shader_stage stage
,
714 unsigned per_thread_scratch
);
716 /** Implements a BO cache that ensures a 1-1 mapping of GEM BOs to anv_bos */
717 struct anv_bo_cache
{
718 struct hash_table
*bo_map
;
719 pthread_mutex_t mutex
;
722 VkResult
anv_bo_cache_init(struct anv_bo_cache
*cache
);
723 void anv_bo_cache_finish(struct anv_bo_cache
*cache
);
724 VkResult
anv_bo_cache_alloc(struct anv_device
*device
,
725 struct anv_bo_cache
*cache
,
726 uint64_t size
, struct anv_bo
**bo
);
727 VkResult
anv_bo_cache_import(struct anv_device
*device
,
728 struct anv_bo_cache
*cache
,
729 int fd
, struct anv_bo
**bo
);
730 VkResult
anv_bo_cache_export(struct anv_device
*device
,
731 struct anv_bo_cache
*cache
,
732 struct anv_bo
*bo_in
, int *fd_out
);
733 void anv_bo_cache_release(struct anv_device
*device
,
734 struct anv_bo_cache
*cache
,
737 struct anv_memory_type
{
738 /* Standard bits passed on to the client */
739 VkMemoryPropertyFlags propertyFlags
;
742 /* Driver-internal book-keeping */
743 VkBufferUsageFlags valid_buffer_usage
;
746 struct anv_memory_heap
{
747 /* Standard bits passed on to the client */
749 VkMemoryHeapFlags flags
;
751 /* Driver-internal book-keeping */
752 bool supports_48bit_addresses
;
755 struct anv_physical_device
{
756 VK_LOADER_DATA _loader_data
;
758 struct anv_instance
* instance
;
762 struct gen_device_info info
;
763 /** Amount of "GPU memory" we want to advertise
765 * Clearly, this value is bogus since Intel is a UMA architecture. On
766 * gen7 platforms, we are limited by GTT size unless we want to implement
767 * fine-grained tracking and GTT splitting. On Broadwell and above we are
768 * practically unlimited. However, we will never report more than 3/4 of
769 * the total system ram to try and avoid running out of RAM.
771 bool supports_48bit_addresses
;
772 struct brw_compiler
* compiler
;
773 struct isl_device isl_dev
;
774 int cmd_parser_version
;
778 bool has_syncobj_wait
;
781 uint32_t subslice_total
;
785 struct anv_memory_type types
[VK_MAX_MEMORY_TYPES
];
787 struct anv_memory_heap heaps
[VK_MAX_MEMORY_HEAPS
];
790 uint8_t pipeline_cache_uuid
[VK_UUID_SIZE
];
791 uint8_t driver_uuid
[VK_UUID_SIZE
];
792 uint8_t device_uuid
[VK_UUID_SIZE
];
794 struct wsi_device wsi_device
;
798 struct anv_debug_report_callback
{
799 /* Link in the 'callbacks' list in anv_instance struct. */
800 struct list_head link
;
801 VkDebugReportFlagsEXT flags
;
802 PFN_vkDebugReportCallbackEXT callback
;
806 struct anv_instance
{
807 VK_LOADER_DATA _loader_data
;
809 VkAllocationCallbacks alloc
;
812 int physicalDeviceCount
;
813 struct anv_physical_device physicalDevice
;
815 /* VK_EXT_debug_report debug callbacks */
816 pthread_mutex_t callbacks_mutex
;
817 struct list_head callbacks
;
818 struct anv_debug_report_callback destroy_debug_cb
;
821 VkResult
anv_init_wsi(struct anv_physical_device
*physical_device
);
822 void anv_finish_wsi(struct anv_physical_device
*physical_device
);
824 bool anv_instance_extension_supported(const char *name
);
825 uint32_t anv_physical_device_api_version(struct anv_physical_device
*dev
);
826 bool anv_physical_device_extension_supported(struct anv_physical_device
*dev
,
830 VK_LOADER_DATA _loader_data
;
832 struct anv_device
* device
;
834 struct anv_state_pool
* pool
;
837 struct anv_pipeline_cache
{
838 struct anv_device
* device
;
839 pthread_mutex_t mutex
;
841 struct hash_table
* cache
;
844 struct anv_pipeline_bind_map
;
846 void anv_pipeline_cache_init(struct anv_pipeline_cache
*cache
,
847 struct anv_device
*device
,
849 void anv_pipeline_cache_finish(struct anv_pipeline_cache
*cache
);
851 struct anv_shader_bin
*
852 anv_pipeline_cache_search(struct anv_pipeline_cache
*cache
,
853 const void *key
, uint32_t key_size
);
854 struct anv_shader_bin
*
855 anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache
*cache
,
856 const void *key_data
, uint32_t key_size
,
857 const void *kernel_data
, uint32_t kernel_size
,
858 const struct brw_stage_prog_data
*prog_data
,
859 uint32_t prog_data_size
,
860 const struct anv_pipeline_bind_map
*bind_map
);
863 VK_LOADER_DATA _loader_data
;
865 VkAllocationCallbacks alloc
;
867 struct anv_instance
* instance
;
869 struct gen_device_info info
;
870 struct isl_device isl_dev
;
873 bool can_chain_batches
;
874 bool robust_buffer_access
;
876 struct anv_bo_pool batch_bo_pool
;
878 struct anv_bo_cache bo_cache
;
880 struct anv_state_pool dynamic_state_pool
;
881 struct anv_state_pool instruction_state_pool
;
882 struct anv_state_pool surface_state_pool
;
884 struct anv_bo workaround_bo
;
885 struct anv_bo trivial_batch_bo
;
887 struct anv_pipeline_cache blorp_shader_cache
;
888 struct blorp_context blorp
;
890 struct anv_state border_colors
;
892 struct anv_queue queue
;
894 struct anv_scratch_pool scratch_pool
;
896 uint32_t default_mocs
;
898 pthread_mutex_t mutex
;
899 pthread_cond_t queue_submit
;
904 anv_state_flush(struct anv_device
*device
, struct anv_state state
)
906 if (device
->info
.has_llc
)
909 gen_flush_range(state
.map
, state
.alloc_size
);
912 void anv_device_init_blorp(struct anv_device
*device
);
913 void anv_device_finish_blorp(struct anv_device
*device
);
915 VkResult
anv_device_execbuf(struct anv_device
*device
,
916 struct drm_i915_gem_execbuffer2
*execbuf
,
917 struct anv_bo
**execbuf_bos
);
918 VkResult
anv_device_query_status(struct anv_device
*device
);
919 VkResult
anv_device_bo_busy(struct anv_device
*device
, struct anv_bo
*bo
);
920 VkResult
anv_device_wait(struct anv_device
*device
, struct anv_bo
*bo
,
923 void* anv_gem_mmap(struct anv_device
*device
,
924 uint32_t gem_handle
, uint64_t offset
, uint64_t size
, uint32_t flags
);
925 void anv_gem_munmap(void *p
, uint64_t size
);
926 uint32_t anv_gem_create(struct anv_device
*device
, uint64_t size
);
927 void anv_gem_close(struct anv_device
*device
, uint32_t gem_handle
);
928 uint32_t anv_gem_userptr(struct anv_device
*device
, void *mem
, size_t size
);
929 int anv_gem_busy(struct anv_device
*device
, uint32_t gem_handle
);
930 int anv_gem_wait(struct anv_device
*device
, uint32_t gem_handle
, int64_t *timeout_ns
);
931 int anv_gem_execbuffer(struct anv_device
*device
,
932 struct drm_i915_gem_execbuffer2
*execbuf
);
933 int anv_gem_set_tiling(struct anv_device
*device
, uint32_t gem_handle
,
934 uint32_t stride
, uint32_t tiling
);
935 int anv_gem_create_context(struct anv_device
*device
);
936 int anv_gem_destroy_context(struct anv_device
*device
, int context
);
937 int anv_gem_get_context_param(int fd
, int context
, uint32_t param
,
939 int anv_gem_get_param(int fd
, uint32_t param
);
940 int anv_gem_get_tiling(struct anv_device
*device
, uint32_t gem_handle
);
941 bool anv_gem_get_bit6_swizzle(int fd
, uint32_t tiling
);
942 int anv_gem_get_aperture(int fd
, uint64_t *size
);
943 bool anv_gem_supports_48b_addresses(int fd
);
944 int anv_gem_gpu_get_reset_stats(struct anv_device
*device
,
945 uint32_t *active
, uint32_t *pending
);
946 int anv_gem_handle_to_fd(struct anv_device
*device
, uint32_t gem_handle
);
947 uint32_t anv_gem_fd_to_handle(struct anv_device
*device
, int fd
);
948 int anv_gem_set_caching(struct anv_device
*device
, uint32_t gem_handle
, uint32_t caching
);
949 int anv_gem_set_domain(struct anv_device
*device
, uint32_t gem_handle
,
950 uint32_t read_domains
, uint32_t write_domain
);
951 int anv_gem_sync_file_merge(struct anv_device
*device
, int fd1
, int fd2
);
952 uint32_t anv_gem_syncobj_create(struct anv_device
*device
, uint32_t flags
);
953 void anv_gem_syncobj_destroy(struct anv_device
*device
, uint32_t handle
);
954 int anv_gem_syncobj_handle_to_fd(struct anv_device
*device
, uint32_t handle
);
955 uint32_t anv_gem_syncobj_fd_to_handle(struct anv_device
*device
, int fd
);
956 int anv_gem_syncobj_export_sync_file(struct anv_device
*device
,
958 int anv_gem_syncobj_import_sync_file(struct anv_device
*device
,
959 uint32_t handle
, int fd
);
960 void anv_gem_syncobj_reset(struct anv_device
*device
, uint32_t handle
);
961 bool anv_gem_supports_syncobj_wait(int fd
);
962 int anv_gem_syncobj_wait(struct anv_device
*device
,
963 uint32_t *handles
, uint32_t num_handles
,
964 int64_t abs_timeout_ns
, bool wait_all
);
966 VkResult
anv_bo_init_new(struct anv_bo
*bo
, struct anv_device
*device
, uint64_t size
);
968 struct anv_reloc_list
{
970 uint32_t array_length
;
971 struct drm_i915_gem_relocation_entry
* relocs
;
972 struct anv_bo
** reloc_bos
;
975 VkResult
anv_reloc_list_init(struct anv_reloc_list
*list
,
976 const VkAllocationCallbacks
*alloc
);
977 void anv_reloc_list_finish(struct anv_reloc_list
*list
,
978 const VkAllocationCallbacks
*alloc
);
980 VkResult
anv_reloc_list_add(struct anv_reloc_list
*list
,
981 const VkAllocationCallbacks
*alloc
,
982 uint32_t offset
, struct anv_bo
*target_bo
,
985 struct anv_batch_bo
{
986 /* Link in the anv_cmd_buffer.owned_batch_bos list */
987 struct list_head link
;
991 /* Bytes actually consumed in this batch BO */
994 struct anv_reloc_list relocs
;
998 const VkAllocationCallbacks
* alloc
;
1004 struct anv_reloc_list
* relocs
;
1006 /* This callback is called (with the associated user data) in the event
1007 * that the batch runs out of space.
1009 VkResult (*extend_cb
)(struct anv_batch
*, void *);
1013 * Current error status of the command buffer. Used to track inconsistent
1014 * or incomplete command buffer states that are the consequence of run-time
1015 * errors such as out of memory scenarios. We want to track this in the
1016 * batch because the command buffer object is not visible to some parts
1022 void *anv_batch_emit_dwords(struct anv_batch
*batch
, int num_dwords
);
1023 void anv_batch_emit_batch(struct anv_batch
*batch
, struct anv_batch
*other
);
1024 uint64_t anv_batch_emit_reloc(struct anv_batch
*batch
,
1025 void *location
, struct anv_bo
*bo
, uint32_t offset
);
1026 VkResult
anv_device_submit_simple_batch(struct anv_device
*device
,
1027 struct anv_batch
*batch
);
1029 static inline VkResult
1030 anv_batch_set_error(struct anv_batch
*batch
, VkResult error
)
1032 assert(error
!= VK_SUCCESS
);
1033 if (batch
->status
== VK_SUCCESS
)
1034 batch
->status
= error
;
1035 return batch
->status
;
1039 anv_batch_has_error(struct anv_batch
*batch
)
1041 return batch
->status
!= VK_SUCCESS
;
1044 struct anv_address
{
1049 static inline uint64_t
1050 _anv_combine_address(struct anv_batch
*batch
, void *location
,
1051 const struct anv_address address
, uint32_t delta
)
1053 if (address
.bo
== NULL
) {
1054 return address
.offset
+ delta
;
1056 assert(batch
->start
<= location
&& location
< batch
->end
);
1058 return anv_batch_emit_reloc(batch
, location
, address
.bo
, address
.offset
+ delta
);
1062 #define __gen_address_type struct anv_address
1063 #define __gen_user_data struct anv_batch
1064 #define __gen_combine_address _anv_combine_address
1066 /* Wrapper macros needed to work around preprocessor argument issues. In
1067 * particular, arguments don't get pre-evaluated if they are concatenated.
1068 * This means that, if you pass GENX(3DSTATE_PS) into the emit macro, the
1069 * GENX macro won't get evaluated if the emit macro contains "cmd ## foo".
1070 * We can work around this easily enough with these helpers.
1072 #define __anv_cmd_length(cmd) cmd ## _length
1073 #define __anv_cmd_length_bias(cmd) cmd ## _length_bias
1074 #define __anv_cmd_header(cmd) cmd ## _header
1075 #define __anv_cmd_pack(cmd) cmd ## _pack
1076 #define __anv_reg_num(reg) reg ## _num
1078 #define anv_pack_struct(dst, struc, ...) do { \
1079 struct struc __template = { \
1082 __anv_cmd_pack(struc)(NULL, dst, &__template); \
1083 VG(VALGRIND_CHECK_MEM_IS_DEFINED(dst, __anv_cmd_length(struc) * 4)); \
1086 #define anv_batch_emitn(batch, n, cmd, ...) ({ \
1087 void *__dst = anv_batch_emit_dwords(batch, n); \
1089 struct cmd __template = { \
1090 __anv_cmd_header(cmd), \
1091 .DWordLength = n - __anv_cmd_length_bias(cmd), \
1094 __anv_cmd_pack(cmd)(batch, __dst, &__template); \
1099 #define anv_batch_emit_merge(batch, dwords0, dwords1) \
1103 STATIC_ASSERT(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1)); \
1104 dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0)); \
1107 for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++) \
1108 dw[i] = (dwords0)[i] | (dwords1)[i]; \
1109 VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, ARRAY_SIZE(dwords0) * 4));\
1112 #define anv_batch_emit(batch, cmd, name) \
1113 for (struct cmd name = { __anv_cmd_header(cmd) }, \
1114 *_dst = anv_batch_emit_dwords(batch, __anv_cmd_length(cmd)); \
1115 __builtin_expect(_dst != NULL, 1); \
1116 ({ __anv_cmd_pack(cmd)(batch, _dst, &name); \
1117 VG(VALGRIND_CHECK_MEM_IS_DEFINED(_dst, __anv_cmd_length(cmd) * 4)); \
1121 #define GEN7_MOCS (struct GEN7_MEMORY_OBJECT_CONTROL_STATE) { \
1122 .GraphicsDataTypeGFDT = 0, \
1123 .LLCCacheabilityControlLLCCC = 0, \
1124 .L3CacheabilityControlL3CC = 1, \
1127 #define GEN75_MOCS (struct GEN75_MEMORY_OBJECT_CONTROL_STATE) { \
1128 .LLCeLLCCacheabilityControlLLCCC = 0, \
1129 .L3CacheabilityControlL3CC = 1, \
1132 #define GEN8_MOCS (struct GEN8_MEMORY_OBJECT_CONTROL_STATE) { \
1133 .MemoryTypeLLCeLLCCacheabilityControl = WB, \
1134 .TargetCache = L3DefertoPATforLLCeLLCselection, \
1135 .AgeforQUADLRU = 0 \
1138 /* Skylake: MOCS is now an index into an array of 62 different caching
1139 * configurations programmed by the kernel.
1142 #define GEN9_MOCS (struct GEN9_MEMORY_OBJECT_CONTROL_STATE) { \
1143 /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
1144 .IndextoMOCSTables = 2 \
1147 #define GEN9_MOCS_PTE { \
1148 /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
1149 .IndextoMOCSTables = 1 \
1152 /* Cannonlake MOCS defines are duplicates of Skylake MOCS defines. */
1153 #define GEN10_MOCS (struct GEN10_MEMORY_OBJECT_CONTROL_STATE) { \
1154 /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
1155 .IndextoMOCSTables = 2 \
1158 #define GEN10_MOCS_PTE { \
1159 /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
1160 .IndextoMOCSTables = 1 \
1163 struct anv_device_memory
{
1165 struct anv_memory_type
* type
;
1166 VkDeviceSize map_size
;
1171 * Header for Vertex URB Entry (VUE)
1173 struct anv_vue_header
{
1175 uint32_t RTAIndex
; /* RenderTargetArrayIndex */
1176 uint32_t ViewportIndex
;
1180 struct anv_descriptor_set_binding_layout
{
1182 /* The type of the descriptors in this binding */
1183 VkDescriptorType type
;
1186 /* Number of array elements in this binding */
1187 uint16_t array_size
;
1189 /* Index into the flattend descriptor set */
1190 uint16_t descriptor_index
;
1192 /* Index into the dynamic state array for a dynamic buffer */
1193 int16_t dynamic_offset_index
;
1195 /* Index into the descriptor set buffer views */
1196 int16_t buffer_index
;
1199 /* Index into the binding table for the associated surface */
1200 int16_t surface_index
;
1202 /* Index into the sampler table for the associated sampler */
1203 int16_t sampler_index
;
1205 /* Index into the image table for the associated image */
1206 int16_t image_index
;
1207 } stage
[MESA_SHADER_STAGES
];
1209 /* Immutable samplers (or NULL if no immutable samplers) */
1210 struct anv_sampler
**immutable_samplers
;
1213 struct anv_descriptor_set_layout
{
1214 /* Number of bindings in this descriptor set */
1215 uint16_t binding_count
;
1217 /* Total size of the descriptor set with room for all array entries */
1220 /* Shader stages affected by this descriptor set */
1221 uint16_t shader_stages
;
1223 /* Number of buffers in this descriptor set */
1224 uint16_t buffer_count
;
1226 /* Number of dynamic offsets used by this descriptor set */
1227 uint16_t dynamic_offset_count
;
1229 /* Bindings in this descriptor set */
1230 struct anv_descriptor_set_binding_layout binding
[0];
1233 struct anv_descriptor
{
1234 VkDescriptorType type
;
1238 VkImageLayout layout
;
1239 struct anv_image_view
*image_view
;
1240 struct anv_sampler
*sampler
;
1244 struct anv_buffer
*buffer
;
1249 struct anv_buffer_view
*buffer_view
;
1253 struct anv_descriptor_set
{
1254 const struct anv_descriptor_set_layout
*layout
;
1256 uint32_t buffer_count
;
1257 struct anv_buffer_view
*buffer_views
;
1258 struct anv_descriptor descriptors
[0];
1261 struct anv_buffer_view
{
1262 enum isl_format format
; /**< VkBufferViewCreateInfo::format */
1264 uint32_t offset
; /**< Offset into bo. */
1265 uint64_t range
; /**< VkBufferViewCreateInfo::range */
1267 struct anv_state surface_state
;
1268 struct anv_state storage_surface_state
;
1269 struct anv_state writeonly_storage_surface_state
;
1271 struct brw_image_param storage_image_param
;
1274 struct anv_push_descriptor_set
{
1275 struct anv_descriptor_set set
;
1277 /* Put this field right behind anv_descriptor_set so it fills up the
1278 * descriptors[0] field. */
1279 struct anv_descriptor descriptors
[MAX_PUSH_DESCRIPTORS
];
1280 struct anv_buffer_view buffer_views
[MAX_PUSH_DESCRIPTORS
];
1283 struct anv_descriptor_pool
{
1288 struct anv_state_stream surface_state_stream
;
1289 void *surface_state_free_list
;
1294 enum anv_descriptor_template_entry_type
{
1295 ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_IMAGE
,
1296 ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_BUFFER
,
1297 ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_BUFFER_VIEW
1300 struct anv_descriptor_template_entry
{
1301 /* The type of descriptor in this entry */
1302 VkDescriptorType type
;
1304 /* Binding in the descriptor set */
1307 /* Offset at which to write into the descriptor set binding */
1308 uint32_t array_element
;
1310 /* Number of elements to write into the descriptor set binding */
1311 uint32_t array_count
;
1313 /* Offset into the user provided data */
1316 /* Stride between elements into the user provided data */
1320 struct anv_descriptor_update_template
{
1321 /* The descriptor set this template corresponds to. This value is only
1322 * valid if the template was created with the templateType
1323 * VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR.
1327 /* Number of entries in this template */
1328 uint32_t entry_count
;
1330 /* Entries of the template */
1331 struct anv_descriptor_template_entry entries
[0];
1335 anv_descriptor_set_binding_layout_get_hw_size(const struct anv_descriptor_set_binding_layout
*binding
);
1338 anv_descriptor_set_layout_size(const struct anv_descriptor_set_layout
*layout
);
1341 anv_descriptor_set_write_image_view(struct anv_descriptor_set
*set
,
1342 const struct gen_device_info
* const devinfo
,
1343 const VkDescriptorImageInfo
* const info
,
1344 VkDescriptorType type
,
1349 anv_descriptor_set_write_buffer_view(struct anv_descriptor_set
*set
,
1350 VkDescriptorType type
,
1351 struct anv_buffer_view
*buffer_view
,
1356 anv_descriptor_set_write_buffer(struct anv_descriptor_set
*set
,
1357 struct anv_device
*device
,
1358 struct anv_state_stream
*alloc_stream
,
1359 VkDescriptorType type
,
1360 struct anv_buffer
*buffer
,
1363 VkDeviceSize offset
,
1364 VkDeviceSize range
);
1367 anv_descriptor_set_write_template(struct anv_descriptor_set
*set
,
1368 struct anv_device
*device
,
1369 struct anv_state_stream
*alloc_stream
,
1370 const struct anv_descriptor_update_template
*template,
1374 anv_descriptor_set_create(struct anv_device
*device
,
1375 struct anv_descriptor_pool
*pool
,
1376 const struct anv_descriptor_set_layout
*layout
,
1377 struct anv_descriptor_set
**out_set
);
1380 anv_descriptor_set_destroy(struct anv_device
*device
,
1381 struct anv_descriptor_pool
*pool
,
1382 struct anv_descriptor_set
*set
);
1384 #define ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS UINT8_MAX
1386 struct anv_pipeline_binding
{
1387 /* The descriptor set this surface corresponds to. The special value of
1388 * ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS indicates that the offset refers
1389 * to a color attachment and not a regular descriptor.
1393 /* Binding in the descriptor set */
1396 /* Index in the binding */
1399 /* Plane in the binding index */
1402 /* Input attachment index (relative to the subpass) */
1403 uint8_t input_attachment_index
;
1405 /* For a storage image, whether it is write-only */
1409 struct anv_pipeline_layout
{
1411 struct anv_descriptor_set_layout
*layout
;
1412 uint32_t dynamic_offset_start
;
1418 bool has_dynamic_offsets
;
1419 } stage
[MESA_SHADER_STAGES
];
1421 unsigned char sha1
[20];
1425 struct anv_device
* device
;
1428 VkBufferUsageFlags usage
;
1430 /* Set when bound */
1432 VkDeviceSize offset
;
1435 static inline uint64_t
1436 anv_buffer_get_range(struct anv_buffer
*buffer
, uint64_t offset
, uint64_t range
)
1438 assert(offset
<= buffer
->size
);
1439 if (range
== VK_WHOLE_SIZE
) {
1440 return buffer
->size
- offset
;
1442 assert(range
<= buffer
->size
);
1447 enum anv_cmd_dirty_bits
{
1448 ANV_CMD_DIRTY_DYNAMIC_VIEWPORT
= 1 << 0, /* VK_DYNAMIC_STATE_VIEWPORT */
1449 ANV_CMD_DIRTY_DYNAMIC_SCISSOR
= 1 << 1, /* VK_DYNAMIC_STATE_SCISSOR */
1450 ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH
= 1 << 2, /* VK_DYNAMIC_STATE_LINE_WIDTH */
1451 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS
= 1 << 3, /* VK_DYNAMIC_STATE_DEPTH_BIAS */
1452 ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS
= 1 << 4, /* VK_DYNAMIC_STATE_BLEND_CONSTANTS */
1453 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS
= 1 << 5, /* VK_DYNAMIC_STATE_DEPTH_BOUNDS */
1454 ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK
= 1 << 6, /* VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK */
1455 ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK
= 1 << 7, /* VK_DYNAMIC_STATE_STENCIL_WRITE_MASK */
1456 ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE
= 1 << 8, /* VK_DYNAMIC_STATE_STENCIL_REFERENCE */
1457 ANV_CMD_DIRTY_DYNAMIC_ALL
= (1 << 9) - 1,
1458 ANV_CMD_DIRTY_PIPELINE
= 1 << 9,
1459 ANV_CMD_DIRTY_INDEX_BUFFER
= 1 << 10,
1460 ANV_CMD_DIRTY_RENDER_TARGETS
= 1 << 11,
1462 typedef uint32_t anv_cmd_dirty_mask_t
;
1464 enum anv_pipe_bits
{
1465 ANV_PIPE_DEPTH_CACHE_FLUSH_BIT
= (1 << 0),
1466 ANV_PIPE_STALL_AT_SCOREBOARD_BIT
= (1 << 1),
1467 ANV_PIPE_STATE_CACHE_INVALIDATE_BIT
= (1 << 2),
1468 ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT
= (1 << 3),
1469 ANV_PIPE_VF_CACHE_INVALIDATE_BIT
= (1 << 4),
1470 ANV_PIPE_DATA_CACHE_FLUSH_BIT
= (1 << 5),
1471 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT
= (1 << 10),
1472 ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT
= (1 << 11),
1473 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT
= (1 << 12),
1474 ANV_PIPE_DEPTH_STALL_BIT
= (1 << 13),
1475 ANV_PIPE_CS_STALL_BIT
= (1 << 20),
1477 /* This bit does not exist directly in PIPE_CONTROL. Instead it means that
1478 * a flush has happened but not a CS stall. The next time we do any sort
1479 * of invalidation we need to insert a CS stall at that time. Otherwise,
1480 * we would have to CS stall on every flush which could be bad.
1482 ANV_PIPE_NEEDS_CS_STALL_BIT
= (1 << 21),
1485 #define ANV_PIPE_FLUSH_BITS ( \
1486 ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | \
1487 ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
1488 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT)
1490 #define ANV_PIPE_STALL_BITS ( \
1491 ANV_PIPE_STALL_AT_SCOREBOARD_BIT | \
1492 ANV_PIPE_DEPTH_STALL_BIT | \
1493 ANV_PIPE_CS_STALL_BIT)
1495 #define ANV_PIPE_INVALIDATE_BITS ( \
1496 ANV_PIPE_STATE_CACHE_INVALIDATE_BIT | \
1497 ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT | \
1498 ANV_PIPE_VF_CACHE_INVALIDATE_BIT | \
1499 ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
1500 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT | \
1501 ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT)
1503 static inline enum anv_pipe_bits
1504 anv_pipe_flush_bits_for_access_flags(VkAccessFlags flags
)
1506 enum anv_pipe_bits pipe_bits
= 0;
1509 for_each_bit(b
, flags
) {
1510 switch ((VkAccessFlagBits
)(1 << b
)) {
1511 case VK_ACCESS_SHADER_WRITE_BIT
:
1512 pipe_bits
|= ANV_PIPE_DATA_CACHE_FLUSH_BIT
;
1514 case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT
:
1515 pipe_bits
|= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT
;
1517 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT
:
1518 pipe_bits
|= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT
;
1520 case VK_ACCESS_TRANSFER_WRITE_BIT
:
1521 pipe_bits
|= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT
;
1522 pipe_bits
|= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT
;
1525 break; /* Nothing to do */
1532 static inline enum anv_pipe_bits
1533 anv_pipe_invalidate_bits_for_access_flags(VkAccessFlags flags
)
1535 enum anv_pipe_bits pipe_bits
= 0;
1538 for_each_bit(b
, flags
) {
1539 switch ((VkAccessFlagBits
)(1 << b
)) {
1540 case VK_ACCESS_INDIRECT_COMMAND_READ_BIT
:
1541 case VK_ACCESS_INDEX_READ_BIT
:
1542 case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT
:
1543 pipe_bits
|= ANV_PIPE_VF_CACHE_INVALIDATE_BIT
;
1545 case VK_ACCESS_UNIFORM_READ_BIT
:
1546 pipe_bits
|= ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT
;
1547 pipe_bits
|= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT
;
1549 case VK_ACCESS_SHADER_READ_BIT
:
1550 case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT
:
1551 case VK_ACCESS_TRANSFER_READ_BIT
:
1552 pipe_bits
|= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT
;
1555 break; /* Nothing to do */
1562 #define VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV ( \
1563 VK_IMAGE_ASPECT_COLOR_BIT | \
1564 VK_IMAGE_ASPECT_PLANE_0_BIT_KHR | \
1565 VK_IMAGE_ASPECT_PLANE_1_BIT_KHR | \
1566 VK_IMAGE_ASPECT_PLANE_2_BIT_KHR)
1567 #define VK_IMAGE_ASPECT_PLANES_BITS_ANV ( \
1568 VK_IMAGE_ASPECT_PLANE_0_BIT_KHR | \
1569 VK_IMAGE_ASPECT_PLANE_1_BIT_KHR | \
1570 VK_IMAGE_ASPECT_PLANE_2_BIT_KHR)
1572 struct anv_vertex_binding
{
1573 struct anv_buffer
* buffer
;
1574 VkDeviceSize offset
;
1577 #define ANV_PARAM_PUSH(offset) ((1 << 16) | (uint32_t)(offset))
1578 #define ANV_PARAM_PUSH_OFFSET(param) ((param) & 0xffff)
1580 struct anv_push_constants
{
1581 /* Current allocated size of this push constants data structure.
1582 * Because a decent chunk of it may not be used (images on SKL, for
1583 * instance), we won't actually allocate the entire structure up-front.
1587 /* Push constant data provided by the client through vkPushConstants */
1588 uint8_t client_data
[MAX_PUSH_CONSTANTS_SIZE
];
1590 /* Image data for image_load_store on pre-SKL */
1591 struct brw_image_param images
[MAX_IMAGES
];
1594 struct anv_dynamic_state
{
1597 VkViewport viewports
[MAX_VIEWPORTS
];
1602 VkRect2D scissors
[MAX_SCISSORS
];
1613 float blend_constants
[4];
1623 } stencil_compare_mask
;
1628 } stencil_write_mask
;
1633 } stencil_reference
;
1636 extern const struct anv_dynamic_state default_dynamic_state
;
1638 void anv_dynamic_state_copy(struct anv_dynamic_state
*dest
,
1639 const struct anv_dynamic_state
*src
,
1640 uint32_t copy_mask
);
1642 struct anv_surface_state
{
1643 struct anv_state state
;
1644 /** Address of the surface referred to by this state
1646 * This address is relative to the start of the BO.
1649 /* Address of the aux surface, if any
1651 * This field is 0 if and only if no aux surface exists.
1653 * This address is relative to the start of the BO. On gen7, the bottom 12
1654 * bits of this address include extra aux information.
1656 uint64_t aux_address
;
1660 * Attachment state when recording a renderpass instance.
1662 * The clear value is valid only if there exists a pending clear.
1664 struct anv_attachment_state
{
1665 enum isl_aux_usage aux_usage
;
1666 enum isl_aux_usage input_aux_usage
;
1667 struct anv_surface_state color
;
1668 struct anv_surface_state input
;
1670 VkImageLayout current_layout
;
1671 VkImageAspectFlags pending_clear_aspects
;
1673 VkClearValue clear_value
;
1674 bool clear_color_is_zero_one
;
1675 bool clear_color_is_zero
;
1678 /** State required while building cmd buffer */
1679 struct anv_cmd_state
{
1680 /* PIPELINE_SELECT.PipelineSelection */
1681 uint32_t current_pipeline
;
1682 const struct gen_l3_config
* current_l3_config
;
1684 anv_cmd_dirty_mask_t dirty
;
1685 anv_cmd_dirty_mask_t compute_dirty
;
1686 enum anv_pipe_bits pending_pipe_bits
;
1687 uint32_t num_workgroups_offset
;
1688 struct anv_bo
*num_workgroups_bo
;
1689 VkShaderStageFlags descriptors_dirty
;
1690 VkShaderStageFlags push_constants_dirty
;
1691 uint32_t scratch_size
;
1692 struct anv_pipeline
* pipeline
;
1693 struct anv_pipeline
* compute_pipeline
;
1694 struct anv_framebuffer
* framebuffer
;
1695 struct anv_render_pass
* pass
;
1696 struct anv_subpass
* subpass
;
1697 VkRect2D render_area
;
1698 uint32_t restart_index
;
1699 struct anv_vertex_binding vertex_bindings
[MAX_VBS
];
1700 struct anv_descriptor_set
* descriptors
[MAX_SETS
];
1701 uint32_t dynamic_offsets
[MAX_DYNAMIC_BUFFERS
];
1702 VkShaderStageFlags push_constant_stages
;
1703 struct anv_push_constants
* push_constants
[MESA_SHADER_STAGES
];
1704 struct anv_state binding_tables
[MESA_SHADER_STAGES
];
1705 struct anv_state samplers
[MESA_SHADER_STAGES
];
1706 struct anv_dynamic_state dynamic
;
1709 struct anv_push_descriptor_set
* push_descriptors
[MAX_SETS
];
1712 * Whether or not the gen8 PMA fix is enabled. We ensure that, at the top
1713 * of any command buffer it is disabled by disabling it in EndCommandBuffer
1714 * and before invoking the secondary in ExecuteCommands.
1716 bool pma_fix_enabled
;
1719 * Whether or not we know for certain that HiZ is enabled for the current
1720 * subpass. If, for whatever reason, we are unsure as to whether HiZ is
1721 * enabled or not, this will be false.
1726 * Array length is anv_cmd_state::pass::attachment_count. Array content is
1727 * valid only when recording a render pass instance.
1729 struct anv_attachment_state
* attachments
;
1732 * Surface states for color render targets. These are stored in a single
1733 * flat array. For depth-stencil attachments, the surface state is simply
1736 struct anv_state render_pass_states
;
1739 * A null surface state of the right size to match the framebuffer. This
1740 * is one of the states in render_pass_states.
1742 struct anv_state null_surface_state
;
1745 struct anv_buffer
* index_buffer
;
1746 uint32_t index_type
; /**< 3DSTATE_INDEX_BUFFER.IndexFormat */
1747 uint32_t index_offset
;
1751 struct anv_cmd_pool
{
1752 VkAllocationCallbacks alloc
;
1753 struct list_head cmd_buffers
;
1756 #define ANV_CMD_BUFFER_BATCH_SIZE 8192
1758 enum anv_cmd_buffer_exec_mode
{
1759 ANV_CMD_BUFFER_EXEC_MODE_PRIMARY
,
1760 ANV_CMD_BUFFER_EXEC_MODE_EMIT
,
1761 ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT
,
1762 ANV_CMD_BUFFER_EXEC_MODE_CHAIN
,
1763 ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN
,
1766 struct anv_cmd_buffer
{
1767 VK_LOADER_DATA _loader_data
;
1769 struct anv_device
* device
;
1771 struct anv_cmd_pool
* pool
;
1772 struct list_head pool_link
;
1774 struct anv_batch batch
;
1776 /* Fields required for the actual chain of anv_batch_bo's.
1778 * These fields are initialized by anv_cmd_buffer_init_batch_bo_chain().
1780 struct list_head batch_bos
;
1781 enum anv_cmd_buffer_exec_mode exec_mode
;
1783 /* A vector of anv_batch_bo pointers for every batch or surface buffer
1784 * referenced by this command buffer
1786 * initialized by anv_cmd_buffer_init_batch_bo_chain()
1788 struct u_vector seen_bbos
;
1790 /* A vector of int32_t's for every block of binding tables.
1792 * initialized by anv_cmd_buffer_init_batch_bo_chain()
1794 struct u_vector bt_block_states
;
1797 struct anv_reloc_list surface_relocs
;
1798 /** Last seen surface state block pool center bo offset */
1799 uint32_t last_ss_pool_center
;
1801 /* Serial for tracking buffer completion */
1804 /* Stream objects for storing temporary data */
1805 struct anv_state_stream surface_state_stream
;
1806 struct anv_state_stream dynamic_state_stream
;
1808 VkCommandBufferUsageFlags usage_flags
;
1809 VkCommandBufferLevel level
;
1811 struct anv_cmd_state state
;
1814 VkResult
anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer
*cmd_buffer
);
1815 void anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer
*cmd_buffer
);
1816 void anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer
*cmd_buffer
);
1817 void anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer
*cmd_buffer
);
1818 void anv_cmd_buffer_add_secondary(struct anv_cmd_buffer
*primary
,
1819 struct anv_cmd_buffer
*secondary
);
1820 void anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer
*cmd_buffer
);
1821 VkResult
anv_cmd_buffer_execbuf(struct anv_device
*device
,
1822 struct anv_cmd_buffer
*cmd_buffer
,
1823 const VkSemaphore
*in_semaphores
,
1824 uint32_t num_in_semaphores
,
1825 const VkSemaphore
*out_semaphores
,
1826 uint32_t num_out_semaphores
,
1829 VkResult
anv_cmd_buffer_reset(struct anv_cmd_buffer
*cmd_buffer
);
1832 anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer
*cmd_buffer
,
1833 gl_shader_stage stage
, uint32_t size
);
1834 #define anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, field) \
1835 anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, \
1836 (offsetof(struct anv_push_constants, field) + \
1837 sizeof(cmd_buffer->state.push_constants[0]->field)))
1839 struct anv_state
anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer
*cmd_buffer
,
1840 const void *data
, uint32_t size
, uint32_t alignment
);
1841 struct anv_state
anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer
*cmd_buffer
,
1842 uint32_t *a
, uint32_t *b
,
1843 uint32_t dwords
, uint32_t alignment
);
1846 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer
*cmd_buffer
);
1848 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer
*cmd_buffer
,
1849 uint32_t entries
, uint32_t *state_offset
);
1851 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer
*cmd_buffer
);
1853 anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer
*cmd_buffer
,
1854 uint32_t size
, uint32_t alignment
);
1857 anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer
*cmd_buffer
);
1859 void gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer
*cmd_buffer
);
1860 void gen8_cmd_buffer_emit_depth_viewport(struct anv_cmd_buffer
*cmd_buffer
,
1861 bool depth_clamp_enable
);
1862 void gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer
*cmd_buffer
);
1864 void anv_cmd_buffer_setup_attachments(struct anv_cmd_buffer
*cmd_buffer
,
1865 struct anv_render_pass
*pass
,
1866 struct anv_framebuffer
*framebuffer
,
1867 const VkClearValue
*clear_values
);
1869 void anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer
*cmd_buffer
);
1872 anv_cmd_buffer_push_constants(struct anv_cmd_buffer
*cmd_buffer
,
1873 gl_shader_stage stage
);
1875 anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer
*cmd_buffer
);
1877 void anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer
*cmd_buffer
);
1878 void anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer
*cmd_buffer
);
1880 const struct anv_image_view
*
1881 anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer
*cmd_buffer
);
1884 anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer
*cmd_buffer
,
1885 uint32_t num_entries
,
1886 uint32_t *state_offset
,
1887 struct anv_state
*bt_state
);
1889 void anv_cmd_buffer_dump(struct anv_cmd_buffer
*cmd_buffer
);
1891 enum anv_fence_type
{
1892 ANV_FENCE_TYPE_NONE
= 0,
1894 ANV_FENCE_TYPE_SYNCOBJ
,
1897 enum anv_bo_fence_state
{
1898 /** Indicates that this is a new (or newly reset fence) */
1899 ANV_BO_FENCE_STATE_RESET
,
1901 /** Indicates that this fence has been submitted to the GPU but is still
1902 * (as far as we know) in use by the GPU.
1904 ANV_BO_FENCE_STATE_SUBMITTED
,
1906 ANV_BO_FENCE_STATE_SIGNALED
,
1909 struct anv_fence_impl
{
1910 enum anv_fence_type type
;
1913 /** Fence implementation for BO fences
1915 * These fences use a BO and a set of CPU-tracked state flags. The BO
1916 * is added to the object list of the last execbuf call in a QueueSubmit
1917 * and is marked EXEC_WRITE. The state flags track when the BO has been
1918 * submitted to the kernel. We need to do this because Vulkan lets you
1919 * wait on a fence that has not yet been submitted and I915_GEM_BUSY
1920 * will say it's idle in this case.
1924 enum anv_bo_fence_state state
;
1927 /** DRM syncobj handle for syncobj-based fences */
struct anv_fence {
   /* Permanent fence state.  Every fence has some form of permanent state
    * (type != ANV_FENCE_TYPE_NONE).  This may be a BO to fence on (for
    * cross-process fences) or it could just be a dummy for use internally.
    */
   struct anv_fence_impl permanent;

   /* Temporary fence state.  A fence *may* have temporary state.  That state
    * is added to the fence by an import operation and is reset back to
    * ANV_FENCE_TYPE_NONE when the fence is reset.  A fence with temporary
    * state cannot be signaled because the fence must already be signaled
    * before the temporary state can be exported from the fence in the other
    * process and imported here.
    */
   struct anv_fence_impl temporary;
};
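/* A minimal sketch (editor's illustration, not driver code) of how the
 * temporary-vs-permanent split above is typically resolved: operations act
 * on the temporary state when one has been imported, and fall back to the
 * permanent state otherwise.
 *
 *    static inline struct anv_fence_impl *
 *    example_fence_active_impl(struct anv_fence *fence)
 *    {
 *       if (fence->temporary.type != ANV_FENCE_TYPE_NONE)
 *          return &fence->temporary;
 *       return &fence->permanent;
 *    }
 */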
struct anv_event {
   uint64_t semaphore;
   struct anv_state state;
};
enum anv_semaphore_type {
   ANV_SEMAPHORE_TYPE_NONE = 0,
   ANV_SEMAPHORE_TYPE_DUMMY,
   ANV_SEMAPHORE_TYPE_BO,
   ANV_SEMAPHORE_TYPE_SYNC_FILE,
   ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ,
};

struct anv_semaphore_impl {
   enum anv_semaphore_type type;

   union {
      /* A BO representing this semaphore when type == ANV_SEMAPHORE_TYPE_BO.
       * This BO will be added to the object list on any execbuf2 calls for
       * which this semaphore is used as a wait or signal fence.  When used as
       * a signal fence, the EXEC_OBJECT_WRITE flag will be set.
       */
      struct anv_bo *bo;

      /* The sync file descriptor when type == ANV_SEMAPHORE_TYPE_SYNC_FILE.
       * If the semaphore is in the unsignaled state due to either just being
       * created or because it has been used for a wait, fd will be -1.
       */
      int fd;

      /* Sync object handle when type == ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ.
       * Unlike GEM BOs, DRM sync objects aren't deduplicated by the kernel on
       * import so we don't need to bother with a userspace cache.
       */
      uint32_t syncobj;
   };
};

struct anv_semaphore {
   /* Permanent semaphore state.  Every semaphore has some form of permanent
    * state (type != ANV_SEMAPHORE_TYPE_NONE).  This may be a BO to fence on
    * (for cross-process semaphores) or it could just be a dummy for use
    * internally.
    */
   struct anv_semaphore_impl permanent;

   /* Temporary semaphore state.  A semaphore *may* have temporary state.
    * That state is added to the semaphore by an import operation and is reset
    * back to ANV_SEMAPHORE_TYPE_NONE when the semaphore is waited on.  A
    * semaphore with temporary state cannot be signaled because the semaphore
    * must already be signaled before the temporary state can be exported from
    * the semaphore in the other process and imported here.
    */
   struct anv_semaphore_impl temporary;
};
void anv_semaphore_reset_temporary(struct anv_device *device,
                                   struct anv_semaphore *semaphore);
struct anv_shader_module {
   unsigned char sha1[20];
   uint32_t size;
   char data[0];
};
static inline gl_shader_stage
vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
{
   assert(__builtin_popcount(vk_stage) == 1);
   return ffs(vk_stage) - 1;
}

static inline VkShaderStageFlagBits
mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
{
   return (1 << mesa_stage);
}

#define ANV_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)

#define anv_foreach_stage(stage, stage_bits)                         \
   for (gl_shader_stage stage,                                       \
        __tmp = (gl_shader_stage)((stage_bits) & ANV_STAGE_MASK);    \
        stage = __builtin_ffs(__tmp) - 1, __tmp;                     \
        __tmp &= ~(1 << (stage)))
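/* Example (illustrative): walking every stage set in a VkShaderStageFlags
 * mask. The macro assigns each set bit's gl_shader_stage to `stage` in turn,
 * so the body below runs once for the vertex stage and once for fragment:
 *
 *    VkShaderStageFlags bits = VK_SHADER_STAGE_VERTEX_BIT |
 *                              VK_SHADER_STAGE_FRAGMENT_BIT;
 *    anv_foreach_stage(stage, bits) {
 *       assert(mesa_to_vk_shader_stage(stage) & bits);
 *    }
 */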
struct anv_pipeline_bind_map {
   uint32_t surface_count;
   uint32_t sampler_count;
   uint32_t image_count;

   struct anv_pipeline_binding *surface_to_descriptor;
   struct anv_pipeline_binding *sampler_to_descriptor;
};

struct anv_shader_bin_key {
   uint32_t size;
   uint8_t data[0];
};

struct anv_shader_bin {
   uint32_t ref_cnt;

   const struct anv_shader_bin_key *key;

   struct anv_state kernel;
   uint32_t kernel_size;

   const struct brw_stage_prog_data *prog_data;
   uint32_t prog_data_size;

   struct anv_pipeline_bind_map bind_map;
};
struct anv_shader_bin *
anv_shader_bin_create(struct anv_device *device,
                      const void *key, uint32_t key_size,
                      const void *kernel, uint32_t kernel_size,
                      const struct brw_stage_prog_data *prog_data,
                      uint32_t prog_data_size, const void *prog_data_param,
                      const struct anv_pipeline_bind_map *bind_map);

void
anv_shader_bin_destroy(struct anv_device *device, struct anv_shader_bin *shader);

static inline void
anv_shader_bin_ref(struct anv_shader_bin *shader)
{
   assert(shader && shader->ref_cnt >= 1);
   p_atomic_inc(&shader->ref_cnt);
}

static inline void
anv_shader_bin_unref(struct anv_device *device, struct anv_shader_bin *shader)
{
   assert(shader && shader->ref_cnt >= 1);
   if (p_atomic_dec_zero(&shader->ref_cnt))
      anv_shader_bin_destroy(device, shader);
}
struct anv_pipeline {
   struct anv_device *device;
   struct anv_batch batch;
   uint32_t batch_data[512];
   struct anv_reloc_list batch_relocs;
   uint32_t dynamic_state_mask;
   struct anv_dynamic_state dynamic_state;

   struct anv_subpass *subpass;
   struct anv_pipeline_layout *layout;

   bool needs_data_cache;

   struct anv_shader_bin *shaders[MESA_SHADER_STAGES];

   struct {
      const struct gen_l3_config *l3_config;
      uint32_t total_size;
   } urb;

   VkShaderStageFlags active_stages;
   struct anv_state blend_state;

   uint32_t binding_stride[MAX_VBS];
   bool instancing_enable[MAX_VBS];
   bool primitive_restart;

   uint32_t cs_right_mask;

   bool depth_test_enable;
   bool writes_stencil;
   bool stencil_test_enable;
   bool depth_clamp_enable;
   bool sample_shading_enable;

   struct {
      uint32_t depth_stencil_state[3];
   } gen7;

   struct {
      uint32_t wm_depth_stencil[3];
   } gen8;

   struct {
      uint32_t wm_depth_stencil[4];
   } gen9;

   uint32_t interface_descriptor_data[8];
};
static inline bool
anv_pipeline_has_stage(const struct anv_pipeline *pipeline,
                       gl_shader_stage stage)
{
   return (pipeline->active_stages & mesa_to_vk_shader_stage(stage)) != 0;
}

#define ANV_DECL_GET_PROG_DATA_FUNC(prefix, stage)                   \
static inline const struct brw_##prefix##_prog_data *               \
get_##prefix##_prog_data(const struct anv_pipeline *pipeline)        \
{                                                                    \
   if (anv_pipeline_has_stage(pipeline, stage)) {                    \
      return (const struct brw_##prefix##_prog_data *)               \
             pipeline->shaders[stage]->prog_data;                    \
   } else {                                                          \
      return NULL;                                                   \
   }                                                                 \
}
ANV_DECL_GET_PROG_DATA_FUNC(vs, MESA_SHADER_VERTEX)
ANV_DECL_GET_PROG_DATA_FUNC(tcs, MESA_SHADER_TESS_CTRL)
ANV_DECL_GET_PROG_DATA_FUNC(tes, MESA_SHADER_TESS_EVAL)
ANV_DECL_GET_PROG_DATA_FUNC(gs, MESA_SHADER_GEOMETRY)
ANV_DECL_GET_PROG_DATA_FUNC(wm, MESA_SHADER_FRAGMENT)
ANV_DECL_GET_PROG_DATA_FUNC(cs, MESA_SHADER_COMPUTE)
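/* Example (illustrative): the generated getters return NULL when the stage
 * is absent, so callers typically test the result before dereferencing:
 *
 *    const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
 *    if (wm_prog_data != NULL) {
 *       ... fragment-stage-specific setup ...
 *    }
 */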
static inline const struct brw_vue_prog_data *
anv_pipeline_get_last_vue_prog_data(const struct anv_pipeline *pipeline)
{
   if (anv_pipeline_has_stage(pipeline, MESA_SHADER_GEOMETRY))
      return &get_gs_prog_data(pipeline)->base;
   else if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
      return &get_tes_prog_data(pipeline)->base;
   else
      return &get_vs_prog_data(pipeline)->base;
}
VkResult
anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
                  struct anv_pipeline_cache *cache,
                  const VkGraphicsPipelineCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *alloc);

VkResult
anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkComputePipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info);
struct anv_format_plane {
   enum isl_format isl_format:16;
   struct isl_swizzle swizzle;

   /* Whether this plane contains chroma channels */
   bool has_chroma;

   /* For downscaling of YUV planes */
   uint8_t denominator_scales[2];

   /* How to map sampled ycbcr planes to a single 4 component element. */
   struct isl_swizzle ycbcr_swizzle;
};

struct anv_format {
   struct anv_format_plane planes[3];
   uint8_t n_planes;
   bool can_ycbcr;
};
static inline uint32_t
anv_image_aspect_to_plane(VkImageAspectFlags image_aspects,
                          VkImageAspectFlags aspect_mask)
{
   switch (aspect_mask) {
   case VK_IMAGE_ASPECT_COLOR_BIT:
   case VK_IMAGE_ASPECT_DEPTH_BIT:
   case VK_IMAGE_ASPECT_PLANE_0_BIT_KHR:
      return 0;
   case VK_IMAGE_ASPECT_STENCIL_BIT:
      if ((image_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) == 0)
         return 0;
      /* Fall-through */
   case VK_IMAGE_ASPECT_PLANE_1_BIT_KHR:
      return 1;
   case VK_IMAGE_ASPECT_PLANE_2_BIT_KHR:
      return 2;
   default:
      /* Purposefully assert with depth/stencil aspects. */
      unreachable("invalid image aspect");
   }
}
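/* Worked example (illustrative): for a combined depth/stencil image,
 * image_aspects == (DEPTH | STENCIL), so stencil falls through to plane 1;
 * for a stencil-only image, stencil is the first (and only) aspect and maps
 * to plane 0:
 *
 *    anv_image_aspect_to_plane(VK_IMAGE_ASPECT_DEPTH_BIT |
 *                              VK_IMAGE_ASPECT_STENCIL_BIT,
 *                              VK_IMAGE_ASPECT_STENCIL_BIT);    == 1
 *    anv_image_aspect_to_plane(VK_IMAGE_ASPECT_STENCIL_BIT,
 *                              VK_IMAGE_ASPECT_STENCIL_BIT);    == 0
 */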
static inline uint32_t
anv_image_aspect_get_planes(VkImageAspectFlags aspect_mask)
{
   uint32_t planes = 0;

   if (aspect_mask & (VK_IMAGE_ASPECT_COLOR_BIT |
                      VK_IMAGE_ASPECT_DEPTH_BIT |
                      VK_IMAGE_ASPECT_STENCIL_BIT |
                      VK_IMAGE_ASPECT_PLANE_0_BIT_KHR))
      planes++;
   if (aspect_mask & VK_IMAGE_ASPECT_PLANE_1_BIT_KHR)
      planes++;
   if (aspect_mask & VK_IMAGE_ASPECT_PLANE_2_BIT_KHR)
      planes++;

   return planes;
}

static inline VkImageAspectFlags
anv_plane_to_aspect(VkImageAspectFlags image_aspects,
                    uint32_t plane)
{
   if (image_aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
      if (_mesa_bitcount(image_aspects) > 1)
         return VK_IMAGE_ASPECT_PLANE_0_BIT_KHR << plane;
      return VK_IMAGE_ASPECT_COLOR_BIT;
   }
   if (image_aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
      return VK_IMAGE_ASPECT_DEPTH_BIT << plane;
   assert(image_aspects == VK_IMAGE_ASPECT_STENCIL_BIT);
   return VK_IMAGE_ASPECT_STENCIL_BIT;
}
#define anv_foreach_image_aspect_bit(b, image, aspects) \
   for_each_bit(b, anv_image_expand_aspects(image, aspects))

const struct anv_format *
anv_get_format(VkFormat format);

static inline uint32_t
anv_get_format_planes(VkFormat vk_format)
{
   const struct anv_format *format = anv_get_format(vk_format);

   return format != NULL ? format->n_planes : 0;
}

struct anv_format_plane
anv_get_format_plane(const struct gen_device_info *devinfo, VkFormat vk_format,
                     VkImageAspectFlagBits aspect, VkImageTiling tiling);

static inline enum isl_format
anv_get_isl_format(const struct gen_device_info *devinfo, VkFormat vk_format,
                   VkImageAspectFlags aspect, VkImageTiling tiling)
{
   return anv_get_format_plane(devinfo, vk_format, aspect, tiling).isl_format;
}
static inline struct isl_swizzle
anv_swizzle_for_render(struct isl_swizzle swizzle)
{
   /* Sometimes the swizzle will have alpha map to one.  We do this to fake
    * RGB as RGBA for texturing
    */
   assert(swizzle.a == ISL_CHANNEL_SELECT_ONE ||
          swizzle.a == ISL_CHANNEL_SELECT_ALPHA);

   /* But it doesn't matter what we render to that channel */
   swizzle.a = ISL_CHANNEL_SELECT_ALPHA;

   return swizzle;
}

void
anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm);
/**
 * Subsurface of an anv_image.
 */
struct anv_surface {
   /** Valid only if isl_surf::size > 0. */
   struct isl_surf isl;

   /**
    * Offset from VkImage's base address, as bound by vkBindImageMemory().
    */
   uint32_t offset;
};
struct anv_image {
   VkImageType type;
   /* The original VkFormat provided by the client.  This may not match any
    * of the actual surface formats.
    */
   VkFormat vk_format;
   const struct anv_format *format;

   VkImageAspectFlags aspects;
   VkExtent3D extent;
   uint32_t levels;
   uint32_t array_size;
   uint32_t samples; /**< VkImageCreateInfo::samples */
   uint32_t n_planes;
   VkImageUsageFlags usage; /**< Superset of VkImageCreateInfo::usage. */
   VkImageTiling tiling; /**< VkImageCreateInfo::tiling */

   /* Whether the image is made of several underlying buffer objects rather a
    * single one with different offsets.
    */
   bool disjoint;

   /**
    * Image subsurfaces
    *
    * For each foo, anv_image::planes[x].surface is valid if and only if
    * anv_image::aspects has a x aspect.  Refer to anv_image_aspect_to_plane()
    * to figure the number associated with a given aspect.
    *
    * The hardware requires that the depth buffer and stencil buffer be
    * separate surfaces.  From Vulkan's perspective, though, depth and stencil
    * reside in the same VkImage.  To satisfy both the hardware and Vulkan, we
    * allocate the depth and stencil buffers as separate surfaces in the same
    * bo.
    *
    * Memory layout :
    *
    * -----------------------
    * |     surface0        |   /|\
    * -----------------------    |
    * |   shadow surface0   |    |
    * -----------------------    | Plane 0
    * |    aux surface0     |    |
    * -----------------------    |
    * | fast clear colors0  |   \|/
    * -----------------------
    * |     surface1        |   /|\
    * -----------------------    |
    * |   shadow surface1   |    |
    * -----------------------    | Plane 1
    * |    aux surface1     |    |
    * -----------------------    |
    * | fast clear colors1  |   \|/
    * -----------------------
    * |        ...          |
    * -----------------------
    */
   struct {
      /**
       * Offset of the entire plane (whenever the image is disjoint this is
       * simply 0).
       */
      uint32_t offset;

      struct anv_surface surface;

      /**
       * A surface which shadows the main surface and may have different
       * tiling. This is used for sampling using a tiling that isn't supported
       * for other operations.
       */
      struct anv_surface shadow_surface;

      /**
       * For color images, this is the aux usage for this image when not used
       * as a color attachment.
       *
       * For depth/stencil images, this is set to ISL_AUX_USAGE_HIZ if the
       * image has a HiZ buffer.
       */
      enum isl_aux_usage aux_usage;

      struct anv_surface aux_surface;

      /**
       * Offset of the fast clear state (used to compute the
       * fast_clear_state_offset of the following planes).
       */
      uint32_t fast_clear_state_offset;

      /**
       * BO associated with this plane, set when bound.
       */
      struct anv_bo *bo;
      VkDeviceSize bo_offset;

      /**
       * When destroying the image, also free the bo.
       */
      bool bo_is_owned;
   } planes[3];
};
/* Returns the number of auxiliary buffer levels attached to an image. */
static inline uint8_t
anv_image_aux_levels(const struct anv_image * const image,
                     VkImageAspectFlagBits aspect)
{
   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
   return image->planes[plane].aux_surface.isl.size > 0 ?
          image->planes[plane].aux_surface.isl.levels : 0;
}
/* Returns the number of auxiliary buffer layers attached to an image. */
static inline uint32_t
anv_image_aux_layers(const struct anv_image * const image,
                     VkImageAspectFlagBits aspect,
                     const uint8_t miplevel)
{
   assert(image);

   /* The miplevel must exist in the main buffer. */
   assert(miplevel < image->levels);

   if (miplevel >= anv_image_aux_levels(image, aspect)) {
      /* There are no layers with auxiliary data because the miplevel has no
       * auxiliary data.
       */
      return 0;
   } else {
      uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
      return MAX2(image->planes[plane].aux_surface.isl.logical_level0_px.array_len,
                  image->planes[plane].aux_surface.isl.logical_level0_px.depth >> miplevel);
   }
}
static inline unsigned
anv_fast_clear_state_entry_size(const struct anv_device *device)
{
   assert(device);
   /* Entry contents:
    *   +--------------------------------------------+
    *   | clear value dword(s) | needs resolve dword |
    *   +--------------------------------------------+
    */

   /* Ensure that the needs resolve dword is in fact dword-aligned to enable
    * GPU memcpy operations.
    */
   assert(device->isl_dev.ss.clear_value_size % 4 == 0);
   return device->isl_dev.ss.clear_value_size + 4;
}
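/* Worked example (illustrative, assuming a 16-byte surface-state clear value,
 * i.e. isl_dev.ss.clear_value_size == 16): each entry then occupies 20 bytes,
 * with dwords 0-3 holding the clear color and dword 4 the needs-resolve flag,
 * which stays 4-byte aligned as the assert above requires.
 */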
/* Returns true if a HiZ-enabled depth buffer can be sampled from. */
static inline bool
anv_can_sample_with_hiz(const struct gen_device_info * const devinfo,
                        const struct anv_image *image)
{
   if (!(image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT))
      return false;

   if (devinfo->gen < 8)
      return false;

   return image->samples == 1;
}
void
anv_gen8_hiz_op_resolve(struct anv_cmd_buffer *cmd_buffer,
                        const struct anv_image *image,
                        enum blorp_hiz_op op);
void
anv_ccs_resolve(struct anv_cmd_buffer * const cmd_buffer,
                const struct anv_state surface_state,
                const struct anv_image * const image,
                VkImageAspectFlagBits aspect,
                const uint8_t level, const uint32_t layer_count,
                const enum blorp_fast_clear_op op);

void
anv_image_fast_clear(struct anv_cmd_buffer *cmd_buffer,
                     const struct anv_image *image,
                     VkImageAspectFlagBits aspect,
                     const uint32_t base_level, const uint32_t level_count,
                     const uint32_t base_layer, uint32_t layer_count);

void
anv_image_copy_to_shadow(struct anv_cmd_buffer *cmd_buffer,
                         const struct anv_image *image,
                         uint32_t base_level, uint32_t level_count,
                         uint32_t base_layer, uint32_t layer_count);

enum isl_aux_usage
anv_layout_to_aux_usage(const struct gen_device_info * const devinfo,
                        const struct anv_image *image,
                        const VkImageAspectFlagBits aspect,
                        const VkImageLayout layout);
/* This is defined as a macro so that it works for both
 * VkImageSubresourceRange and VkImageSubresourceLayers
 */
#define anv_get_layerCount(_image, _range) \
   ((_range)->layerCount == VK_REMAINING_ARRAY_LAYERS ? \
    (_image)->array_size - (_range)->baseArrayLayer : (_range)->layerCount)
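/* Example (illustrative): a view of layers [2, N) of an 8-layer image. With
 * layerCount == VK_REMAINING_ARRAY_LAYERS and baseArrayLayer == 2, the macro
 * evaluates to 8 - 2 == 6 layers; an explicit layerCount is returned
 * unchanged.
 */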
static inline uint32_t
anv_get_levelCount(const struct anv_image *image,
                   const VkImageSubresourceRange *range)
{
   return range->levelCount == VK_REMAINING_MIP_LEVELS ?
          image->levels - range->baseMipLevel : range->levelCount;
}
static inline VkImageAspectFlags
anv_image_expand_aspects(const struct anv_image *image,
                         VkImageAspectFlags aspects)
{
   /* If the underlying image has color plane aspects and
    * VK_IMAGE_ASPECT_COLOR_BIT has been requested, then return the aspects
    * of the underlying image.
    */
   if ((image->aspects & VK_IMAGE_ASPECT_PLANES_BITS_ANV) != 0 &&
       aspects == VK_IMAGE_ASPECT_COLOR_BIT)
      return image->aspects;

   return aspects;
}

static inline bool
anv_image_aspects_compatible(VkImageAspectFlags aspects1,
                             VkImageAspectFlags aspects2)
{
   if (aspects1 == aspects2)
      return true;

   /* Two color aspect masks are compatible if they have the same number of
    * color planes.
    */
   if ((aspects1 & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) != 0 &&
       (aspects2 & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) != 0 &&
       _mesa_bitcount(aspects1) == _mesa_bitcount(aspects2))
      return true;

   return false;
}
struct anv_image_view {
   const struct anv_image *image; /**< VkImageViewCreateInfo::image */

   VkImageAspectFlags aspect_mask;
   VkFormat vk_format;
   VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */

   unsigned n_planes;
   struct {
      uint32_t image_plane;

      struct isl_view isl;

      /**
       * RENDER_SURFACE_STATE when using image as a sampler surface with an
       * image layout of SHADER_READ_ONLY_OPTIMAL or
       * DEPTH_STENCIL_READ_ONLY_OPTIMAL.
       */
      struct anv_surface_state optimal_sampler_surface_state;

      /**
       * RENDER_SURFACE_STATE when using image as a sampler surface with an
       * image layout of GENERAL.
       */
      struct anv_surface_state general_sampler_surface_state;

      /**
       * RENDER_SURFACE_STATE when using image as a storage image.  Separate
       * states for write-only and readable, using the real format for
       * write-only and the lowered format for readable.
       */
      struct anv_surface_state storage_surface_state;
      struct anv_surface_state writeonly_storage_surface_state;

      struct brw_image_param storage_image_param;
   } planes[3];
};
enum anv_image_view_state_flags {
   ANV_IMAGE_VIEW_STATE_STORAGE_WRITE_ONLY   = (1 << 0),
   ANV_IMAGE_VIEW_STATE_TEXTURE_OPTIMAL      = (1 << 1),
};

void anv_image_fill_surface_state(struct anv_device *device,
                                  const struct anv_image *image,
                                  VkImageAspectFlagBits aspect,
                                  const struct isl_view *view,
                                  isl_surf_usage_flags_t view_usage,
                                  enum isl_aux_usage aux_usage,
                                  const union isl_color_value *clear_color,
                                  enum anv_image_view_state_flags flags,
                                  struct anv_surface_state *state_inout,
                                  struct brw_image_param *image_param_out);
struct anv_image_create_info {
   const VkImageCreateInfo *vk_info;

   /** An opt-in bitmask which filters an ISL-mapping of the Vulkan tiling. */
   isl_tiling_flags_t isl_tiling_flags;

   /** These flags will be added to any derived from VkImageCreateInfo. */
   isl_surf_usage_flags_t isl_extra_usage_flags;

   uint32_t stride;
};

VkResult anv_image_create(VkDevice _device,
                          const struct anv_image_create_info *info,
                          const VkAllocationCallbacks *alloc,
                          VkImage *pImage);

VkResult anv_image_from_gralloc(VkDevice device_h,
                                const VkImageCreateInfo *base_info,
                                const VkNativeBufferANDROID *gralloc_info,
                                const VkAllocationCallbacks *alloc,
                                VkImage *out_image_h);

const struct anv_surface *
anv_image_get_surface_for_aspect_mask(const struct anv_image *image,
                                      VkImageAspectFlags aspect_mask);

enum isl_format
anv_isl_format_for_descriptor_type(VkDescriptorType type);
static inline struct VkExtent3D
anv_sanitize_image_extent(const VkImageType imageType,
                          const struct VkExtent3D imageExtent)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkExtent3D) { imageExtent.width, 1, 1 };
   case VK_IMAGE_TYPE_2D:
      return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
   case VK_IMAGE_TYPE_3D:
      return imageExtent;
   default:
      unreachable("invalid image type");
   }
}

static inline struct VkOffset3D
anv_sanitize_image_offset(const VkImageType imageType,
                          const struct VkOffset3D imageOffset)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkOffset3D) { imageOffset.x, 0, 0 };
   case VK_IMAGE_TYPE_2D:
      return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
   case VK_IMAGE_TYPE_3D:
      return imageOffset;
   default:
      unreachable("invalid image type");
   }
}

void anv_fill_buffer_surface_state(struct anv_device *device,
                                   struct anv_state state,
                                   enum isl_format format,
                                   uint32_t offset, uint32_t range,
                                   uint32_t stride);
struct anv_ycbcr_conversion {
   const struct anv_format *format;
   VkSamplerYcbcrModelConversionKHR ycbcr_model;
   VkSamplerYcbcrRangeKHR ycbcr_range;
   VkComponentSwizzle mapping[4];
   VkChromaLocationKHR chroma_offsets[2];
   VkFilter chroma_filter;
   bool chroma_reconstruction;
};

struct anv_sampler {
   uint32_t state[3][4];
   uint32_t n_planes;
   struct anv_ycbcr_conversion *conversion;
};

struct anv_framebuffer {
   uint32_t width;
   uint32_t height;
   uint32_t layers;

   uint32_t attachment_count;
   struct anv_image_view *attachments[0];
};
struct anv_subpass {
   uint32_t attachment_count;

   /**
    * A pointer to all attachment references used in this subpass.
    * Only valid if ::attachment_count > 0.
    */
   VkAttachmentReference *attachments;
   uint32_t input_count;
   VkAttachmentReference *input_attachments;
   uint32_t color_count;
   VkAttachmentReference *color_attachments;
   VkAttachmentReference *resolve_attachments;

   VkAttachmentReference depth_stencil_attachment;

   uint32_t view_mask;

   /** Subpass has a depth/stencil self-dependency */
   bool has_ds_self_dep;

   /** Subpass has at least one resolve attachment */
   bool has_resolve;
};
static inline unsigned
anv_subpass_view_count(const struct anv_subpass *subpass)
{
   return MAX2(1, _mesa_bitcount(subpass->view_mask));
}
struct anv_render_pass_attachment {
   /* TODO: Consider using VkAttachmentDescription instead of storing each of
    * its members individually.
    */
   VkFormat format;
   uint32_t samples;
   VkImageUsageFlags usage;
   VkAttachmentLoadOp load_op;
   VkAttachmentStoreOp store_op;
   VkAttachmentLoadOp stencil_load_op;
   VkImageLayout initial_layout;
   VkImageLayout final_layout;
   VkImageLayout first_subpass_layout;

   /* The subpass id in which the attachment will be used last. */
   uint32_t last_subpass_idx;
};
struct anv_render_pass {
   uint32_t attachment_count;
   uint32_t subpass_count;
   /* An array of subpass_count+1 flushes, one per subpass boundary */
   enum anv_pipe_bits *subpass_flushes;
   struct anv_render_pass_attachment *attachments;
   struct anv_subpass subpasses[0];
};

#define ANV_PIPELINE_STATISTICS_MASK 0x000007ff
struct anv_query_pool {
   VkQueryType type;
   VkQueryPipelineStatisticFlags pipeline_statistics;
   /** Stride between slots, in bytes */
   uint32_t stride;
   /** Number of slots in this query pool */
   uint32_t slots;
   struct anv_bo bo;
};

void *anv_lookup_entrypoint(const struct gen_device_info *devinfo,
                            const char *name);
void anv_dump_image_to_ppm(struct anv_device *device,
                           struct anv_image *image, unsigned miplevel,
                           unsigned array_layer, VkImageAspectFlagBits aspect,
                           const char *filename);

enum anv_dump_action {
   ANV_DUMP_FRAMEBUFFERS_BIT = 0x1,
};

void anv_dump_start(struct anv_device *device, enum anv_dump_action actions);
void anv_dump_finish(void);

void anv_dump_add_framebuffer(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_framebuffer *fb);
static inline uint32_t
anv_get_subpass_id(const struct anv_cmd_state * const cmd_state)
{
   /* This function must be called from within a subpass. */
   assert(cmd_state->pass && cmd_state->subpass);

   const uint32_t subpass_id = cmd_state->subpass - cmd_state->pass->subpasses;

   /* The id of this subpass shouldn't exceed the number of subpasses in this
    * render pass minus 1.
    */
   assert(subpass_id < cmd_state->pass->subpass_count);
   return subpass_id;
}
#define ANV_DEFINE_HANDLE_CASTS(__anv_type, __VkType)              \
                                                                   \
   static inline struct __anv_type *                               \
   __anv_type ## _from_handle(__VkType _handle)                    \
   {                                                               \
      return (struct __anv_type *) _handle;                        \
   }                                                               \
                                                                   \
   static inline __VkType                                          \
   __anv_type ## _to_handle(struct __anv_type *_obj)               \
   {                                                               \
      return (__VkType) _obj;                                      \
   }

#define ANV_DEFINE_NONDISP_HANDLE_CASTS(__anv_type, __VkType)      \
                                                                   \
   static inline struct __anv_type *                               \
   __anv_type ## _from_handle(__VkType _handle)                    \
   {                                                               \
      return (struct __anv_type *)(uintptr_t) _handle;             \
   }                                                               \
                                                                   \
   static inline __VkType                                          \
   __anv_type ## _to_handle(struct __anv_type *_obj)               \
   {                                                               \
      return (__VkType)(uintptr_t) _obj;                           \
   }

#define ANV_FROM_HANDLE(__anv_type, __name, __handle) \
   struct __anv_type *__name = __anv_type ## _from_handle(__handle)
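/* Example (illustrative): a typical entrypoint prologue converts the Vulkan
 * handle back to the driver struct using the casts the macros above generate.
 * The entrypoint name here is hypothetical:
 *
 *    VkResult anv_ExampleEntrypoint(VkDevice _device)
 *    {
 *       ANV_FROM_HANDLE(anv_device, device, _device);
 *       ... use device ...
 *       return VK_SUCCESS;
 *    }
 */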
ANV_DEFINE_HANDLE_CASTS(anv_cmd_buffer, VkCommandBuffer)
ANV_DEFINE_HANDLE_CASTS(anv_device, VkDevice)
ANV_DEFINE_HANDLE_CASTS(anv_instance, VkInstance)
ANV_DEFINE_HANDLE_CASTS(anv_physical_device, VkPhysicalDevice)
ANV_DEFINE_HANDLE_CASTS(anv_queue, VkQueue)

ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_cmd_pool, VkCommandPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer, VkBuffer)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer_view, VkBufferView)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_pool, VkDescriptorPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set, VkDescriptorSet)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set_layout, VkDescriptorSetLayout)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_update_template, VkDescriptorUpdateTemplateKHR)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_device_memory, VkDeviceMemory)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_fence, VkFence)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_event, VkEvent)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_framebuffer, VkFramebuffer)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image, VkImage)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image_view, VkImageView)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_cache, VkPipelineCache)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline, VkPipeline)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_layout, VkPipelineLayout)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_query_pool, VkQueryPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_render_pass, VkRenderPass)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_sampler, VkSampler)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_semaphore, VkSemaphore)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_shader_module, VkShaderModule)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_debug_report_callback, VkDebugReportCallbackEXT)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_ycbcr_conversion, VkSamplerYcbcrConversionKHR)
/* Gen-specific function declarations */
#ifdef genX
#  include "anv_genX.h"
#else
#  define genX(x) gen7_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen75_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen8_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen9_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen10_##x
#  include "anv_genX.h"
#  undef genX
#endif

#endif /* ANV_PRIVATE_H */