/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))

#include "common/gen_clflush.h"
#include "common/gen_device_info.h"
#include "blorp/blorp.h"
#include "compiler/brw_compiler.h"
#include "util/macros.h"
#include "util/list.h"
#include "util/u_atomic.h"
#include "util/u_vector.h"
/* Pre-declarations needed for WSI entrypoints */
typedef struct xcb_connection_t xcb_connection_t;
typedef uint32_t xcb_visualid_t;
typedef uint32_t xcb_window_t;

struct anv_buffer_view;
struct anv_image_view;
struct anv_debug_report_callback;

#include <vulkan/vulkan.h>
#include <vulkan/vulkan_intel.h>
#include <vulkan/vk_icd.h>

#include "anv_entrypoints.h"

#include "common/gen_debug.h"
#include "common/intel_log.h"
#include "wsi_common.h"
/* Allowing different clear colors requires us to perform a depth resolve at
 * the end of certain render passes. This is because while slow clears store
 * the clear color in the HiZ buffer, fast clears (without a resolve) don't.
 * See the PRMs for examples describing when additional resolves would be
 * necessary. To enable fast clears without requiring extra resolves, we set
 * the clear value to a globally-defined one. We could allow different values
 * if the user doesn't expect coherent data during or after a render pass
 * (VK_ATTACHMENT_STORE_OP_DONT_CARE), but such users (aside from the CTS)
 * don't seem to exist yet. In almost all Vulkan applications tested thus far,
 * 1.0f seems to be the only value used. The only application that doesn't set
 * this value does so through the usage of a seemingly uninitialized clear
 * value.
 */
#define ANV_HZ_FC_VAL 1.0f
#define MAX_VIEWPORTS 16
#define MAX_SCISSORS  16
#define MAX_PUSH_CONSTANTS_SIZE 128
#define MAX_DYNAMIC_BUFFERS 16
#define MAX_PUSH_DESCRIPTORS 32 /* Minimum requirement */

#define ANV_SVGS_VB_INDEX    MAX_VBS
#define ANV_DRAWID_VB_INDEX  (MAX_VBS + 1)

#define anv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
static inline uint32_t
align_down_npot_u32(uint32_t v, uint32_t a)
{
   return v - (v % a);
}

static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

static inline uint64_t
align_u64(uint64_t v, uint64_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

static inline int32_t
align_i32(int32_t v, int32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

/** Alignment must be a power of 2. */
static inline bool
anv_is_aligned(uintmax_t n, uintmax_t a)
{
   assert(a == (a & -a));
   return (n & (a - 1)) == 0;
}

static inline uint32_t
anv_minify(uint32_t n, uint32_t levels)
{
   if (unlikely(n == 0))
      return 0;
   else
      return MAX2(n >> levels, 1);
}
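
/* A few illustrative values for the helpers above (a sketch, not part of the
 * driver):
 *
 *    align_u32(13, 8)            == 16   // rounds up to the next multiple of 8
 *    align_down_npot_u32(13, 6)  == 12   // works for non-power-of-two alignments
 *    anv_is_aligned(16, 8)       == true
 *    anv_minify(16, 2)           == 4    // mip level 2 of a 16-texel dimension
 *    anv_minify(16, 5)           == 1    // never goes below 1
 */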
static inline float
anv_clamp_f(float f, float min, float max)
{
   assert(min < max);

   if (f > max)
      return max;
   else if (f < min)
      return min;
   else
      return f;
}

static inline bool
anv_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
{
   if (*inout_mask & clear_mask) {
      *inout_mask &= ~clear_mask;
      return true;
   } else {
      return false;
   }
}

static inline union isl_color_value
vk_to_isl_color(VkClearColorValue color)
{
   return (union isl_color_value) {
      .u32 = {
         color.uint32[0],
         color.uint32[1],
         color.uint32[2],
         color.uint32[3],
      },
   };
}
#define for_each_bit(b, dword)                          \
   for (uint32_t __dword = (dword);                     \
        (b) = __builtin_ffs(__dword) - 1, __dword;      \
        __dword &= ~(1 << (b)))

#define typed_memcpy(dest, src, count) ({ \
   STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
   memcpy((dest), (src), (count) * sizeof(*(src))); \
})
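
/* Illustrative usage of the two helpers above (not part of the driver; the
 * VkSampleCountFlags value is just an example input):
 *
 *    VkSampleCountFlags counts = VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT;
 *    uint32_t b;
 *    for_each_bit(b, counts) {
 *       // Visits b == 0, then b == 2 (the positions of the set bits).
 *    }
 *
 *    uint32_t dst[4], src[4] = { 1, 2, 3, 4 };
 *    typed_memcpy(dst, src, 4);   // STATIC_ASSERT guards against element size mismatch
 */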
/* Mapping from anv object to VkDebugReportObjectTypeEXT. New types need
 * to be added here in order to utilize mapping in debug/error/perf macros.
 */
204 #define REPORT_OBJECT_TYPE(o) \
205 __builtin_choose_expr ( \
206 __builtin_types_compatible_p (__typeof (o), struct anv_instance*), \
207 VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, \
208 __builtin_choose_expr ( \
209 __builtin_types_compatible_p (__typeof (o), struct anv_physical_device*), \
210 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, \
211 __builtin_choose_expr ( \
212 __builtin_types_compatible_p (__typeof (o), struct anv_device*), \
213 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, \
214 __builtin_choose_expr ( \
215 __builtin_types_compatible_p (__typeof (o), const struct anv_device*), \
216 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, \
217 __builtin_choose_expr ( \
218 __builtin_types_compatible_p (__typeof (o), struct anv_queue*), \
219 VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, \
220 __builtin_choose_expr ( \
221 __builtin_types_compatible_p (__typeof (o), struct anv_semaphore*), \
222 VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, \
223 __builtin_choose_expr ( \
224 __builtin_types_compatible_p (__typeof (o), struct anv_cmd_buffer*), \
225 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, \
226 __builtin_choose_expr ( \
227 __builtin_types_compatible_p (__typeof (o), struct anv_fence*), \
228 VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, \
229 __builtin_choose_expr ( \
230 __builtin_types_compatible_p (__typeof (o), struct anv_device_memory*), \
231 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, \
232 __builtin_choose_expr ( \
233 __builtin_types_compatible_p (__typeof (o), struct anv_buffer*), \
234 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, \
235 __builtin_choose_expr ( \
236 __builtin_types_compatible_p (__typeof (o), struct anv_image*), \
237 VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, \
238 __builtin_choose_expr ( \
239 __builtin_types_compatible_p (__typeof (o), const struct anv_image*), \
240 VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, \
241 __builtin_choose_expr ( \
242 __builtin_types_compatible_p (__typeof (o), struct anv_event*), \
243 VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT, \
244 __builtin_choose_expr ( \
245 __builtin_types_compatible_p (__typeof (o), struct anv_query_pool*), \
246 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, \
247 __builtin_choose_expr ( \
248 __builtin_types_compatible_p (__typeof (o), struct anv_buffer_view*), \
249 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT, \
250 __builtin_choose_expr ( \
251 __builtin_types_compatible_p (__typeof (o), struct anv_image_view*), \
252 VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, \
253 __builtin_choose_expr ( \
254 __builtin_types_compatible_p (__typeof (o), struct anv_shader_module*), \
255 VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, \
256 __builtin_choose_expr ( \
257 __builtin_types_compatible_p (__typeof (o), struct anv_pipeline_cache*), \
258 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT, \
259 __builtin_choose_expr ( \
260 __builtin_types_compatible_p (__typeof (o), struct anv_pipeline_layout*), \
261 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, \
262 __builtin_choose_expr ( \
263 __builtin_types_compatible_p (__typeof (o), struct anv_render_pass*), \
264 VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, \
265 __builtin_choose_expr ( \
266 __builtin_types_compatible_p (__typeof (o), struct anv_pipeline*), \
267 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, \
268 __builtin_choose_expr ( \
269 __builtin_types_compatible_p (__typeof (o), struct anv_descriptor_set_layout*), \
270 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, \
271 __builtin_choose_expr ( \
272 __builtin_types_compatible_p (__typeof (o), struct anv_sampler*), \
273 VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT, \
274 __builtin_choose_expr ( \
275 __builtin_types_compatible_p (__typeof (o), struct anv_descriptor_pool*), \
276 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, \
277 __builtin_choose_expr ( \
278 __builtin_types_compatible_p (__typeof (o), struct anv_descriptor_set*), \
279 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, \
280 __builtin_choose_expr ( \
281 __builtin_types_compatible_p (__typeof (o), struct anv_framebuffer*), \
282 VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT, \
283 __builtin_choose_expr ( \
284 __builtin_types_compatible_p (__typeof (o), struct anv_cmd_pool*), \
285 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, \
286 __builtin_choose_expr ( \
287 __builtin_types_compatible_p (__typeof (o), struct anv_surface*), \
288 VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT, \
289 __builtin_choose_expr ( \
290 __builtin_types_compatible_p (__typeof (o), struct wsi_swapchain*), \
291 VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, \
292 __builtin_choose_expr ( \
293 __builtin_types_compatible_p (__typeof (o), struct anv_debug_callback*), \
294 VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT, \
295 __builtin_choose_expr ( \
296 __builtin_types_compatible_p (__typeof (o), void*), \
297 VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, \
298 /* The void expression results in a compile-time error \
299 when assigning the result to something. */ \
300 (void)0)))))))))))))))))))))))))))))))
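
/* REPORT_OBJECT_TYPE resolves entirely at compile time: __builtin_choose_expr
 * walks the chain above until __builtin_types_compatible_p matches the static
 * type of its argument.  An illustrative (non-driver) use:
 *
 *    struct anv_image *image = ...;
 *    VkDebugReportObjectTypeEXT t = REPORT_OBJECT_TYPE(image);
 *    // t == VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, with no runtime branching.
 *
 * A plain void pointer maps to the UNKNOWN type, while a pointer type that is
 * not listed falls through to the final (void)0 arm and fails to compile when
 * assigned, which catches unsupported object types early.
 */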
/* Whenever we generate an error, pass it through this function. Useful for
 * debugging, where we can break on it. Only call at error site, not when
 * propagating errors. Might be useful to plug in a stack trace here.
 */
VkResult __vk_errorf(struct anv_instance *instance, const void *object,
                     VkDebugReportObjectTypeEXT type, VkResult error,
                     const char *file, int line, const char *format, ...);

#ifdef DEBUG
#define vk_error(error) __vk_errorf(NULL, NULL,\
                                    VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,\
                                    error, __FILE__, __LINE__, NULL);
#define vk_errorf(instance, obj, error, format, ...)\
    __vk_errorf(instance, obj, REPORT_OBJECT_TYPE(obj), error,\
                __FILE__, __LINE__, format, ## __VA_ARGS__);
#else
#define vk_error(error) error
#define vk_errorf(instance, obj, error, format, ...) error
#endif
/**
 * Warn on ignored extension structs.
 *
 * The Vulkan spec requires us to ignore unsupported or unknown structs in
 * a pNext chain. In debug mode, emitting warnings for ignored structs may
 * help us discover structs that we should not have ignored.
 *
 * From the Vulkan 1.0.38 spec:
 *
 *    Any component of the implementation (the loader, any enabled layers,
 *    and drivers) must skip over, without processing (other than reading the
 *    sType and pNext members) any chained structures with sType values not
 *    defined by extensions supported by that component.
 */
#define anv_debug_ignored_stype(sType) \
   intel_logd("%s: ignored VkStructureType %u\n", __func__, (sType))
void __anv_perf_warn(struct anv_instance *instance, const void *object,
                     VkDebugReportObjectTypeEXT type, const char *file,
                     int line, const char *format, ...)
   anv_printflike(6, 7);
void anv_loge(const char *format, ...) anv_printflike(1, 2);
void anv_loge_v(const char *format, va_list va);

void anv_debug_report(struct anv_instance *instance,
                      VkDebugReportFlagsEXT flags,
                      VkDebugReportObjectTypeEXT object_type,
                      uint64_t handle,
                      size_t location,
                      int32_t messageCode,
                      const char* pLayerPrefix,
                      const char *pMessage);
/**
 * Print a FINISHME message, including its source location.
 */
#define anv_finishme(format, ...) \
   do { \
      static bool reported = false; \
      if (!reported) { \
         intel_logw("%s:%d: FINISHME: " format, __FILE__, __LINE__, \
                    ##__VA_ARGS__); \
         reported = true; \
      } \
   } while (0)

/**
 * Print a perf warning message.  Set INTEL_DEBUG=perf to see these.
 */
#define anv_perf_warn(instance, obj, format, ...) \
   do { \
      static bool reported = false; \
      if (!reported && unlikely(INTEL_DEBUG & DEBUG_PERF)) { \
         __anv_perf_warn(instance, obj, REPORT_OBJECT_TYPE(obj), __FILE__, __LINE__,\
                         format, ##__VA_ARGS__); \
         reported = true; \
      } \
   } while (0)

/* A non-fatal assert.  Useful for debugging. */
#ifdef DEBUG
#define anv_assert(x) ({ \
   if (unlikely(!(x))) \
      intel_loge("%s:%d ASSERT: %s", __FILE__, __LINE__, #x); \
})
#else
#define anv_assert(x)
#endif
/* A multi-pointer allocator
 *
 * When copying data structures from the user (such as a render pass), it's
 * common to need to allocate data for a bunch of different things.  Instead
 * of doing several allocations and having to handle all of the error checking
 * that entails, it can be easier to do a single allocation.  This struct
 * helps facilitate that.  The intended usage looks like this:
 *
 *    ANV_MULTIALLOC(ma)
 *    anv_multialloc_add(&ma, &main_ptr, 1);
 *    anv_multialloc_add(&ma, &substruct1, substruct1Count);
 *    anv_multialloc_add(&ma, &substruct2, substruct2Count);
 *
 *    if (!anv_multialloc_alloc(&ma, pAllocator, VK_ALLOCATION_SCOPE_FOO))
 *       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 */
struct anv_multialloc {
    size_t size;
    size_t align;

    uint32_t ptr_count;
    void **ptrs[8];
};

#define ANV_MULTIALLOC_INIT \
   ((struct anv_multialloc) { 0, })

#define ANV_MULTIALLOC(_name) \
   struct anv_multialloc _name = ANV_MULTIALLOC_INIT
__attribute__((always_inline))
static inline void
_anv_multialloc_add(struct anv_multialloc *ma,
                    void **ptr, size_t size, size_t align)
{
   size_t offset = align_u64(ma->size, align);
   ma->size = offset + size;
   ma->align = MAX2(ma->align, align);

   /* Store the offset in the pointer. */
   *ptr = (void *)(uintptr_t)offset;

   assert(ma->ptr_count < ARRAY_SIZE(ma->ptrs));
   ma->ptrs[ma->ptr_count++] = ptr;
}

#define anv_multialloc_add_size(_ma, _ptr, _size) \
   _anv_multialloc_add((_ma), (void **)(_ptr), (_size), __alignof__(**(_ptr)))

#define anv_multialloc_add(_ma, _ptr, _count) \
   anv_multialloc_add_size(_ma, _ptr, (_count) * sizeof(**(_ptr)));
__attribute__((always_inline))
static inline void *
anv_multialloc_alloc(struct anv_multialloc *ma,
                     const VkAllocationCallbacks *alloc,
                     VkSystemAllocationScope scope)
{
   void *ptr = vk_alloc(alloc, ma->size, ma->align, scope);
   if (!ptr)
      return NULL;

   /* Fill out each of the pointers with their final value.
    *
    *   for (uint32_t i = 0; i < ma->ptr_count; i++)
    *      *ma->ptrs[i] = ptr + (uintptr_t)*ma->ptrs[i];
    *
    * Unfortunately, even though ma->ptr_count is basically guaranteed to be a
    * constant, GCC is incapable of figuring this out and unrolling the loop
    * so we have to give it a little help.
    */
   STATIC_ASSERT(ARRAY_SIZE(ma->ptrs) == 8);
#define _ANV_MULTIALLOC_UPDATE_POINTER(_i) \
   if ((_i) < ma->ptr_count) \
      *ma->ptrs[_i] = ptr + (uintptr_t)*ma->ptrs[_i]
   _ANV_MULTIALLOC_UPDATE_POINTER(0);
   _ANV_MULTIALLOC_UPDATE_POINTER(1);
   _ANV_MULTIALLOC_UPDATE_POINTER(2);
   _ANV_MULTIALLOC_UPDATE_POINTER(3);
   _ANV_MULTIALLOC_UPDATE_POINTER(4);
   _ANV_MULTIALLOC_UPDATE_POINTER(5);
   _ANV_MULTIALLOC_UPDATE_POINTER(6);
   _ANV_MULTIALLOC_UPDATE_POINTER(7);
#undef _ANV_MULTIALLOC_UPDATE_POINTER

   return ptr;
}

__attribute__((always_inline))
static inline void *
anv_multialloc_alloc2(struct anv_multialloc *ma,
                      const VkAllocationCallbacks *parent_alloc,
                      const VkAllocationCallbacks *alloc,
                      VkSystemAllocationScope scope)
{
   return anv_multialloc_alloc(ma, alloc ? alloc : parent_alloc, scope);
}
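
/* A slightly fuller sketch of the multialloc pattern (illustrative only; the
 * struct and variable names here are made up for the example):
 *
 *    struct example { uint32_t count; };
 *
 *    struct example *obj;
 *    uint32_t *items;
 *    uint32_t item_count = 4;
 *
 *    ANV_MULTIALLOC(ma);
 *    anv_multialloc_add(&ma, &obj, 1);
 *    anv_multialloc_add(&ma, &items, item_count);
 *
 *    if (!anv_multialloc_alloc(&ma, pAllocator,
 *                              VK_SYSTEM_ALLOCATION_SCOPE_OBJECT))
 *       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 *
 *    // Exactly one vk_alloc() call was made; obj and items now point into it
 *    // at properly aligned offsets, so a single vk_free() releases both.
 */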
struct anv_bo {
   uint32_t gem_handle;

   /* Index into the current validation list.  This is used by the
    * validation list building algorithm to track which buffers are already
    * in the validation list so that we can ensure uniqueness.
    */
   uint32_t index;

   /* Last known offset.  This value is provided by the kernel when we
    * execbuf and is used as the presumed offset for the next bunch of
    * relocations.
    */
   uint64_t offset;

   uint64_t size;
   void *map;

   /** Flags to pass to the kernel through drm_i915_exec_object2::flags */
   uint32_t flags;
};

static inline void
anv_bo_init(struct anv_bo *bo, uint32_t gem_handle, uint64_t size)
{
   bo->gem_handle = gem_handle;
   bo->index = 0;
   bo->offset = -1;
   bo->size = size;
   bo->map = NULL;
   bo->flags = 0;
}
/* Represents a lock-free linked list of "free" things.  This is used by
 * both the block pool and the state pools.  Unfortunately, in order to
 * solve the ABA problem, we can't use a single uint32_t head.
 */
union anv_free_list {
   struct {
      int32_t offset;

      /* A simple count that is incremented every time the head changes. */
      uint32_t count;
   };
   uint64_t u64;
};

#define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { 1, 0 } })
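
/* Why the extra count member matters: updates are committed with a 64-bit
 * compare-and-swap on both fields, so a head that has been popped and pushed
 * back (same offset, different count) no longer compares equal.  A hedged
 * sketch of the idea (next_offset_of() is a hypothetical helper; the real
 * pop/push logic lives in anv_allocator.c):
 *
 *    union anv_free_list current, old, new;
 *    current.u64 = list->u64;
 *    do {
 *       old = current;
 *       new.offset = next_offset_of(old.offset);
 *       new.count = old.count + 1;
 *       current.u64 = __sync_val_compare_and_swap(&list->u64, old.u64, new.u64);
 *    } while (current.u64 != old.u64);
 */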
540 struct anv_block_state
{
550 struct anv_block_pool
{
551 struct anv_device
*device
;
555 /* The offset from the start of the bo to the "center" of the block
556 * pool. Pointers to allocated blocks are given by
557 * bo.map + center_bo_offset + offsets.
559 uint32_t center_bo_offset
;
561 /* Current memory map of the block pool. This pointer may or may not
562 * point to the actual beginning of the block pool memory. If
563 * anv_block_pool_alloc_back has ever been called, then this pointer
564 * will point to the "center" position of the buffer and all offsets
565 * (negative or positive) given out by the block pool alloc functions
566 * will be valid relative to this pointer.
568 * In particular, map == bo.map + center_offset
574 * Array of mmaps and gem handles owned by the block pool, reclaimed when
575 * the block pool is destroyed.
577 struct u_vector mmap_cleanups
;
579 struct anv_block_state state
;
581 struct anv_block_state back_state
;
/* Block pools are backed by a fixed-size 1GB memfd */
#define BLOCK_POOL_MEMFD_SIZE (1ul << 30)

/* The center of the block pool is also the middle of the memfd.  This may
 * change in the future if we decide differently for some reason.
 */
#define BLOCK_POOL_MEMFD_CENTER (BLOCK_POOL_MEMFD_SIZE / 2)
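
/* Illustrative offset arithmetic (a sketch using the field names described in
 * the comments above, not driver code): because the pool can grow in both
 * directions, offsets handed out by the alloc functions may be negative (back
 * allocations) or non-negative (front allocations), and both resolve against
 * the same map:
 *
 *    void *ptr  = pool->map + offset;   // map already includes center_bo_offset
 *    void *same = pool->bo.map + pool->center_bo_offset + offset;
 */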
592 static inline uint32_t
593 anv_block_pool_size(struct anv_block_pool
*pool
)
595 return pool
->state
.end
+ pool
->back_state
.end
;
604 #define ANV_STATE_NULL ((struct anv_state) { .alloc_size = 0 })
606 struct anv_fixed_size_state_pool
{
607 union anv_free_list free_list
;
608 struct anv_block_state block
;
#define ANV_MIN_STATE_SIZE_LOG2 6
#define ANV_MAX_STATE_SIZE_LOG2 20

#define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2 + 1)
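
/* Illustrative bucket selection (a sketch, not the driver's exact helper):
 * state sizes are rounded up to the next power of two and mapped onto one of
 * the ANV_STATE_BUCKETS fixed-size free lists.  For a 100-byte allocation:
 *
 *    unsigned size_log2 = ANV_MIN_STATE_SIZE_LOG2;
 *    while ((1u << size_log2) < 100)
 *       size_log2++;                     // rounds 100 up to 128, size_log2 == 7
 *    unsigned bucket = size_log2 - ANV_MIN_STATE_SIZE_LOG2;   // bucket == 1
 */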
616 struct anv_state_pool
{
617 struct anv_block_pool block_pool
;
619 /* The size of blocks which will be allocated from the block pool */
622 /** Free list for "back" allocations */
623 union anv_free_list back_alloc_free_list
;
625 struct anv_fixed_size_state_pool buckets
[ANV_STATE_BUCKETS
];
628 struct anv_state_stream_block
;
630 struct anv_state_stream
{
631 struct anv_state_pool
*state_pool
;
633 /* The size of blocks to allocate from the state pool */
636 /* Current block we're allocating from */
637 struct anv_state block
;
639 /* Offset into the current block at which to allocate the next state */
642 /* List of all blocks allocated from this pool */
643 struct anv_state_stream_block
*block_list
;
646 /* The block_pool functions exported for testing only. The block pool should
647 * only be used via a state pool (see below).
649 VkResult
anv_block_pool_init(struct anv_block_pool
*pool
,
650 struct anv_device
*device
,
651 uint32_t initial_size
);
652 void anv_block_pool_finish(struct anv_block_pool
*pool
);
653 int32_t anv_block_pool_alloc(struct anv_block_pool
*pool
,
654 uint32_t block_size
);
655 int32_t anv_block_pool_alloc_back(struct anv_block_pool
*pool
,
656 uint32_t block_size
);
658 VkResult
anv_state_pool_init(struct anv_state_pool
*pool
,
659 struct anv_device
*device
,
660 uint32_t block_size
);
661 void anv_state_pool_finish(struct anv_state_pool
*pool
);
662 struct anv_state
anv_state_pool_alloc(struct anv_state_pool
*pool
,
663 uint32_t state_size
, uint32_t alignment
);
664 struct anv_state
anv_state_pool_alloc_back(struct anv_state_pool
*pool
);
665 void anv_state_pool_free(struct anv_state_pool
*pool
, struct anv_state state
);
666 void anv_state_stream_init(struct anv_state_stream
*stream
,
667 struct anv_state_pool
*state_pool
,
668 uint32_t block_size
);
669 void anv_state_stream_finish(struct anv_state_stream
*stream
);
670 struct anv_state
anv_state_stream_alloc(struct anv_state_stream
*stream
,
671 uint32_t size
, uint32_t alignment
);
674 * Implements a pool of re-usable BOs. The interface is identical to that
675 * of block_pool except that each block is its own BO.
678 struct anv_device
*device
;
683 void anv_bo_pool_init(struct anv_bo_pool
*pool
, struct anv_device
*device
);
684 void anv_bo_pool_finish(struct anv_bo_pool
*pool
);
685 VkResult
anv_bo_pool_alloc(struct anv_bo_pool
*pool
, struct anv_bo
*bo
,
687 void anv_bo_pool_free(struct anv_bo_pool
*pool
, const struct anv_bo
*bo
);
689 struct anv_scratch_bo
{
694 struct anv_scratch_pool
{
695 /* Indexed by Per-Thread Scratch Space number (the hardware value) and stage */
696 struct anv_scratch_bo bos
[16][MESA_SHADER_STAGES
];
699 void anv_scratch_pool_init(struct anv_device
*device
,
700 struct anv_scratch_pool
*pool
);
701 void anv_scratch_pool_finish(struct anv_device
*device
,
702 struct anv_scratch_pool
*pool
);
703 struct anv_bo
*anv_scratch_pool_alloc(struct anv_device
*device
,
704 struct anv_scratch_pool
*pool
,
705 gl_shader_stage stage
,
706 unsigned per_thread_scratch
);
708 /** Implements a BO cache that ensures a 1-1 mapping of GEM BOs to anv_bos */
709 struct anv_bo_cache
{
710 struct hash_table
*bo_map
;
711 pthread_mutex_t mutex
;
714 VkResult
anv_bo_cache_init(struct anv_bo_cache
*cache
);
715 void anv_bo_cache_finish(struct anv_bo_cache
*cache
);
716 VkResult
anv_bo_cache_alloc(struct anv_device
*device
,
717 struct anv_bo_cache
*cache
,
718 uint64_t size
, struct anv_bo
**bo
);
719 VkResult
anv_bo_cache_import(struct anv_device
*device
,
720 struct anv_bo_cache
*cache
,
721 int fd
, uint64_t size
, struct anv_bo
**bo
);
722 VkResult
anv_bo_cache_export(struct anv_device
*device
,
723 struct anv_bo_cache
*cache
,
724 struct anv_bo
*bo_in
, int *fd_out
);
725 void anv_bo_cache_release(struct anv_device
*device
,
726 struct anv_bo_cache
*cache
,
729 struct anv_memory_type
{
730 /* Standard bits passed on to the client */
731 VkMemoryPropertyFlags propertyFlags
;
734 /* Driver-internal book-keeping */
735 VkBufferUsageFlags valid_buffer_usage
;
738 struct anv_memory_heap
{
739 /* Standard bits passed on to the client */
741 VkMemoryHeapFlags flags
;
743 /* Driver-internal book-keeping */
744 bool supports_48bit_addresses
;
747 struct anv_physical_device
{
748 VK_LOADER_DATA _loader_data
;
750 struct anv_instance
* instance
;
754 struct gen_device_info info
;
   /** Amount of "GPU memory" we want to advertise
    *
    * Clearly, this value is bogus since Intel is a UMA architecture.  On
    * gen7 platforms, we are limited by GTT size unless we want to implement
    * fine-grained tracking and GTT splitting.  On Broadwell and above we are
    * practically unlimited.  However, we will never report more than 3/4 of
    * the total system RAM to try to avoid running out of memory.
    */
763 bool supports_48bit_addresses
;
764 struct brw_compiler
* compiler
;
765 struct isl_device isl_dev
;
766 int cmd_parser_version
;
770 bool has_syncobj_wait
;
773 uint32_t subslice_total
;
777 struct anv_memory_type types
[VK_MAX_MEMORY_TYPES
];
779 struct anv_memory_heap heaps
[VK_MAX_MEMORY_HEAPS
];
782 uint8_t pipeline_cache_uuid
[VK_UUID_SIZE
];
783 uint8_t driver_uuid
[VK_UUID_SIZE
];
784 uint8_t device_uuid
[VK_UUID_SIZE
];
786 struct wsi_device wsi_device
;
790 struct anv_debug_report_callback
{
791 /* Link in the 'callbacks' list in anv_instance struct. */
792 struct list_head link
;
793 VkDebugReportFlagsEXT flags
;
794 PFN_vkDebugReportCallbackEXT callback
;
798 struct anv_instance
{
799 VK_LOADER_DATA _loader_data
;
801 VkAllocationCallbacks alloc
;
804 int physicalDeviceCount
;
805 struct anv_physical_device physicalDevice
;
807 /* VK_EXT_debug_report debug callbacks */
808 pthread_mutex_t callbacks_mutex
;
809 struct list_head callbacks
;
810 struct anv_debug_report_callback destroy_debug_cb
;
813 VkResult
anv_init_wsi(struct anv_physical_device
*physical_device
);
814 void anv_finish_wsi(struct anv_physical_device
*physical_device
);
816 bool anv_instance_extension_supported(const char *name
);
817 uint32_t anv_physical_device_api_version(struct anv_physical_device
*dev
);
818 bool anv_physical_device_extension_supported(struct anv_physical_device
*dev
,
822 VK_LOADER_DATA _loader_data
;
824 struct anv_device
* device
;
826 struct anv_state_pool
* pool
;
829 struct anv_pipeline_cache
{
830 struct anv_device
* device
;
831 pthread_mutex_t mutex
;
833 struct hash_table
* cache
;
836 struct anv_pipeline_bind_map
;
838 void anv_pipeline_cache_init(struct anv_pipeline_cache
*cache
,
839 struct anv_device
*device
,
841 void anv_pipeline_cache_finish(struct anv_pipeline_cache
*cache
);
843 struct anv_shader_bin
*
844 anv_pipeline_cache_search(struct anv_pipeline_cache
*cache
,
845 const void *key
, uint32_t key_size
);
846 struct anv_shader_bin
*
847 anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache
*cache
,
848 const void *key_data
, uint32_t key_size
,
849 const void *kernel_data
, uint32_t kernel_size
,
850 const struct brw_stage_prog_data
*prog_data
,
851 uint32_t prog_data_size
,
852 const struct anv_pipeline_bind_map
*bind_map
);
855 VK_LOADER_DATA _loader_data
;
857 VkAllocationCallbacks alloc
;
859 struct anv_instance
* instance
;
861 struct gen_device_info info
;
862 struct isl_device isl_dev
;
865 bool can_chain_batches
;
866 bool robust_buffer_access
;
868 struct anv_bo_pool batch_bo_pool
;
870 struct anv_bo_cache bo_cache
;
872 struct anv_state_pool dynamic_state_pool
;
873 struct anv_state_pool instruction_state_pool
;
874 struct anv_state_pool surface_state_pool
;
876 struct anv_bo workaround_bo
;
877 struct anv_bo trivial_batch_bo
;
879 struct anv_pipeline_cache blorp_shader_cache
;
880 struct blorp_context blorp
;
882 struct anv_state border_colors
;
884 struct anv_queue queue
;
886 struct anv_scratch_pool scratch_pool
;
888 uint32_t default_mocs
;
890 pthread_mutex_t mutex
;
891 pthread_cond_t queue_submit
;
896 anv_state_flush(struct anv_device
*device
, struct anv_state state
)
898 if (device
->info
.has_llc
)
901 gen_flush_range(state
.map
, state
.alloc_size
);
904 void anv_device_init_blorp(struct anv_device
*device
);
905 void anv_device_finish_blorp(struct anv_device
*device
);
907 VkResult
anv_device_execbuf(struct anv_device
*device
,
908 struct drm_i915_gem_execbuffer2
*execbuf
,
909 struct anv_bo
**execbuf_bos
);
910 VkResult
anv_device_query_status(struct anv_device
*device
);
911 VkResult
anv_device_bo_busy(struct anv_device
*device
, struct anv_bo
*bo
);
912 VkResult
anv_device_wait(struct anv_device
*device
, struct anv_bo
*bo
,
915 void* anv_gem_mmap(struct anv_device
*device
,
916 uint32_t gem_handle
, uint64_t offset
, uint64_t size
, uint32_t flags
);
917 void anv_gem_munmap(void *p
, uint64_t size
);
918 uint32_t anv_gem_create(struct anv_device
*device
, uint64_t size
);
919 void anv_gem_close(struct anv_device
*device
, uint32_t gem_handle
);
920 uint32_t anv_gem_userptr(struct anv_device
*device
, void *mem
, size_t size
);
921 int anv_gem_busy(struct anv_device
*device
, uint32_t gem_handle
);
922 int anv_gem_wait(struct anv_device
*device
, uint32_t gem_handle
, int64_t *timeout_ns
);
923 int anv_gem_execbuffer(struct anv_device
*device
,
924 struct drm_i915_gem_execbuffer2
*execbuf
);
925 int anv_gem_set_tiling(struct anv_device
*device
, uint32_t gem_handle
,
926 uint32_t stride
, uint32_t tiling
);
927 int anv_gem_create_context(struct anv_device
*device
);
928 int anv_gem_destroy_context(struct anv_device
*device
, int context
);
929 int anv_gem_get_context_param(int fd
, int context
, uint32_t param
,
931 int anv_gem_get_param(int fd
, uint32_t param
);
932 bool anv_gem_get_bit6_swizzle(int fd
, uint32_t tiling
);
933 int anv_gem_get_aperture(int fd
, uint64_t *size
);
934 bool anv_gem_supports_48b_addresses(int fd
);
935 int anv_gem_gpu_get_reset_stats(struct anv_device
*device
,
936 uint32_t *active
, uint32_t *pending
);
937 int anv_gem_handle_to_fd(struct anv_device
*device
, uint32_t gem_handle
);
938 uint32_t anv_gem_fd_to_handle(struct anv_device
*device
, int fd
);
939 int anv_gem_set_caching(struct anv_device
*device
, uint32_t gem_handle
, uint32_t caching
);
940 int anv_gem_set_domain(struct anv_device
*device
, uint32_t gem_handle
,
941 uint32_t read_domains
, uint32_t write_domain
);
942 int anv_gem_sync_file_merge(struct anv_device
*device
, int fd1
, int fd2
);
943 uint32_t anv_gem_syncobj_create(struct anv_device
*device
, uint32_t flags
);
944 void anv_gem_syncobj_destroy(struct anv_device
*device
, uint32_t handle
);
945 int anv_gem_syncobj_handle_to_fd(struct anv_device
*device
, uint32_t handle
);
946 uint32_t anv_gem_syncobj_fd_to_handle(struct anv_device
*device
, int fd
);
947 int anv_gem_syncobj_export_sync_file(struct anv_device
*device
,
949 int anv_gem_syncobj_import_sync_file(struct anv_device
*device
,
950 uint32_t handle
, int fd
);
951 void anv_gem_syncobj_reset(struct anv_device
*device
, uint32_t handle
);
952 bool anv_gem_supports_syncobj_wait(int fd
);
953 int anv_gem_syncobj_wait(struct anv_device
*device
,
954 uint32_t *handles
, uint32_t num_handles
,
955 int64_t abs_timeout_ns
, bool wait_all
);
957 VkResult
anv_bo_init_new(struct anv_bo
*bo
, struct anv_device
*device
, uint64_t size
);
959 struct anv_reloc_list
{
961 uint32_t array_length
;
962 struct drm_i915_gem_relocation_entry
* relocs
;
963 struct anv_bo
** reloc_bos
;
966 VkResult
anv_reloc_list_init(struct anv_reloc_list
*list
,
967 const VkAllocationCallbacks
*alloc
);
968 void anv_reloc_list_finish(struct anv_reloc_list
*list
,
969 const VkAllocationCallbacks
*alloc
);
971 VkResult
anv_reloc_list_add(struct anv_reloc_list
*list
,
972 const VkAllocationCallbacks
*alloc
,
973 uint32_t offset
, struct anv_bo
*target_bo
,
976 struct anv_batch_bo
{
977 /* Link in the anv_cmd_buffer.owned_batch_bos list */
978 struct list_head link
;
982 /* Bytes actually consumed in this batch BO */
985 struct anv_reloc_list relocs
;
989 const VkAllocationCallbacks
* alloc
;
995 struct anv_reloc_list
* relocs
;
997 /* This callback is called (with the associated user data) in the event
998 * that the batch runs out of space.
1000 VkResult (*extend_cb
)(struct anv_batch
*, void *);
1004 * Current error status of the command buffer. Used to track inconsistent
1005 * or incomplete command buffer states that are the consequence of run-time
1006 * errors such as out of memory scenarios. We want to track this in the
1007 * batch because the command buffer object is not visible to some parts
1013 void *anv_batch_emit_dwords(struct anv_batch
*batch
, int num_dwords
);
1014 void anv_batch_emit_batch(struct anv_batch
*batch
, struct anv_batch
*other
);
1015 uint64_t anv_batch_emit_reloc(struct anv_batch
*batch
,
1016 void *location
, struct anv_bo
*bo
, uint32_t offset
);
1017 VkResult
anv_device_submit_simple_batch(struct anv_device
*device
,
1018 struct anv_batch
*batch
);
1020 static inline VkResult
1021 anv_batch_set_error(struct anv_batch
*batch
, VkResult error
)
1023 assert(error
!= VK_SUCCESS
);
1024 if (batch
->status
== VK_SUCCESS
)
1025 batch
->status
= error
;
1026 return batch
->status
;
1030 anv_batch_has_error(struct anv_batch
*batch
)
1032 return batch
->status
!= VK_SUCCESS
;
1035 struct anv_address
{
1040 static inline uint64_t
1041 _anv_combine_address(struct anv_batch
*batch
, void *location
,
1042 const struct anv_address address
, uint32_t delta
)
1044 if (address
.bo
== NULL
) {
1045 return address
.offset
+ delta
;
1047 assert(batch
->start
<= location
&& location
< batch
->end
);
1049 return anv_batch_emit_reloc(batch
, location
, address
.bo
, address
.offset
+ delta
);
1053 #define __gen_address_type struct anv_address
1054 #define __gen_user_data struct anv_batch
1055 #define __gen_combine_address _anv_combine_address
/* Wrapper macros needed to work around preprocessor argument issues.  In
 * particular, arguments don't get pre-evaluated if they are concatenated.
 * This means that, if you pass GENX(3DSTATE_PS) into the emit macro, the
 * GENX macro won't get evaluated if the emit macro contains "cmd ## foo".
 * We can work around this easily enough with these helpers.
 */
#define __anv_cmd_length(cmd) cmd ## _length
#define __anv_cmd_length_bias(cmd) cmd ## _length_bias
#define __anv_cmd_header(cmd) cmd ## _header
#define __anv_cmd_pack(cmd) cmd ## _pack
#define __anv_reg_num(reg) reg ## _num
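
/* A minimal illustration of the problem these wrappers solve (generic C
 * preprocessor behavior; GENX_EXAMPLE is a hypothetical stand-in for GENX()):
 *
 *    #define GENX_EXAMPLE(cmd)  GEN9_##cmd
 *    #define BROKEN(cmd)        cmd ## _length
 *    #define WORKS(cmd)         BROKEN(cmd)
 *
 *    BROKEN(GENX_EXAMPLE(3DSTATE_PS))   // pastes ")" and "_length": invalid token
 *    WORKS(GENX_EXAMPLE(3DSTATE_PS))    // expands first: GEN9_3DSTATE_PS_length
 */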
1069 #define anv_pack_struct(dst, struc, ...) do { \
1070 struct struc __template = { \
1073 __anv_cmd_pack(struc)(NULL, dst, &__template); \
1074 VG(VALGRIND_CHECK_MEM_IS_DEFINED(dst, __anv_cmd_length(struc) * 4)); \
1077 #define anv_batch_emitn(batch, n, cmd, ...) ({ \
1078 void *__dst = anv_batch_emit_dwords(batch, n); \
1080 struct cmd __template = { \
1081 __anv_cmd_header(cmd), \
1082 .DWordLength = n - __anv_cmd_length_bias(cmd), \
1085 __anv_cmd_pack(cmd)(batch, __dst, &__template); \
1090 #define anv_batch_emit_merge(batch, dwords0, dwords1) \
1094 STATIC_ASSERT(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1)); \
1095 dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0)); \
1098 for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++) \
1099 dw[i] = (dwords0)[i] | (dwords1)[i]; \
1100 VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, ARRAY_SIZE(dwords0) * 4));\
1103 #define anv_batch_emit(batch, cmd, name) \
1104 for (struct cmd name = { __anv_cmd_header(cmd) }, \
1105 *_dst = anv_batch_emit_dwords(batch, __anv_cmd_length(cmd)); \
1106 __builtin_expect(_dst != NULL, 1); \
1107 ({ __anv_cmd_pack(cmd)(batch, _dst, &name); \
1108 VG(VALGRIND_CHECK_MEM_IS_DEFINED(_dst, __anv_cmd_length(cmd) * 4)); \
1112 #define GEN7_MOCS (struct GEN7_MEMORY_OBJECT_CONTROL_STATE) { \
1113 .GraphicsDataTypeGFDT = 0, \
1114 .LLCCacheabilityControlLLCCC = 0, \
1115 .L3CacheabilityControlL3CC = 1, \
1118 #define GEN75_MOCS (struct GEN75_MEMORY_OBJECT_CONTROL_STATE) { \
1119 .LLCeLLCCacheabilityControlLLCCC = 0, \
1120 .L3CacheabilityControlL3CC = 1, \
1123 #define GEN8_MOCS (struct GEN8_MEMORY_OBJECT_CONTROL_STATE) { \
1124 .MemoryTypeLLCeLLCCacheabilityControl = WB, \
1125 .TargetCache = L3DefertoPATforLLCeLLCselection, \
1126 .AgeforQUADLRU = 0 \
1129 /* Skylake: MOCS is now an index into an array of 62 different caching
1130 * configurations programmed by the kernel.
1133 #define GEN9_MOCS (struct GEN9_MEMORY_OBJECT_CONTROL_STATE) { \
1134 /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
1135 .IndextoMOCSTables = 2 \
1138 #define GEN9_MOCS_PTE { \
1139 /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
1140 .IndextoMOCSTables = 1 \
1143 /* Cannonlake MOCS defines are duplicates of Skylake MOCS defines. */
1144 #define GEN10_MOCS (struct GEN10_MEMORY_OBJECT_CONTROL_STATE) { \
1145 /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
1146 .IndextoMOCSTables = 2 \
1149 #define GEN10_MOCS_PTE { \
1150 /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
1151 .IndextoMOCSTables = 1 \
1154 struct anv_device_memory
{
1156 struct anv_memory_type
* type
;
1157 VkDeviceSize map_size
;
1162 * Header for Vertex URB Entry (VUE)
1164 struct anv_vue_header
{
1166 uint32_t RTAIndex
; /* RenderTargetArrayIndex */
1167 uint32_t ViewportIndex
;
1171 struct anv_descriptor_set_binding_layout
{
1173 /* The type of the descriptors in this binding */
1174 VkDescriptorType type
;
1177 /* Number of array elements in this binding */
1178 uint16_t array_size
;
1180 /* Index into the flattend descriptor set */
1181 uint16_t descriptor_index
;
1183 /* Index into the dynamic state array for a dynamic buffer */
1184 int16_t dynamic_offset_index
;
1186 /* Index into the descriptor set buffer views */
1187 int16_t buffer_index
;
1190 /* Index into the binding table for the associated surface */
1191 int16_t surface_index
;
1193 /* Index into the sampler table for the associated sampler */
1194 int16_t sampler_index
;
1196 /* Index into the image table for the associated image */
1197 int16_t image_index
;
1198 } stage
[MESA_SHADER_STAGES
];
1200 /* Immutable samplers (or NULL if no immutable samplers) */
1201 struct anv_sampler
**immutable_samplers
;
1204 struct anv_descriptor_set_layout
{
1205 /* Number of bindings in this descriptor set */
1206 uint16_t binding_count
;
1208 /* Total size of the descriptor set with room for all array entries */
1211 /* Shader stages affected by this descriptor set */
1212 uint16_t shader_stages
;
1214 /* Number of buffers in this descriptor set */
1215 uint16_t buffer_count
;
1217 /* Number of dynamic offsets used by this descriptor set */
1218 uint16_t dynamic_offset_count
;
1220 /* Bindings in this descriptor set */
1221 struct anv_descriptor_set_binding_layout binding
[0];
1224 struct anv_descriptor
{
1225 VkDescriptorType type
;
1229 VkImageLayout layout
;
1230 struct anv_image_view
*image_view
;
1231 struct anv_sampler
*sampler
;
1235 struct anv_buffer
*buffer
;
1240 struct anv_buffer_view
*buffer_view
;
1244 struct anv_descriptor_set
{
1245 const struct anv_descriptor_set_layout
*layout
;
1247 uint32_t buffer_count
;
1248 struct anv_buffer_view
*buffer_views
;
1249 struct anv_descriptor descriptors
[0];
1252 struct anv_buffer_view
{
1253 enum isl_format format
; /**< VkBufferViewCreateInfo::format */
1255 uint32_t offset
; /**< Offset into bo. */
1256 uint64_t range
; /**< VkBufferViewCreateInfo::range */
1258 struct anv_state surface_state
;
1259 struct anv_state storage_surface_state
;
1260 struct anv_state writeonly_storage_surface_state
;
1262 struct brw_image_param storage_image_param
;
1265 struct anv_push_descriptor_set
{
1266 struct anv_descriptor_set set
;
1268 /* Put this field right behind anv_descriptor_set so it fills up the
1269 * descriptors[0] field. */
1270 struct anv_descriptor descriptors
[MAX_PUSH_DESCRIPTORS
];
1271 struct anv_buffer_view buffer_views
[MAX_PUSH_DESCRIPTORS
];
1274 struct anv_descriptor_pool
{
1279 struct anv_state_stream surface_state_stream
;
1280 void *surface_state_free_list
;
1285 enum anv_descriptor_template_entry_type
{
1286 ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_IMAGE
,
1287 ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_BUFFER
,
1288 ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_BUFFER_VIEW
1291 struct anv_descriptor_template_entry
{
1292 /* The type of descriptor in this entry */
1293 VkDescriptorType type
;
1295 /* Binding in the descriptor set */
1298 /* Offset at which to write into the descriptor set binding */
1299 uint32_t array_element
;
1301 /* Number of elements to write into the descriptor set binding */
1302 uint32_t array_count
;
1304 /* Offset into the user provided data */
1307 /* Stride between elements into the user provided data */
1311 struct anv_descriptor_update_template
{
1312 /* The descriptor set this template corresponds to. This value is only
1313 * valid if the template was created with the templateType
1314 * VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR.
1318 /* Number of entries in this template */
1319 uint32_t entry_count
;
1321 /* Entries of the template */
1322 struct anv_descriptor_template_entry entries
[0];
1326 anv_descriptor_set_binding_layout_get_hw_size(const struct anv_descriptor_set_binding_layout
*binding
);
1329 anv_descriptor_set_layout_size(const struct anv_descriptor_set_layout
*layout
);
1332 anv_descriptor_set_write_image_view(struct anv_descriptor_set
*set
,
1333 const struct gen_device_info
* const devinfo
,
1334 const VkDescriptorImageInfo
* const info
,
1335 VkDescriptorType type
,
1340 anv_descriptor_set_write_buffer_view(struct anv_descriptor_set
*set
,
1341 VkDescriptorType type
,
1342 struct anv_buffer_view
*buffer_view
,
1347 anv_descriptor_set_write_buffer(struct anv_descriptor_set
*set
,
1348 struct anv_device
*device
,
1349 struct anv_state_stream
*alloc_stream
,
1350 VkDescriptorType type
,
1351 struct anv_buffer
*buffer
,
1354 VkDeviceSize offset
,
1355 VkDeviceSize range
);
1358 anv_descriptor_set_write_template(struct anv_descriptor_set
*set
,
1359 struct anv_device
*device
,
1360 struct anv_state_stream
*alloc_stream
,
1361 const struct anv_descriptor_update_template
*template,
1365 anv_descriptor_set_create(struct anv_device
*device
,
1366 struct anv_descriptor_pool
*pool
,
1367 const struct anv_descriptor_set_layout
*layout
,
1368 struct anv_descriptor_set
**out_set
);
1371 anv_descriptor_set_destroy(struct anv_device
*device
,
1372 struct anv_descriptor_pool
*pool
,
1373 struct anv_descriptor_set
*set
);
1375 #define ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS UINT8_MAX
1377 struct anv_pipeline_binding
{
1378 /* The descriptor set this surface corresponds to. The special value of
1379 * ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS indicates that the offset refers
1380 * to a color attachment and not a regular descriptor.
1384 /* Binding in the descriptor set */
1387 /* Index in the binding */
1390 /* Plane in the binding index */
1393 /* Input attachment index (relative to the subpass) */
1394 uint8_t input_attachment_index
;
1396 /* For a storage image, whether it is write-only */
1400 struct anv_pipeline_layout
{
1402 struct anv_descriptor_set_layout
*layout
;
1403 uint32_t dynamic_offset_start
;
1409 bool has_dynamic_offsets
;
1410 } stage
[MESA_SHADER_STAGES
];
1412 unsigned char sha1
[20];
1416 struct anv_device
* device
;
1419 VkBufferUsageFlags usage
;
1421 /* Set when bound */
1423 VkDeviceSize offset
;
1426 static inline uint64_t
1427 anv_buffer_get_range(struct anv_buffer
*buffer
, uint64_t offset
, uint64_t range
)
1429 assert(offset
<= buffer
->size
);
1430 if (range
== VK_WHOLE_SIZE
) {
1431 return buffer
->size
- offset
;
1433 assert(range
<= buffer
->size
);
1438 enum anv_cmd_dirty_bits
{
1439 ANV_CMD_DIRTY_DYNAMIC_VIEWPORT
= 1 << 0, /* VK_DYNAMIC_STATE_VIEWPORT */
1440 ANV_CMD_DIRTY_DYNAMIC_SCISSOR
= 1 << 1, /* VK_DYNAMIC_STATE_SCISSOR */
1441 ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH
= 1 << 2, /* VK_DYNAMIC_STATE_LINE_WIDTH */
1442 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS
= 1 << 3, /* VK_DYNAMIC_STATE_DEPTH_BIAS */
1443 ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS
= 1 << 4, /* VK_DYNAMIC_STATE_BLEND_CONSTANTS */
1444 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS
= 1 << 5, /* VK_DYNAMIC_STATE_DEPTH_BOUNDS */
1445 ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK
= 1 << 6, /* VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK */
1446 ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK
= 1 << 7, /* VK_DYNAMIC_STATE_STENCIL_WRITE_MASK */
1447 ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE
= 1 << 8, /* VK_DYNAMIC_STATE_STENCIL_REFERENCE */
1448 ANV_CMD_DIRTY_DYNAMIC_ALL
= (1 << 9) - 1,
1449 ANV_CMD_DIRTY_PIPELINE
= 1 << 9,
1450 ANV_CMD_DIRTY_INDEX_BUFFER
= 1 << 10,
1451 ANV_CMD_DIRTY_RENDER_TARGETS
= 1 << 11,
1453 typedef uint32_t anv_cmd_dirty_mask_t
;
1455 enum anv_pipe_bits
{
1456 ANV_PIPE_DEPTH_CACHE_FLUSH_BIT
= (1 << 0),
1457 ANV_PIPE_STALL_AT_SCOREBOARD_BIT
= (1 << 1),
1458 ANV_PIPE_STATE_CACHE_INVALIDATE_BIT
= (1 << 2),
1459 ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT
= (1 << 3),
1460 ANV_PIPE_VF_CACHE_INVALIDATE_BIT
= (1 << 4),
1461 ANV_PIPE_DATA_CACHE_FLUSH_BIT
= (1 << 5),
1462 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT
= (1 << 10),
1463 ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT
= (1 << 11),
1464 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT
= (1 << 12),
1465 ANV_PIPE_DEPTH_STALL_BIT
= (1 << 13),
1466 ANV_PIPE_CS_STALL_BIT
= (1 << 20),
1468 /* This bit does not exist directly in PIPE_CONTROL. Instead it means that
1469 * a flush has happened but not a CS stall. The next time we do any sort
1470 * of invalidation we need to insert a CS stall at that time. Otherwise,
1471 * we would have to CS stall on every flush which could be bad.
1473 ANV_PIPE_NEEDS_CS_STALL_BIT
= (1 << 21),
1476 #define ANV_PIPE_FLUSH_BITS ( \
1477 ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | \
1478 ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
1479 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT)
1481 #define ANV_PIPE_STALL_BITS ( \
1482 ANV_PIPE_STALL_AT_SCOREBOARD_BIT | \
1483 ANV_PIPE_DEPTH_STALL_BIT | \
1484 ANV_PIPE_CS_STALL_BIT)
1486 #define ANV_PIPE_INVALIDATE_BITS ( \
1487 ANV_PIPE_STATE_CACHE_INVALIDATE_BIT | \
1488 ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT | \
1489 ANV_PIPE_VF_CACHE_INVALIDATE_BIT | \
1490 ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
1491 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT | \
1492 ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT)
1494 static inline enum anv_pipe_bits
1495 anv_pipe_flush_bits_for_access_flags(VkAccessFlags flags
)
1497 enum anv_pipe_bits pipe_bits
= 0;
1500 for_each_bit(b
, flags
) {
1501 switch ((VkAccessFlagBits
)(1 << b
)) {
1502 case VK_ACCESS_SHADER_WRITE_BIT
:
1503 pipe_bits
|= ANV_PIPE_DATA_CACHE_FLUSH_BIT
;
1505 case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT
:
1506 pipe_bits
|= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT
;
1508 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT
:
1509 pipe_bits
|= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT
;
1511 case VK_ACCESS_TRANSFER_WRITE_BIT
:
1512 pipe_bits
|= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT
;
1513 pipe_bits
|= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT
;
1516 break; /* Nothing to do */
1523 static inline enum anv_pipe_bits
1524 anv_pipe_invalidate_bits_for_access_flags(VkAccessFlags flags
)
1526 enum anv_pipe_bits pipe_bits
= 0;
1529 for_each_bit(b
, flags
) {
1530 switch ((VkAccessFlagBits
)(1 << b
)) {
1531 case VK_ACCESS_INDIRECT_COMMAND_READ_BIT
:
1532 case VK_ACCESS_INDEX_READ_BIT
:
1533 case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT
:
1534 pipe_bits
|= ANV_PIPE_VF_CACHE_INVALIDATE_BIT
;
1536 case VK_ACCESS_UNIFORM_READ_BIT
:
1537 pipe_bits
|= ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT
;
1538 pipe_bits
|= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT
;
1540 case VK_ACCESS_SHADER_READ_BIT
:
1541 case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT
:
1542 case VK_ACCESS_TRANSFER_READ_BIT
:
1543 pipe_bits
|= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT
;
1546 break; /* Nothing to do */
1553 #define VK_IMAGE_ASPECT_ANY_COLOR_BIT ( \
1554 VK_IMAGE_ASPECT_COLOR_BIT | \
1555 VK_IMAGE_ASPECT_PLANE_0_BIT_KHR | \
1556 VK_IMAGE_ASPECT_PLANE_1_BIT_KHR | \
1557 VK_IMAGE_ASPECT_PLANE_2_BIT_KHR)
1558 #define VK_IMAGE_ASPECT_PLANES_BITS ( \
1559 VK_IMAGE_ASPECT_PLANE_0_BIT_KHR | \
1560 VK_IMAGE_ASPECT_PLANE_1_BIT_KHR | \
1561 VK_IMAGE_ASPECT_PLANE_2_BIT_KHR)
1563 struct anv_vertex_binding
{
1564 struct anv_buffer
* buffer
;
1565 VkDeviceSize offset
;
#define ANV_PARAM_PUSH(offset)         ((1 << 16) | (uint32_t)(offset))
#define ANV_PARAM_PUSH_OFFSET(param)   ((param) & 0xffff)
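
/* Worked example (illustrative): ANV_PARAM_PUSH packs a push-constant byte
 * offset together with a tag bit, and ANV_PARAM_PUSH_OFFSET recovers the
 * offset:
 *
 *    ANV_PARAM_PUSH(8)              == 0x00010008
 *    ANV_PARAM_PUSH_OFFSET(0x10008) == 8
 */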
1571 struct anv_push_constants
{
1572 /* Current allocated size of this push constants data structure.
1573 * Because a decent chunk of it may not be used (images on SKL, for
1574 * instance), we won't actually allocate the entire structure up-front.
1578 /* Push constant data provided by the client through vkPushConstants */
1579 uint8_t client_data
[MAX_PUSH_CONSTANTS_SIZE
];
1581 /* Image data for image_load_store on pre-SKL */
1582 struct brw_image_param images
[MAX_IMAGES
];
1585 struct anv_dynamic_state
{
1588 VkViewport viewports
[MAX_VIEWPORTS
];
1593 VkRect2D scissors
[MAX_SCISSORS
];
1604 float blend_constants
[4];
1614 } stencil_compare_mask
;
1619 } stencil_write_mask
;
1624 } stencil_reference
;
1627 extern const struct anv_dynamic_state default_dynamic_state
;
1629 void anv_dynamic_state_copy(struct anv_dynamic_state
*dest
,
1630 const struct anv_dynamic_state
*src
,
1631 uint32_t copy_mask
);
1633 struct anv_surface_state
{
1634 struct anv_state state
;
1635 /** Address of the surface referred to by this state
1637 * This address is relative to the start of the BO.
1640 /* Address of the aux surface, if any
1642 * This field is 0 if and only if no aux surface exists.
1644 * This address is relative to the start of the BO. On gen7, the bottom 12
1645 * bits of this address include extra aux information.
1647 uint64_t aux_address
;
1651 * Attachment state when recording a renderpass instance.
1653 * The clear value is valid only if there exists a pending clear.
1655 struct anv_attachment_state
{
1656 enum isl_aux_usage aux_usage
;
1657 enum isl_aux_usage input_aux_usage
;
1658 struct anv_surface_state color
;
1659 struct anv_surface_state input
;
1661 VkImageLayout current_layout
;
1662 VkImageAspectFlags pending_clear_aspects
;
1664 VkClearValue clear_value
;
1665 bool clear_color_is_zero_one
;
1666 bool clear_color_is_zero
;
1669 /** State required while building cmd buffer */
1670 struct anv_cmd_state
{
1671 /* PIPELINE_SELECT.PipelineSelection */
1672 uint32_t current_pipeline
;
1673 const struct gen_l3_config
* current_l3_config
;
1675 anv_cmd_dirty_mask_t dirty
;
1676 anv_cmd_dirty_mask_t compute_dirty
;
1677 enum anv_pipe_bits pending_pipe_bits
;
1678 uint32_t num_workgroups_offset
;
1679 struct anv_bo
*num_workgroups_bo
;
1680 VkShaderStageFlags descriptors_dirty
;
1681 VkShaderStageFlags push_constants_dirty
;
1682 uint32_t scratch_size
;
1683 struct anv_pipeline
* pipeline
;
1684 struct anv_pipeline
* compute_pipeline
;
1685 struct anv_framebuffer
* framebuffer
;
1686 struct anv_render_pass
* pass
;
1687 struct anv_subpass
* subpass
;
1688 VkRect2D render_area
;
1689 uint32_t restart_index
;
1690 struct anv_vertex_binding vertex_bindings
[MAX_VBS
];
1691 struct anv_descriptor_set
* descriptors
[MAX_SETS
];
1692 uint32_t dynamic_offsets
[MAX_DYNAMIC_BUFFERS
];
1693 VkShaderStageFlags push_constant_stages
;
1694 struct anv_push_constants
* push_constants
[MESA_SHADER_STAGES
];
1695 struct anv_state binding_tables
[MESA_SHADER_STAGES
];
1696 struct anv_state samplers
[MESA_SHADER_STAGES
];
1697 struct anv_dynamic_state dynamic
;
1700 struct anv_push_descriptor_set
* push_descriptors
[MAX_SETS
];
1703 * Whether or not the gen8 PMA fix is enabled. We ensure that, at the top
1704 * of any command buffer it is disabled by disabling it in EndCommandBuffer
1705 * and before invoking the secondary in ExecuteCommands.
1707 bool pma_fix_enabled
;
1710 * Whether or not we know for certain that HiZ is enabled for the current
1711 * subpass. If, for whatever reason, we are unsure as to whether HiZ is
1712 * enabled or not, this will be false.
1717 * Array length is anv_cmd_state::pass::attachment_count. Array content is
1718 * valid only when recording a render pass instance.
1720 struct anv_attachment_state
* attachments
;
1723 * Surface states for color render targets. These are stored in a single
1724 * flat array. For depth-stencil attachments, the surface state is simply
1727 struct anv_state render_pass_states
;
1730 * A null surface state of the right size to match the framebuffer. This
1731 * is one of the states in render_pass_states.
1733 struct anv_state null_surface_state
;
1736 struct anv_buffer
* index_buffer
;
1737 uint32_t index_type
; /**< 3DSTATE_INDEX_BUFFER.IndexFormat */
1738 uint32_t index_offset
;
1742 struct anv_cmd_pool
{
1743 VkAllocationCallbacks alloc
;
1744 struct list_head cmd_buffers
;
1747 #define ANV_CMD_BUFFER_BATCH_SIZE 8192
1749 enum anv_cmd_buffer_exec_mode
{
1750 ANV_CMD_BUFFER_EXEC_MODE_PRIMARY
,
1751 ANV_CMD_BUFFER_EXEC_MODE_EMIT
,
1752 ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT
,
1753 ANV_CMD_BUFFER_EXEC_MODE_CHAIN
,
1754 ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN
,
1757 struct anv_cmd_buffer
{
1758 VK_LOADER_DATA _loader_data
;
1760 struct anv_device
* device
;
1762 struct anv_cmd_pool
* pool
;
1763 struct list_head pool_link
;
1765 struct anv_batch batch
;
1767 /* Fields required for the actual chain of anv_batch_bo's.
1769 * These fields are initialized by anv_cmd_buffer_init_batch_bo_chain().
1771 struct list_head batch_bos
;
1772 enum anv_cmd_buffer_exec_mode exec_mode
;
1774 /* A vector of anv_batch_bo pointers for every batch or surface buffer
1775 * referenced by this command buffer
1777 * initialized by anv_cmd_buffer_init_batch_bo_chain()
1779 struct u_vector seen_bbos
;
1781 /* A vector of int32_t's for every block of binding tables.
1783 * initialized by anv_cmd_buffer_init_batch_bo_chain()
1785 struct u_vector bt_block_states
;
1788 struct anv_reloc_list surface_relocs
;
1789 /** Last seen surface state block pool center bo offset */
1790 uint32_t last_ss_pool_center
;
1792 /* Serial for tracking buffer completion */
1795 /* Stream objects for storing temporary data */
1796 struct anv_state_stream surface_state_stream
;
1797 struct anv_state_stream dynamic_state_stream
;
1799 VkCommandBufferUsageFlags usage_flags
;
1800 VkCommandBufferLevel level
;
1802 struct anv_cmd_state state
;
1805 VkResult
anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer
*cmd_buffer
);
1806 void anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer
*cmd_buffer
);
1807 void anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer
*cmd_buffer
);
1808 void anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer
*cmd_buffer
);
1809 void anv_cmd_buffer_add_secondary(struct anv_cmd_buffer
*primary
,
1810 struct anv_cmd_buffer
*secondary
);
1811 void anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer
*cmd_buffer
);
1812 VkResult
anv_cmd_buffer_execbuf(struct anv_device
*device
,
1813 struct anv_cmd_buffer
*cmd_buffer
,
1814 const VkSemaphore
*in_semaphores
,
1815 uint32_t num_in_semaphores
,
1816 const VkSemaphore
*out_semaphores
,
1817 uint32_t num_out_semaphores
,
1820 VkResult
anv_cmd_buffer_reset(struct anv_cmd_buffer
*cmd_buffer
);
1823 anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer
*cmd_buffer
,
1824 gl_shader_stage stage
, uint32_t size
);
1825 #define anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, field) \
1826 anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, \
1827 (offsetof(struct anv_push_constants, field) + \
1828 sizeof(cmd_buffer->state.push_constants[0]->field)))
1830 struct anv_state
anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer
*cmd_buffer
,
1831 const void *data
, uint32_t size
, uint32_t alignment
);
1832 struct anv_state
anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer
*cmd_buffer
,
1833 uint32_t *a
, uint32_t *b
,
1834 uint32_t dwords
, uint32_t alignment
);
1837 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer
*cmd_buffer
);
1839 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer
*cmd_buffer
,
1840 uint32_t entries
, uint32_t *state_offset
);
1842 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer
*cmd_buffer
);
1844 anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer
*cmd_buffer
,
1845 uint32_t size
, uint32_t alignment
);
1848 anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer
*cmd_buffer
);
1850 void gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer
*cmd_buffer
);
1851 void gen8_cmd_buffer_emit_depth_viewport(struct anv_cmd_buffer
*cmd_buffer
,
1852 bool depth_clamp_enable
);
1853 void gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer
*cmd_buffer
);
1855 void anv_cmd_buffer_setup_attachments(struct anv_cmd_buffer
*cmd_buffer
,
1856 struct anv_render_pass
*pass
,
1857 struct anv_framebuffer
*framebuffer
,
1858 const VkClearValue
*clear_values
);
1860 void anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer
*cmd_buffer
);
1863 anv_cmd_buffer_push_constants(struct anv_cmd_buffer
*cmd_buffer
,
1864 gl_shader_stage stage
);
1866 anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer
*cmd_buffer
);
1868 void anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer
*cmd_buffer
);
1869 void anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer
*cmd_buffer
);
1871 const struct anv_image_view
*
1872 anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer
*cmd_buffer
);
1875 anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer
*cmd_buffer
,
1876 uint32_t num_entries
,
1877 uint32_t *state_offset
,
1878 struct anv_state
*bt_state
);
1880 void anv_cmd_buffer_dump(struct anv_cmd_buffer
*cmd_buffer
);
1882 enum anv_fence_type
{
1883 ANV_FENCE_TYPE_NONE
= 0,
1885 ANV_FENCE_TYPE_SYNCOBJ
,
1888 enum anv_bo_fence_state
{
1889 /** Indicates that this is a new (or newly reset fence) */
1890 ANV_BO_FENCE_STATE_RESET
,
1892 /** Indicates that this fence has been submitted to the GPU but is still
1893 * (as far as we know) in use by the GPU.
1895 ANV_BO_FENCE_STATE_SUBMITTED
,
1897 ANV_BO_FENCE_STATE_SIGNALED
,
1900 struct anv_fence_impl
{
1901 enum anv_fence_type type
;
1904 /** Fence implementation for BO fences
1906 * These fences use a BO and a set of CPU-tracked state flags. The BO
1907 * is added to the object list of the last execbuf call in a QueueSubmit
1908 * and is marked EXEC_WRITE. The state flags track when the BO has been
1909 * submitted to the kernel. We need to do this because Vulkan lets you
1910 * wait on a fence that has not yet been submitted and I915_GEM_BUSY
1911 * will say it's idle in this case.
1915 enum anv_bo_fence_state state
;
1918 /** DRM syncobj handle for syncobj-based fences */
   /* Permanent fence state.  Every fence has some form of permanent state
    * (type != ANV_FENCE_TYPE_NONE).  This may be a BO to fence on (for
    * cross-process fences) or it could just be a dummy for use internally.
    */
1928 struct anv_fence_impl permanent
;
   /* Temporary fence state.  A fence *may* have temporary state.  That state
    * is added to the fence by an import operation and is reset back to
    * ANV_FENCE_TYPE_NONE when the fence is reset.  A fence with temporary
    * state cannot be signaled because the fence must already be signaled
    * before the temporary state can be exported from the fence in the other
    * process and imported here.
    */
1937 struct anv_fence_impl temporary
;
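/* Illustrative sketch (not a declaration from this header): given the
 * permanent/temporary split described above, a wait or reset path would
 * typically operate on the temporary state when one has been imported and
 * fall back to the permanent state otherwise:
 *
 *    struct anv_fence_impl *impl =
 *       fence->temporary.type != ANV_FENCE_TYPE_NONE ?
 *       &fence->temporary : &fence->permanent;
 */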
struct anv_event {
   uint64_t         semaphore;
   struct anv_state state;
};
enum anv_semaphore_type {
   ANV_SEMAPHORE_TYPE_NONE = 0,
   ANV_SEMAPHORE_TYPE_DUMMY,
   ANV_SEMAPHORE_TYPE_BO,
   ANV_SEMAPHORE_TYPE_SYNC_FILE,
   ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ,
};
struct anv_semaphore_impl {
   enum anv_semaphore_type type;

   union {
      /* A BO representing this semaphore when type == ANV_SEMAPHORE_TYPE_BO.
       * This BO will be added to the object list on any execbuf2 calls for
       * which this semaphore is used as a wait or signal fence.  When used as
       * a signal fence, the EXEC_OBJECT_WRITE flag will be set.
       */
      struct anv_bo *bo;

      /* The sync file descriptor when type == ANV_SEMAPHORE_TYPE_SYNC_FILE.
       * If the semaphore is in the unsignaled state due to either just being
       * created or because it has been used for a wait, fd will be -1.
       */
      int fd;

      /* Sync object handle when type == ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ.
       * Unlike GEM BOs, DRM sync objects aren't deduplicated by the kernel on
       * import so we don't need to bother with a userspace cache.
       */
      uint32_t syncobj;
   };
};
struct anv_semaphore {
   /* Permanent semaphore state.  Every semaphore has some form of permanent
    * state (type != ANV_SEMAPHORE_TYPE_NONE).  This may be a BO to fence on
    * (for cross-process semaphores) or it could just be a dummy for use
    * internally.
    */
   struct anv_semaphore_impl permanent;
   /* Temporary semaphore state.  A semaphore *may* have temporary state.
    * That state is added to the semaphore by an import operation and is reset
    * back to ANV_SEMAPHORE_TYPE_NONE when the semaphore is waited on.  A
    * semaphore with temporary state cannot be signaled because the semaphore
    * must already be signaled before the temporary state can be exported from
    * the semaphore in the other process and imported here.
    */
   struct anv_semaphore_impl temporary;
};
void anv_semaphore_reset_temporary(struct anv_device *device,
                                   struct anv_semaphore *semaphore);
struct anv_shader_module {
   unsigned char sha1[20];
   uint32_t      size;
   char          data[0];
};
static inline gl_shader_stage
vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
{
   assert(__builtin_popcount(vk_stage) == 1);
   return ffs(vk_stage) - 1;
}
static inline VkShaderStageFlagBits
mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
{
   return (1 << mesa_stage);
}
#define ANV_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)
#define anv_foreach_stage(stage, stage_bits)                         \
   for (gl_shader_stage stage,                                       \
        __tmp = (gl_shader_stage)((stage_bits) & ANV_STAGE_MASK);    \
        stage = __builtin_ffs(__tmp) - 1, __tmp;                     \
        __tmp &= ~(1 << (stage)))
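/* Usage sketch (illustrative only): iterating the stages enabled in a
 * pipeline's active_stages bitfield.  Each iteration yields a gl_shader_stage
 * whose bit position matches the corresponding VkShaderStageFlagBits bit.
 *
 *    anv_foreach_stage(s, pipeline->active_stages) {
 *       struct anv_shader_bin *bin = pipeline->shaders[s];
 *       (void)bin;
 *    }
 */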
struct anv_pipeline_bind_map {
   uint32_t surface_count;
   uint32_t sampler_count;
   uint32_t image_count;

   struct anv_pipeline_binding *surface_to_descriptor;
   struct anv_pipeline_binding *sampler_to_descriptor;
};
struct anv_shader_bin_key {
   uint32_t size;
   uint8_t  data[0];
};
struct anv_shader_bin {
   uint32_t ref_cnt;

   const struct anv_shader_bin_key *key;

   struct anv_state kernel;
   uint32_t         kernel_size;

   const struct brw_stage_prog_data *prog_data;
   uint32_t                          prog_data_size;

   struct anv_pipeline_bind_map bind_map;
};
struct anv_shader_bin *
anv_shader_bin_create(struct anv_device *device,
                      const void *key, uint32_t key_size,
                      const void *kernel, uint32_t kernel_size,
                      const struct brw_stage_prog_data *prog_data,
                      uint32_t prog_data_size, const void *prog_data_param,
                      const struct anv_pipeline_bind_map *bind_map);
void
anv_shader_bin_destroy(struct anv_device *device, struct anv_shader_bin *shader);
static inline void
anv_shader_bin_ref(struct anv_shader_bin *shader)
{
   assert(shader && shader->ref_cnt >= 1);
   p_atomic_inc(&shader->ref_cnt);
}
static inline void
anv_shader_bin_unref(struct anv_device *device, struct anv_shader_bin *shader)
{
   assert(shader && shader->ref_cnt >= 1);
   if (p_atomic_dec_zero(&shader->ref_cnt))
      anv_shader_bin_destroy(device, shader);
}
struct anv_pipeline {
   struct anv_device *       device;
   struct anv_batch          batch;
   uint32_t                  batch_data[512];
   struct anv_reloc_list     batch_relocs;
   uint32_t                  dynamic_state_mask;
   struct anv_dynamic_state  dynamic_state;

   struct anv_subpass *         subpass;
   struct anv_pipeline_layout * layout;

   bool                         needs_data_cache;

   struct anv_shader_bin *      shaders[MESA_SHADER_STAGES];

   struct {
      const struct gen_l3_config *l3_config;
      uint32_t                    total_size;
   } urb;

   VkShaderStageFlags           active_stages;
   struct anv_state             blend_state;

   uint32_t                     binding_stride[MAX_VBS];
   bool                         instancing_enable[MAX_VBS];
   bool                         primitive_restart;

   uint32_t                     cs_right_mask;

   bool                         depth_test_enable;
   bool                         writes_stencil;
   bool                         stencil_test_enable;
   bool                         depth_clamp_enable;
   bool                         sample_shading_enable;

   struct {
      uint32_t                  depth_stencil_state[3];
   } gen7;

   struct {
      uint32_t                  wm_depth_stencil[3];
   } gen8;

   struct {
      uint32_t                  wm_depth_stencil[4];
   } gen9;

   uint32_t                     interface_descriptor_data[8];
};
static inline bool
anv_pipeline_has_stage(const struct anv_pipeline *pipeline,
                       gl_shader_stage stage)
{
   return (pipeline->active_stages & mesa_to_vk_shader_stage(stage)) != 0;
}
#define ANV_DECL_GET_PROG_DATA_FUNC(prefix, stage)                   \
static inline const struct brw_##prefix##_prog_data *                \
get_##prefix##_prog_data(const struct anv_pipeline *pipeline)        \
{                                                                    \
   if (anv_pipeline_has_stage(pipeline, stage)) {                    \
      return (const struct brw_##prefix##_prog_data *)               \
             pipeline->shaders[stage]->prog_data;                    \
   } else {                                                          \
      return NULL;                                                   \
   }                                                                 \
}
ANV_DECL_GET_PROG_DATA_FUNC(vs, MESA_SHADER_VERTEX)
ANV_DECL_GET_PROG_DATA_FUNC(tcs, MESA_SHADER_TESS_CTRL)
ANV_DECL_GET_PROG_DATA_FUNC(tes, MESA_SHADER_TESS_EVAL)
ANV_DECL_GET_PROG_DATA_FUNC(gs, MESA_SHADER_GEOMETRY)
ANV_DECL_GET_PROG_DATA_FUNC(wm, MESA_SHADER_FRAGMENT)
ANV_DECL_GET_PROG_DATA_FUNC(cs, MESA_SHADER_COMPUTE)
static inline const struct brw_vue_prog_data *
anv_pipeline_get_last_vue_prog_data(const struct anv_pipeline *pipeline)
{
   if (anv_pipeline_has_stage(pipeline, MESA_SHADER_GEOMETRY))
      return &get_gs_prog_data(pipeline)->base;
   else if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
      return &get_tes_prog_data(pipeline)->base;
   else
      return &get_vs_prog_data(pipeline)->base;
}
VkResult
anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
                  struct anv_pipeline_cache *cache,
                  const VkGraphicsPipelineCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *alloc);
VkResult
anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkComputePipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info);
struct anv_format_plane {
   enum isl_format    isl_format:16;
   struct isl_swizzle swizzle;

   /* Whether this plane contains chroma channels */
   bool has_chroma;

   /* For downscaling of YUV planes */
   uint8_t denominator_scales[2];

   /* How to map sampled ycbcr planes to a single 4 component element. */
   struct isl_swizzle ycbcr_swizzle;
};

struct anv_format {
   struct anv_format_plane planes[3];
   uint8_t                 n_planes;
   bool                    can_ycbcr;
};
static inline uint32_t
anv_image_aspect_to_plane(VkImageAspectFlags image_aspects,
                          VkImageAspectFlags aspect_mask)
{
   switch (aspect_mask) {
   case VK_IMAGE_ASPECT_COLOR_BIT:
   case VK_IMAGE_ASPECT_DEPTH_BIT:
   case VK_IMAGE_ASPECT_PLANE_0_BIT_KHR:
      return 0;

   case VK_IMAGE_ASPECT_STENCIL_BIT:
      if ((image_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) == 0)
         return 0;
      /* Fall-through */
   case VK_IMAGE_ASPECT_PLANE_1_BIT_KHR:
      return 1;

   case VK_IMAGE_ASPECT_PLANE_2_BIT_KHR:
      return 2;

   default:
      /* Purposefully assert with depth/stencil aspects. */
      unreachable("invalid image aspect");
   }
}
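/* Worked example (illustrative only): for a combined depth/stencil image,
 * image_aspects == DEPTH | STENCIL, so
 *
 *    anv_image_aspect_to_plane(aspects, VK_IMAGE_ASPECT_DEPTH_BIT)   == 0
 *    anv_image_aspect_to_plane(aspects, VK_IMAGE_ASPECT_STENCIL_BIT) == 1
 *
 * while a stencil-only image maps its stencil aspect to plane 0.
 */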
static inline uint32_t
anv_image_aspect_get_planes(VkImageAspectFlags aspect_mask)
{
   uint32_t planes = 0;

   if (aspect_mask & (VK_IMAGE_ASPECT_COLOR_BIT |
                      VK_IMAGE_ASPECT_DEPTH_BIT |
                      VK_IMAGE_ASPECT_STENCIL_BIT |
                      VK_IMAGE_ASPECT_PLANE_0_BIT_KHR))
      planes++;
   if (aspect_mask & VK_IMAGE_ASPECT_PLANE_1_BIT_KHR)
      planes++;
   if (aspect_mask & VK_IMAGE_ASPECT_PLANE_2_BIT_KHR)
      planes++;

   return planes;
}
static inline VkImageAspectFlags
anv_plane_to_aspect(VkImageAspectFlags image_aspects,
                    uint32_t plane)
{
   if (image_aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT) {
      if (_mesa_bitcount(image_aspects) > 1)
         return VK_IMAGE_ASPECT_PLANE_0_BIT_KHR << plane;
      return VK_IMAGE_ASPECT_COLOR_BIT;
   }
   if (image_aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
      return VK_IMAGE_ASPECT_DEPTH_BIT << plane;
   assert(image_aspects == VK_IMAGE_ASPECT_STENCIL_BIT);
   return VK_IMAGE_ASPECT_STENCIL_BIT;
}
#define anv_foreach_image_aspect_bit(b, image, aspects) \
   for_each_bit(b, anv_image_expand_aspects(image, aspects))
const struct anv_format *
anv_get_format(VkFormat format);
static inline uint32_t
anv_get_format_planes(VkFormat vk_format)
{
   const struct anv_format *format = anv_get_format(vk_format);

   return format != NULL ? format->n_planes : 0;
}
struct anv_format_plane
anv_get_format_plane(const struct gen_device_info *devinfo, VkFormat vk_format,
                     VkImageAspectFlags aspect, VkImageTiling tiling);
static inline enum isl_format
anv_get_isl_format(const struct gen_device_info *devinfo, VkFormat vk_format,
                   VkImageAspectFlags aspect, VkImageTiling tiling)
{
   return anv_get_format_plane(devinfo, vk_format, aspect, tiling).isl_format;
}
static inline struct isl_swizzle
anv_swizzle_for_render(struct isl_swizzle swizzle)
{
   /* Sometimes the swizzle will have alpha map to one.  We do this to fake
    * RGB as RGBA for texturing.
    */
   assert(swizzle.a == ISL_CHANNEL_SELECT_ONE ||
          swizzle.a == ISL_CHANNEL_SELECT_ALPHA);

   /* But it doesn't matter what we render to that channel */
   swizzle.a = ISL_CHANNEL_SELECT_ALPHA;

   return swizzle;
}
void
anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm);
/**
 * Subsurface of an anv_image.
 */
struct anv_surface {
   /** Valid only if isl_surf::size > 0. */
   struct isl_surf isl;

   /**
    * Offset from VkImage's base address, as bound by vkBindImageMemory().
    */
   uint32_t offset;
};
struct anv_image {
   VkImageType type;
   /* The original VkFormat provided by the client.  This may not match any
    * of the actual surface formats.
    */
   VkFormat vk_format;
   const struct anv_format *format;

   VkImageAspectFlags aspects;
   VkExtent3D extent;
   uint32_t levels;
   uint32_t array_size;
   uint32_t samples; /**< VkImageCreateInfo::samples */
   uint32_t n_planes;
   VkImageUsageFlags usage; /**< Superset of VkImageCreateInfo::usage. */
   VkImageTiling tiling; /**< VkImageCreateInfo::tiling */

   /* Whether the image is made of several underlying buffer objects rather
    * than a single one with different offsets.
    */
   bool disjoint;

   /**
    * Image subsurfaces
    *
    * For each foo, anv_image::planes[x].surface is valid if and only if
    * anv_image::aspects has a x aspect.  Refer to anv_image_aspect_to_plane()
    * to figure the number associated with a given aspect.
    *
    * The hardware requires that the depth buffer and stencil buffer be
    * separate surfaces.  From Vulkan's perspective, though, depth and stencil
    * reside in the same VkImage.  To satisfy both the hardware and Vulkan, we
    * allocate the depth and stencil buffers as separate surfaces in the same
    * bo.
    *
    * Memory layout :
    *
    * -----------------------
    * |     surface0        |   /|\
    * -----------------------    |
    * |   shadow surface0   |    |
    * -----------------------    | Plane 0
    * |    aux surface0     |    |
    * -----------------------    |
    * | fast clear colors0  |   \|/
    * -----------------------
    * |     surface1        |   /|\
    * -----------------------    |
    * |   shadow surface1   |    |
    * -----------------------    | Plane 1
    * |    aux surface1     |    |
    * -----------------------    |
    * | fast clear colors1  |   \|/
    * -----------------------
    * |        ...          |
    * -----------------------
    */
   struct {
      /**
       * Offset of the entire plane (whenever the image is disjoint this is
       * set to 0).
       */
      uint32_t offset;

      struct anv_surface surface;

      /**
       * A surface which shadows the main surface and may have different
       * tiling. This is used for sampling using a tiling that isn't supported
       * for other operations.
       */
      struct anv_surface shadow_surface;

      /**
       * For color images, this is the aux usage for this image when not used
       * as a color attachment.
       *
       * For depth/stencil images, this is set to ISL_AUX_USAGE_HIZ if the
       * image has a HiZ buffer.
       */
      enum isl_aux_usage aux_usage;

      struct anv_surface aux_surface;

      /**
       * Offset of the fast clear state (used to compute the
       * fast_clear_state_offset of the following planes).
       */
      uint32_t fast_clear_state_offset;

      /**
       * BO associated with this plane, set when bound.
       */
      struct anv_bo *bo;
      VkDeviceSize bo_offset;

      /**
       * When destroying the image, also free the bo.
       */
      bool bo_is_owned;
   } planes[3];
};
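/* Illustrative sketch (not code from this header): per-plane data is always
 * reached by first mapping an aspect to its plane index, e.g. to find the
 * HiZ (aux) surface of a depth image:
 *
 *    uint32_t plane = anv_image_aspect_to_plane(image->aspects,
 *                                               VK_IMAGE_ASPECT_DEPTH_BIT);
 *    const struct anv_surface *hiz = &image->planes[plane].aux_surface;
 */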
/* Returns the number of auxiliary buffer levels attached to an image. */
static inline uint8_t
anv_image_aux_levels(const struct anv_image * const image,
                     VkImageAspectFlagBits aspect)
{
   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
   return image->planes[plane].aux_surface.isl.size > 0 ?
          image->planes[plane].aux_surface.isl.levels : 0;
}
/* Returns the number of auxiliary buffer layers attached to an image. */
static inline uint32_t
anv_image_aux_layers(const struct anv_image * const image,
                     VkImageAspectFlagBits aspect,
                     const uint8_t miplevel)
{
   assert(image);

   /* The miplevel must exist in the main buffer. */
   assert(miplevel < image->levels);

   if (miplevel >= anv_image_aux_levels(image, aspect)) {
      /* There are no layers with auxiliary data because the miplevel has no
       * auxiliary data.
       */
      return 0;
   } else {
      uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
      return MAX2(image->planes[plane].aux_surface.isl.logical_level0_px.array_len,
                  image->planes[plane].aux_surface.isl.logical_level0_px.depth >> miplevel);
   }
}
static inline unsigned
anv_fast_clear_state_entry_size(const struct anv_device *device)
{
   assert(device);
   /* Entry contents:
    *   +--------------------------------------------+
    *   | clear value dword(s) | needs resolve dword |
    *   +--------------------------------------------+
    */

   /* Ensure that the needs resolve dword is in fact dword-aligned to enable
    * GPU memcpy operations.
    */
   assert(device->isl_dev.ss.clear_value_size % 4 == 0);
   return device->isl_dev.ss.clear_value_size + 4;
}
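/* Illustrative sketch (assuming one entry per miplevel, as the entry layout
 * above suggests): the "needs resolve" dword of level N would then sit right
 * after that level's clear value, i.e. at roughly
 *
 *    fast_clear_state_offset
 *       + N * anv_fast_clear_state_entry_size(device)
 *       + device->isl_dev.ss.clear_value_size
 *
 * within the plane's BO.
 */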
/* Returns true if a HiZ-enabled depth buffer can be sampled from. */
static inline bool
anv_can_sample_with_hiz(const struct gen_device_info * const devinfo,
                        const struct anv_image *image)
{
   if (!(image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT))
      return false;

   if (devinfo->gen < 8)
      return false;

   return image->samples == 1;
}
void
anv_gen8_hiz_op_resolve(struct anv_cmd_buffer *cmd_buffer,
                        const struct anv_image *image,
                        enum blorp_hiz_op op);
void
anv_ccs_resolve(struct anv_cmd_buffer * const cmd_buffer,
                const struct anv_state surface_state,
                const struct anv_image * const image,
                VkImageAspectFlagBits aspect,
                const uint8_t level, const uint32_t layer_count,
                const enum blorp_fast_clear_op op);
void
anv_image_fast_clear(struct anv_cmd_buffer *cmd_buffer,
                     const struct anv_image *image,
                     VkImageAspectFlagBits aspect,
                     const uint32_t base_level, const uint32_t level_count,
                     const uint32_t base_layer, uint32_t layer_count);
void
anv_image_copy_to_shadow(struct anv_cmd_buffer *cmd_buffer,
                         const struct anv_image *image,
                         uint32_t base_level, uint32_t level_count,
                         uint32_t base_layer, uint32_t layer_count);
enum isl_aux_usage
anv_layout_to_aux_usage(const struct gen_device_info * const devinfo,
                        const struct anv_image *image,
                        const VkImageAspectFlagBits aspect,
                        const VkImageLayout layout);
/* This is defined as a macro so that it works for both
 * VkImageSubresourceRange and VkImageSubresourceLayers
 */
#define anv_get_layerCount(_image, _range) \
   ((_range)->layerCount == VK_REMAINING_ARRAY_LAYERS ? \
    (_image)->array_size - (_range)->baseArrayLayer : (_range)->layerCount)
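/* Worked example: for an image with array_size == 6 and a subresource range
 * with baseArrayLayer == 2 and layerCount == VK_REMAINING_ARRAY_LAYERS,
 * anv_get_layerCount() evaluates to 6 - 2 = 4; with an explicit layerCount it
 * is returned unchanged.
 */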
static inline uint32_t
anv_get_levelCount(const struct anv_image *image,
                   const VkImageSubresourceRange *range)
{
   return range->levelCount == VK_REMAINING_MIP_LEVELS ?
          image->levels - range->baseMipLevel : range->levelCount;
}
static inline VkImageAspectFlags
anv_image_expand_aspects(const struct anv_image *image,
                         VkImageAspectFlags aspects)
{
   /* If the underlying image has color plane aspects and
    * VK_IMAGE_ASPECT_COLOR_BIT has been requested, then return the aspects of
    * the underlying image.
    */
   if ((image->aspects & VK_IMAGE_ASPECT_PLANES_BITS) != 0 &&
       aspects == VK_IMAGE_ASPECT_COLOR_BIT)
      return image->aspects;

   return aspects;
}
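/* Worked example: for a two-plane multi-planar image
 * (aspects == PLANE_0 | PLANE_1), a request for VK_IMAGE_ASPECT_COLOR_BIT
 * expands to PLANE_0 | PLANE_1; any other request is returned unchanged.
 */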
static inline bool
anv_image_aspects_compatible(VkImageAspectFlags aspects1,
                             VkImageAspectFlags aspects2)
{
   if (aspects1 == aspects2)
      return true;

   /* Color aspects are only compatible with other color aspects. */
   if ((aspects1 & VK_IMAGE_ASPECT_ANY_COLOR_BIT) != 0 &&
       (aspects2 & VK_IMAGE_ASPECT_ANY_COLOR_BIT) != 0 &&
       _mesa_bitcount(aspects1) == _mesa_bitcount(aspects2))
      return true;

   return false;
}
struct anv_image_view {
   const struct anv_image *image; /**< VkImageViewCreateInfo::image */

   VkImageAspectFlags aspect_mask;
   VkFormat vk_format;
   VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */

   unsigned n_planes;
   struct {
      uint32_t image_plane;

      struct isl_view isl;

      /**
       * RENDER_SURFACE_STATE when using image as a sampler surface with an
       * image layout of SHADER_READ_ONLY_OPTIMAL or
       * DEPTH_STENCIL_READ_ONLY_OPTIMAL.
       */
      struct anv_surface_state optimal_sampler_surface_state;

      /**
       * RENDER_SURFACE_STATE when using image as a sampler surface with an
       * image layout of GENERAL.
       */
      struct anv_surface_state general_sampler_surface_state;

      /**
       * RENDER_SURFACE_STATE when using image as a storage image. Separate
       * states for write-only and readable, using the real format for
       * write-only and the lowered format for readable.
       */
      struct anv_surface_state storage_surface_state;
      struct anv_surface_state writeonly_storage_surface_state;

      struct brw_image_param storage_image_param;
   } planes[3];
};
enum anv_image_view_state_flags {
   ANV_IMAGE_VIEW_STATE_STORAGE_WRITE_ONLY = (1 << 0),
   ANV_IMAGE_VIEW_STATE_TEXTURE_OPTIMAL    = (1 << 1),
};
void anv_image_fill_surface_state(struct anv_device *device,
                                  const struct anv_image *image,
                                  VkImageAspectFlagBits aspect,
                                  const struct isl_view *view,
                                  isl_surf_usage_flags_t view_usage,
                                  enum isl_aux_usage aux_usage,
                                  const union isl_color_value *clear_color,
                                  enum anv_image_view_state_flags flags,
                                  struct anv_surface_state *state_inout,
                                  struct brw_image_param *image_param_out);
struct anv_image_create_info {
   const VkImageCreateInfo *vk_info;

   /** An opt-in bitmask which filters an ISL-mapping of the Vulkan tiling. */
   isl_tiling_flags_t isl_tiling_flags;

   uint32_t stride;
};
VkResult anv_image_create(VkDevice _device,
                          const struct anv_image_create_info *info,
                          const VkAllocationCallbacks *alloc,
                          VkImage *pImage);

const struct anv_surface *
anv_image_get_surface_for_aspect_mask(const struct anv_image *image,
                                      VkImageAspectFlags aspect_mask);
enum isl_format
anv_isl_format_for_descriptor_type(VkDescriptorType type);
static inline struct VkExtent3D
anv_sanitize_image_extent(const VkImageType imageType,
                          const struct VkExtent3D imageExtent)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkExtent3D) { imageExtent.width, 1, 1 };
   case VK_IMAGE_TYPE_2D:
      return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
   case VK_IMAGE_TYPE_3D:
      return imageExtent;
   default:
      unreachable("invalid image type");
   }
}
static inline struct VkOffset3D
anv_sanitize_image_offset(const VkImageType imageType,
                          const struct VkOffset3D imageOffset)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkOffset3D) { imageOffset.x, 0, 0 };
   case VK_IMAGE_TYPE_2D:
      return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
   case VK_IMAGE_TYPE_3D:
      return imageOffset;
   default:
      unreachable("invalid image type");
   }
}
void anv_fill_buffer_surface_state(struct anv_device *device,
                                   struct anv_state state,
                                   enum isl_format format,
                                   uint32_t offset, uint32_t range,
                                   uint32_t stride);
struct anv_ycbcr_conversion {
   const struct anv_format *        format;
   VkSamplerYcbcrModelConversionKHR ycbcr_model;
   VkSamplerYcbcrRangeKHR           ycbcr_range;
   VkComponentSwizzle               mapping[4];
   VkChromaLocationKHR              chroma_offsets[2];
   VkFilter                         chroma_filter;
   bool                             chroma_reconstruction;
};
struct anv_sampler {
   uint32_t                     state[3][4];
   uint32_t                     n_planes;
   struct anv_ycbcr_conversion *conversion;
};
struct anv_framebuffer {
   uint32_t width;
   uint32_t height;
   uint32_t layers;

   uint32_t attachment_count;
   struct anv_image_view *attachments[0];
};
struct anv_subpass {
   uint32_t attachment_count;

   /**
    * A pointer to all attachment references used in this subpass.
    * Only valid if ::attachment_count > 0.
    */
   VkAttachmentReference *attachments;
   uint32_t               input_count;
   VkAttachmentReference *input_attachments;
   uint32_t               color_count;
   VkAttachmentReference *color_attachments;
   VkAttachmentReference *resolve_attachments;

   VkAttachmentReference  depth_stencil_attachment;

   uint32_t               view_mask;

   /** Subpass has a depth/stencil self-dependency */
   bool                   has_ds_self_dep;

   /** Subpass has at least one resolve attachment */
   bool                   has_resolve;
};
static inline unsigned
anv_subpass_view_count(const struct anv_subpass *subpass)
{
   return MAX2(1, _mesa_bitcount(subpass->view_mask));
}
struct anv_render_pass_attachment {
   /* TODO: Consider using VkAttachmentDescription instead of storing each of
    * its members individually.
    */
   VkFormat            format;
   uint32_t            samples;
   VkImageUsageFlags   usage;
   VkAttachmentLoadOp  load_op;
   VkAttachmentStoreOp store_op;
   VkAttachmentLoadOp  stencil_load_op;
   VkImageLayout       initial_layout;
   VkImageLayout       final_layout;
   VkImageLayout       first_subpass_layout;

   /* The subpass id in which the attachment will be used last. */
   uint32_t            last_subpass_idx;
};
struct anv_render_pass {
   uint32_t attachment_count;
   uint32_t subpass_count;
   /* An array of subpass_count+1 flushes, one per subpass boundary */
   enum anv_pipe_bits *               subpass_flushes;
   struct anv_render_pass_attachment *attachments;
   struct anv_subpass                 subpasses[0];
};
#define ANV_PIPELINE_STATISTICS_MASK 0x000007ff
struct anv_query_pool {
   VkQueryType                   type;
   VkQueryPipelineStatisticFlags pipeline_statistics;
   /** Stride between slots, in bytes */
   uint32_t                      stride;
   /** Number of slots in this query pool */
   uint32_t                      slots;
   struct anv_bo                 bo;
};
void *anv_lookup_entrypoint(const struct gen_device_info *devinfo,
                            const char *name);
void anv_dump_image_to_ppm(struct anv_device *device,
                           struct anv_image *image, unsigned miplevel,
                           unsigned array_layer, VkImageAspectFlagBits aspect,
                           const char *filename);
enum anv_dump_action {
   ANV_DUMP_FRAMEBUFFERS_BIT = 0x1,
};
void anv_dump_start(struct anv_device *device, enum anv_dump_action actions);
void anv_dump_finish(void);

void anv_dump_add_framebuffer(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_framebuffer *fb);
static inline uint32_t
anv_get_subpass_id(const struct anv_cmd_state * const cmd_state)
{
   /* This function must be called from within a subpass. */
   assert(cmd_state->pass && cmd_state->subpass);

   const uint32_t subpass_id = cmd_state->subpass - cmd_state->pass->subpasses;

   /* The id of this subpass shouldn't exceed the number of subpasses in this
    * render pass minus 1.
    */
   assert(subpass_id < cmd_state->pass->subpass_count);

   return subpass_id;
}
#define ANV_DEFINE_HANDLE_CASTS(__anv_type, __VkType)               \
                                                                     \
   static inline struct __anv_type *                                \
   __anv_type ## _from_handle(__VkType _handle)                     \
   {                                                                 \
      return (struct __anv_type *) _handle;                         \
   }                                                                 \
                                                                     \
   static inline __VkType                                            \
   __anv_type ## _to_handle(struct __anv_type *_obj)                \
   {                                                                 \
      return (__VkType) _obj;                                        \
   }
#define ANV_DEFINE_NONDISP_HANDLE_CASTS(__anv_type, __VkType)       \
                                                                     \
   static inline struct __anv_type *                                \
   __anv_type ## _from_handle(__VkType _handle)                     \
   {                                                                 \
      return (struct __anv_type *)(uintptr_t) _handle;              \
   }                                                                 \
                                                                     \
   static inline __VkType                                            \
   __anv_type ## _to_handle(struct __anv_type *_obj)                \
   {                                                                 \
      return (__VkType)(uintptr_t) _obj;                             \
   }
#define ANV_FROM_HANDLE(__anv_type, __name, __handle) \
   struct __anv_type *__name = __anv_type ## _from_handle(__handle)
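/* Usage sketch (illustrative only): entrypoints unwrap their Vulkan handles
 * with ANV_FROM_HANDLE, e.g. (remaining parameters and body elided)
 *
 *    VkResult anv_ResetFences(VkDevice _device, ...)
 *    {
 *       ANV_FROM_HANDLE(anv_device, device, _device);
 *       ...
 *    }
 */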
ANV_DEFINE_HANDLE_CASTS(anv_cmd_buffer, VkCommandBuffer)
ANV_DEFINE_HANDLE_CASTS(anv_device, VkDevice)
ANV_DEFINE_HANDLE_CASTS(anv_instance, VkInstance)
ANV_DEFINE_HANDLE_CASTS(anv_physical_device, VkPhysicalDevice)
ANV_DEFINE_HANDLE_CASTS(anv_queue, VkQueue)

ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_cmd_pool, VkCommandPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer, VkBuffer)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer_view, VkBufferView)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_pool, VkDescriptorPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set, VkDescriptorSet)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set_layout, VkDescriptorSetLayout)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_update_template, VkDescriptorUpdateTemplateKHR)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_device_memory, VkDeviceMemory)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_fence, VkFence)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_event, VkEvent)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_framebuffer, VkFramebuffer)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image, VkImage)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image_view, VkImageView)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_cache, VkPipelineCache)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline, VkPipeline)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_layout, VkPipelineLayout)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_query_pool, VkQueryPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_render_pass, VkRenderPass)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_sampler, VkSampler)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_semaphore, VkSemaphore)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_shader_module, VkShaderModule)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_debug_report_callback, VkDebugReportCallbackEXT)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_ycbcr_conversion, VkSamplerYcbcrConversionKHR)
/* Gen-specific function declarations */
#ifdef genX
#  include "anv_genX.h"
#else
#  define genX(x) gen7_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen75_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen8_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen9_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen10_##x
#  include "anv_genX.h"
#  undef genX
#endif

#endif /* ANV_PRIVATE_H */