2 * Copyright © 2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
39 #define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
44 #include "common/gen_clflush.h"
45 #include "common/gen_device_info.h"
46 #include "blorp/blorp.h"
47 #include "compiler/brw_compiler.h"
48 #include "util/macros.h"
49 #include "util/list.h"
50 #include "util/u_atomic.h"
51 #include "util/u_vector.h"
54 /* Pre-declarations needed for WSI entrypoints */
57 typedef struct xcb_connection_t xcb_connection_t
;
58 typedef uint32_t xcb_visualid_t
;
59 typedef uint32_t xcb_window_t
;
62 struct anv_buffer_view
;
63 struct anv_image_view
;
65 struct anv_debug_report_callback
;
69 #include <vulkan/vulkan.h>
70 #include <vulkan/vulkan_intel.h>
71 #include <vulkan/vk_icd.h>
73 #include "anv_entrypoints.h"
76 #include "common/gen_debug.h"
77 #include "wsi_common.h"
79 /* Allowing different clear colors requires us to perform a depth resolve at
80 * the end of certain render passes. This is because while slow clears store
81 * the clear color in the HiZ buffer, fast clears (without a resolve) don't.
82 * See the PRMs for examples describing when additional resolves would be
83 * necessary. To enable fast clears without requiring extra resolves, we set
84 * the clear value to a globally-defined one. We could allow different values
85 * if the user doesn't expect coherent data during or after a render passes
86 * (VK_ATTACHMENT_STORE_OP_DONT_CARE), but such users (aside from the CTS)
87 * don't seem to exist yet. In almost all Vulkan applications tested thus far,
88 * 1.0f seems to be the only value used. The only application that doesn't set
89 * this value does so through the usage of an seemingly uninitialized clear
92 #define ANV_HZ_FC_VAL 1.0f
97 #define MAX_VIEWPORTS 16
98 #define MAX_SCISSORS 16
99 #define MAX_PUSH_CONSTANTS_SIZE 128
100 #define MAX_DYNAMIC_BUFFERS 16
102 #define MAX_PUSH_DESCRIPTORS 32 /* Minimum requirement */
104 #define ANV_SVGS_VB_INDEX MAX_VBS
105 #define ANV_DRAWID_VB_INDEX (MAX_VBS + 1)
107 #define anv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
109 static inline uint32_t
110 align_down_npot_u32(uint32_t v
, uint32_t a
)
115 static inline uint32_t
116 align_u32(uint32_t v
, uint32_t a
)
118 assert(a
!= 0 && a
== (a
& -a
));
119 return (v
+ a
- 1) & ~(a
- 1);
122 static inline uint64_t
123 align_u64(uint64_t v
, uint64_t a
)
125 assert(a
!= 0 && a
== (a
& -a
));
126 return (v
+ a
- 1) & ~(a
- 1);
129 static inline int32_t
130 align_i32(int32_t v
, int32_t a
)
132 assert(a
!= 0 && a
== (a
& -a
));
133 return (v
+ a
- 1) & ~(a
- 1);
136 /** Alignment must be a power of 2. */
138 anv_is_aligned(uintmax_t n
, uintmax_t a
)
140 assert(a
== (a
& -a
));
141 return (n
& (a
- 1)) == 0;
144 static inline uint32_t
145 anv_minify(uint32_t n
, uint32_t levels
)
147 if (unlikely(n
== 0))
150 return MAX2(n
>> levels
, 1);
154 anv_clamp_f(float f
, float min
, float max
)
167 anv_clear_mask(uint32_t *inout_mask
, uint32_t clear_mask
)
169 if (*inout_mask
& clear_mask
) {
170 *inout_mask
&= ~clear_mask
;
177 static inline union isl_color_value
178 vk_to_isl_color(VkClearColorValue color
)
180 return (union isl_color_value
) {
190 #define for_each_bit(b, dword) \
191 for (uint32_t __dword = (dword); \
192 (b) = __builtin_ffs(__dword) - 1, __dword; \
193 __dword &= ~(1 << (b)))
195 #define typed_memcpy(dest, src, count) ({ \
196 STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
197 memcpy((dest), (src), (count) * sizeof(*(src))); \
200 /* Mapping from anv object to VkDebugReportObjectTypeEXT. New types need
201 * to be added here in order to utilize mapping in debug/error/perf macros.
203 #define REPORT_OBJECT_TYPE(o) \
204 __builtin_choose_expr ( \
205 __builtin_types_compatible_p (__typeof (o), struct anv_instance*), \
206 VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, \
207 __builtin_choose_expr ( \
208 __builtin_types_compatible_p (__typeof (o), struct anv_physical_device*), \
209 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, \
210 __builtin_choose_expr ( \
211 __builtin_types_compatible_p (__typeof (o), struct anv_device*), \
212 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, \
213 __builtin_choose_expr ( \
214 __builtin_types_compatible_p (__typeof (o), const struct anv_device*), \
215 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, \
216 __builtin_choose_expr ( \
217 __builtin_types_compatible_p (__typeof (o), struct anv_queue*), \
218 VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, \
219 __builtin_choose_expr ( \
220 __builtin_types_compatible_p (__typeof (o), struct anv_semaphore*), \
221 VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, \
222 __builtin_choose_expr ( \
223 __builtin_types_compatible_p (__typeof (o), struct anv_cmd_buffer*), \
224 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, \
225 __builtin_choose_expr ( \
226 __builtin_types_compatible_p (__typeof (o), struct anv_fence*), \
227 VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, \
228 __builtin_choose_expr ( \
229 __builtin_types_compatible_p (__typeof (o), struct anv_device_memory*), \
230 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, \
231 __builtin_choose_expr ( \
232 __builtin_types_compatible_p (__typeof (o), struct anv_buffer*), \
233 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, \
234 __builtin_choose_expr ( \
235 __builtin_types_compatible_p (__typeof (o), struct anv_image*), \
236 VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, \
237 __builtin_choose_expr ( \
238 __builtin_types_compatible_p (__typeof (o), const struct anv_image*), \
239 VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, \
240 __builtin_choose_expr ( \
241 __builtin_types_compatible_p (__typeof (o), struct anv_event*), \
242 VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT, \
243 __builtin_choose_expr ( \
244 __builtin_types_compatible_p (__typeof (o), struct anv_query_pool*), \
245 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, \
246 __builtin_choose_expr ( \
247 __builtin_types_compatible_p (__typeof (o), struct anv_buffer_view*), \
248 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT, \
249 __builtin_choose_expr ( \
250 __builtin_types_compatible_p (__typeof (o), struct anv_image_view*), \
251 VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, \
252 __builtin_choose_expr ( \
253 __builtin_types_compatible_p (__typeof (o), struct anv_shader_module*), \
254 VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, \
255 __builtin_choose_expr ( \
256 __builtin_types_compatible_p (__typeof (o), struct anv_pipeline_cache*), \
257 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT, \
258 __builtin_choose_expr ( \
259 __builtin_types_compatible_p (__typeof (o), struct anv_pipeline_layout*), \
260 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, \
261 __builtin_choose_expr ( \
262 __builtin_types_compatible_p (__typeof (o), struct anv_render_pass*), \
263 VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, \
264 __builtin_choose_expr ( \
265 __builtin_types_compatible_p (__typeof (o), struct anv_pipeline*), \
266 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, \
267 __builtin_choose_expr ( \
268 __builtin_types_compatible_p (__typeof (o), struct anv_descriptor_set_layout*), \
269 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, \
270 __builtin_choose_expr ( \
271 __builtin_types_compatible_p (__typeof (o), struct anv_sampler*), \
272 VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT, \
273 __builtin_choose_expr ( \
274 __builtin_types_compatible_p (__typeof (o), struct anv_descriptor_pool*), \
275 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, \
276 __builtin_choose_expr ( \
277 __builtin_types_compatible_p (__typeof (o), struct anv_descriptor_set*), \
278 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, \
279 __builtin_choose_expr ( \
280 __builtin_types_compatible_p (__typeof (o), struct anv_framebuffer*), \
281 VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT, \
282 __builtin_choose_expr ( \
283 __builtin_types_compatible_p (__typeof (o), struct anv_cmd_pool*), \
284 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, \
285 __builtin_choose_expr ( \
286 __builtin_types_compatible_p (__typeof (o), struct anv_surface*), \
287 VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT, \
288 __builtin_choose_expr ( \
289 __builtin_types_compatible_p (__typeof (o), struct wsi_swapchain*), \
290 VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, \
291 __builtin_choose_expr ( \
292 __builtin_types_compatible_p (__typeof (o), struct anv_debug_callback*), \
293 VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT, \
294 __builtin_choose_expr ( \
295 __builtin_types_compatible_p (__typeof (o), void*), \
296 VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, \
297 /* The void expression results in a compile-time error \
298 when assigning the result to something. */ \
299 (void)0)))))))))))))))))))))))))))))))
301 /* Whenever we generate an error, pass it through this function. Useful for
302 * debugging, where we can break on it. Only call at error site, not when
303 * propagating errors. Might be useful to plug in a stack trace here.
306 VkResult
__vk_errorf(struct anv_instance
*instance
, const void *object
,
307 VkDebugReportObjectTypeEXT type
, VkResult error
,
308 const char *file
, int line
, const char *format
, ...);
311 #define vk_error(error) __vk_errorf(NULL, NULL,\
312 VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,\
313 error, __FILE__, __LINE__, NULL);
314 #define vk_errorf(instance, obj, error, format, ...)\
315 __vk_errorf(instance, obj, REPORT_OBJECT_TYPE(obj), error,\
316 __FILE__, __LINE__, format, ## __VA_ARGS__);
317 #define anv_debug(format, ...) fprintf(stderr, "debug: " format, ##__VA_ARGS__)
319 #define vk_error(error) error
320 #define vk_errorf(instance, obj, error, format, ...) error
321 #define anv_debug(format, ...)
325 * Warn on ignored extension structs.
327 * The Vulkan spec requires us to ignore unsupported or unknown structs in
328 * a pNext chain. In debug mode, emitting warnings for ignored structs may
329 * help us discover structs that we should not have ignored.
332 * From the Vulkan 1.0.38 spec:
334 * Any component of the implementation (the loader, any enabled layers,
335 * and drivers) must skip over, without processing (other than reading the
336 * sType and pNext members) any chained structures with sType values not
337 * defined by extensions supported by that component.
339 #define anv_debug_ignored_stype(sType) \
340 anv_debug("%s: ignored VkStructureType %u\n", __func__, (sType))
342 void __anv_finishme(const char *file
, int line
, const char *format
, ...)
343 anv_printflike(3, 4);
344 void __anv_perf_warn(struct anv_instance
*instance
, const void *object
,
345 VkDebugReportObjectTypeEXT type
, const char *file
,
346 int line
, const char *format
, ...)
347 anv_printflike(6, 7);
348 void anv_loge(const char *format
, ...) anv_printflike(1, 2);
349 void anv_loge_v(const char *format
, va_list va
);
351 void anv_debug_report(struct anv_instance
*instance
,
352 VkDebugReportFlagsEXT flags
,
353 VkDebugReportObjectTypeEXT object_type
,
357 const char* pLayerPrefix
,
358 const char *pMessage
);
361 * Print a FINISHME message, including its source location.
363 #define anv_finishme(format, ...) \
365 static bool reported = false; \
367 __anv_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__); \
373 * Print a perf warning message. Set INTEL_DEBUG=perf to see these.
375 #define anv_perf_warn(instance, obj, format, ...) \
377 static bool reported = false; \
378 if (!reported && unlikely(INTEL_DEBUG & DEBUG_PERF)) { \
379 __anv_perf_warn(instance, obj, REPORT_OBJECT_TYPE(obj), __FILE__, __LINE__,\
380 format, ##__VA_ARGS__); \
385 /* A non-fatal assert. Useful for debugging. */
387 #define anv_assert(x) ({ \
388 if (unlikely(!(x))) \
389 fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x); \
392 #define anv_assert(x)
395 /* A multi-pointer allocator
397 * When copying data structures from the user (such as a render pass), it's
398 * common to need to allocate data for a bunch of different things. Instead
399 * of doing several allocations and having to handle all of the error checking
400 * that entails, it can be easier to do a single allocation. This struct
401 * helps facilitate that. The intended usage looks like this:
404 * anv_multialloc_add(&ma, &main_ptr, 1);
405 * anv_multialloc_add(&ma, &substruct1, substruct1Count);
406 * anv_multialloc_add(&ma, &substruct2, substruct2Count);
408 * if (!anv_multialloc_alloc(&ma, pAllocator, VK_ALLOCATION_SCOPE_FOO))
409 * return vk_error(VK_ERROR_OUT_OF_HOST_MEORY);
411 struct anv_multialloc
{
419 #define ANV_MULTIALLOC_INIT \
420 ((struct anv_multialloc) { 0, })
422 #define ANV_MULTIALLOC(_name) \
423 struct anv_multialloc _name = ANV_MULTIALLOC_INIT
425 __attribute__((always_inline
))
427 _anv_multialloc_add(struct anv_multialloc
*ma
,
428 void **ptr
, size_t size
, size_t align
)
430 size_t offset
= align_u64(ma
->size
, align
);
431 ma
->size
= offset
+ size
;
432 ma
->align
= MAX2(ma
->align
, align
);
434 /* Store the offset in the pointer. */
435 *ptr
= (void *)(uintptr_t)offset
;
437 assert(ma
->ptr_count
< ARRAY_SIZE(ma
->ptrs
));
438 ma
->ptrs
[ma
->ptr_count
++] = ptr
;
441 #define anv_multialloc_add_size(_ma, _ptr, _size) \
442 _anv_multialloc_add((_ma), (void **)(_ptr), (_size), __alignof__(**(_ptr)))
444 #define anv_multialloc_add(_ma, _ptr, _count) \
445 anv_multialloc_add_size(_ma, _ptr, (_count) * sizeof(**(_ptr)));
447 __attribute__((always_inline
))
449 anv_multialloc_alloc(struct anv_multialloc
*ma
,
450 const VkAllocationCallbacks
*alloc
,
451 VkSystemAllocationScope scope
)
453 void *ptr
= vk_alloc(alloc
, ma
->size
, ma
->align
, scope
);
457 /* Fill out each of the pointers with their final value.
459 * for (uint32_t i = 0; i < ma->ptr_count; i++)
460 * *ma->ptrs[i] = ptr + (uintptr_t)*ma->ptrs[i];
462 * Unfortunately, even though ma->ptr_count is basically guaranteed to be a
463 * constant, GCC is incapable of figuring this out and unrolling the loop
464 * so we have to give it a little help.
466 STATIC_ASSERT(ARRAY_SIZE(ma
->ptrs
) == 8);
467 #define _ANV_MULTIALLOC_UPDATE_POINTER(_i) \
468 if ((_i) < ma->ptr_count) \
469 *ma->ptrs[_i] = ptr + (uintptr_t)*ma->ptrs[_i]
470 _ANV_MULTIALLOC_UPDATE_POINTER(0);
471 _ANV_MULTIALLOC_UPDATE_POINTER(1);
472 _ANV_MULTIALLOC_UPDATE_POINTER(2);
473 _ANV_MULTIALLOC_UPDATE_POINTER(3);
474 _ANV_MULTIALLOC_UPDATE_POINTER(4);
475 _ANV_MULTIALLOC_UPDATE_POINTER(5);
476 _ANV_MULTIALLOC_UPDATE_POINTER(6);
477 _ANV_MULTIALLOC_UPDATE_POINTER(7);
478 #undef _ANV_MULTIALLOC_UPDATE_POINTER
483 __attribute__((always_inline
))
485 anv_multialloc_alloc2(struct anv_multialloc
*ma
,
486 const VkAllocationCallbacks
*parent_alloc
,
487 const VkAllocationCallbacks
*alloc
,
488 VkSystemAllocationScope scope
)
490 return anv_multialloc_alloc(ma
, alloc
? alloc
: parent_alloc
, scope
);
496 /* Index into the current validation list. This is used by the
497 * validation list building alrogithm to track which buffers are already
498 * in the validation list so that we can ensure uniqueness.
502 /* Last known offset. This value is provided by the kernel when we
503 * execbuf and is used as the presumed offset for the next bunch of
511 /** Flags to pass to the kernel through drm_i915_exec_object2::flags */
516 anv_bo_init(struct anv_bo
*bo
, uint32_t gem_handle
, uint64_t size
)
518 bo
->gem_handle
= gem_handle
;
526 /* Represents a lock-free linked list of "free" things. This is used by
527 * both the block pool and the state pools. Unfortunately, in order to
528 * solve the ABA problem, we can't use a single uint32_t head.
530 union anv_free_list
{
534 /* A simple count that is incremented every time the head changes. */
540 #define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { 1, 0 } })
542 struct anv_block_state
{
552 struct anv_block_pool
{
553 struct anv_device
*device
;
557 /* The offset from the start of the bo to the "center" of the block
558 * pool. Pointers to allocated blocks are given by
559 * bo.map + center_bo_offset + offsets.
561 uint32_t center_bo_offset
;
563 /* Current memory map of the block pool. This pointer may or may not
564 * point to the actual beginning of the block pool memory. If
565 * anv_block_pool_alloc_back has ever been called, then this pointer
566 * will point to the "center" position of the buffer and all offsets
567 * (negative or positive) given out by the block pool alloc functions
568 * will be valid relative to this pointer.
570 * In particular, map == bo.map + center_offset
576 * Array of mmaps and gem handles owned by the block pool, reclaimed when
577 * the block pool is destroyed.
579 struct u_vector mmap_cleanups
;
581 struct anv_block_state state
;
583 struct anv_block_state back_state
;
586 /* Block pools are backed by a fixed-size 1GB memfd */
587 #define BLOCK_POOL_MEMFD_SIZE (1ul << 30)
589 /* The center of the block pool is also the middle of the memfd. This may
590 * change in the future if we decide differently for some reason.
592 #define BLOCK_POOL_MEMFD_CENTER (BLOCK_POOL_MEMFD_SIZE / 2)
594 static inline uint32_t
595 anv_block_pool_size(struct anv_block_pool
*pool
)
597 return pool
->state
.end
+ pool
->back_state
.end
;
606 #define ANV_STATE_NULL ((struct anv_state) { .alloc_size = 0 })
608 struct anv_fixed_size_state_pool
{
609 union anv_free_list free_list
;
610 struct anv_block_state block
;
613 #define ANV_MIN_STATE_SIZE_LOG2 6
614 #define ANV_MAX_STATE_SIZE_LOG2 20
616 #define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2 + 1)
618 struct anv_state_pool
{
619 struct anv_block_pool block_pool
;
621 /* The size of blocks which will be allocated from the block pool */
624 /** Free list for "back" allocations */
625 union anv_free_list back_alloc_free_list
;
627 struct anv_fixed_size_state_pool buckets
[ANV_STATE_BUCKETS
];
630 struct anv_state_stream_block
;
632 struct anv_state_stream
{
633 struct anv_state_pool
*state_pool
;
635 /* The size of blocks to allocate from the state pool */
638 /* Current block we're allocating from */
639 struct anv_state block
;
641 /* Offset into the current block at which to allocate the next state */
644 /* List of all blocks allocated from this pool */
645 struct anv_state_stream_block
*block_list
;
648 /* The block_pool functions exported for testing only. The block pool should
649 * only be used via a state pool (see below).
651 VkResult
anv_block_pool_init(struct anv_block_pool
*pool
,
652 struct anv_device
*device
,
653 uint32_t initial_size
);
654 void anv_block_pool_finish(struct anv_block_pool
*pool
);
655 int32_t anv_block_pool_alloc(struct anv_block_pool
*pool
,
656 uint32_t block_size
);
657 int32_t anv_block_pool_alloc_back(struct anv_block_pool
*pool
,
658 uint32_t block_size
);
660 VkResult
anv_state_pool_init(struct anv_state_pool
*pool
,
661 struct anv_device
*device
,
662 uint32_t block_size
);
663 void anv_state_pool_finish(struct anv_state_pool
*pool
);
664 struct anv_state
anv_state_pool_alloc(struct anv_state_pool
*pool
,
665 uint32_t state_size
, uint32_t alignment
);
666 struct anv_state
anv_state_pool_alloc_back(struct anv_state_pool
*pool
);
667 void anv_state_pool_free(struct anv_state_pool
*pool
, struct anv_state state
);
668 void anv_state_stream_init(struct anv_state_stream
*stream
,
669 struct anv_state_pool
*state_pool
,
670 uint32_t block_size
);
671 void anv_state_stream_finish(struct anv_state_stream
*stream
);
672 struct anv_state
anv_state_stream_alloc(struct anv_state_stream
*stream
,
673 uint32_t size
, uint32_t alignment
);
676 * Implements a pool of re-usable BOs. The interface is identical to that
677 * of block_pool except that each block is its own BO.
680 struct anv_device
*device
;
685 void anv_bo_pool_init(struct anv_bo_pool
*pool
, struct anv_device
*device
);
686 void anv_bo_pool_finish(struct anv_bo_pool
*pool
);
687 VkResult
anv_bo_pool_alloc(struct anv_bo_pool
*pool
, struct anv_bo
*bo
,
689 void anv_bo_pool_free(struct anv_bo_pool
*pool
, const struct anv_bo
*bo
);
691 struct anv_scratch_bo
{
696 struct anv_scratch_pool
{
697 /* Indexed by Per-Thread Scratch Space number (the hardware value) and stage */
698 struct anv_scratch_bo bos
[16][MESA_SHADER_STAGES
];
701 void anv_scratch_pool_init(struct anv_device
*device
,
702 struct anv_scratch_pool
*pool
);
703 void anv_scratch_pool_finish(struct anv_device
*device
,
704 struct anv_scratch_pool
*pool
);
705 struct anv_bo
*anv_scratch_pool_alloc(struct anv_device
*device
,
706 struct anv_scratch_pool
*pool
,
707 gl_shader_stage stage
,
708 unsigned per_thread_scratch
);
710 /** Implements a BO cache that ensures a 1-1 mapping of GEM BOs to anv_bos */
711 struct anv_bo_cache
{
712 struct hash_table
*bo_map
;
713 pthread_mutex_t mutex
;
716 VkResult
anv_bo_cache_init(struct anv_bo_cache
*cache
);
717 void anv_bo_cache_finish(struct anv_bo_cache
*cache
);
718 VkResult
anv_bo_cache_alloc(struct anv_device
*device
,
719 struct anv_bo_cache
*cache
,
720 uint64_t size
, struct anv_bo
**bo
);
721 VkResult
anv_bo_cache_import(struct anv_device
*device
,
722 struct anv_bo_cache
*cache
,
723 int fd
, uint64_t size
, struct anv_bo
**bo
);
724 VkResult
anv_bo_cache_export(struct anv_device
*device
,
725 struct anv_bo_cache
*cache
,
726 struct anv_bo
*bo_in
, int *fd_out
);
727 void anv_bo_cache_release(struct anv_device
*device
,
728 struct anv_bo_cache
*cache
,
731 struct anv_memory_type
{
732 /* Standard bits passed on to the client */
733 VkMemoryPropertyFlags propertyFlags
;
736 /* Driver-internal book-keeping */
737 VkBufferUsageFlags valid_buffer_usage
;
740 struct anv_memory_heap
{
741 /* Standard bits passed on to the client */
743 VkMemoryHeapFlags flags
;
745 /* Driver-internal book-keeping */
746 bool supports_48bit_addresses
;
749 struct anv_physical_device
{
750 VK_LOADER_DATA _loader_data
;
752 struct anv_instance
* instance
;
756 struct gen_device_info info
;
757 /** Amount of "GPU memory" we want to advertise
759 * Clearly, this value is bogus since Intel is a UMA architecture. On
760 * gen7 platforms, we are limited by GTT size unless we want to implement
761 * fine-grained tracking and GTT splitting. On Broadwell and above we are
762 * practically unlimited. However, we will never report more than 3/4 of
763 * the total system ram to try and avoid running out of RAM.
765 bool supports_48bit_addresses
;
766 struct brw_compiler
* compiler
;
767 struct isl_device isl_dev
;
768 int cmd_parser_version
;
772 bool has_syncobj_wait
;
775 uint32_t subslice_total
;
779 struct anv_memory_type types
[VK_MAX_MEMORY_TYPES
];
781 struct anv_memory_heap heaps
[VK_MAX_MEMORY_HEAPS
];
784 uint8_t pipeline_cache_uuid
[VK_UUID_SIZE
];
785 uint8_t driver_uuid
[VK_UUID_SIZE
];
786 uint8_t device_uuid
[VK_UUID_SIZE
];
788 struct wsi_device wsi_device
;
792 struct anv_debug_report_callback
{
793 /* Link in the 'callbacks' list in anv_instance struct. */
794 struct list_head link
;
795 VkDebugReportFlagsEXT flags
;
796 PFN_vkDebugReportCallbackEXT callback
;
800 struct anv_instance
{
801 VK_LOADER_DATA _loader_data
;
803 VkAllocationCallbacks alloc
;
806 int physicalDeviceCount
;
807 struct anv_physical_device physicalDevice
;
809 /* VK_EXT_debug_report debug callbacks */
810 pthread_mutex_t callbacks_mutex
;
811 struct list_head callbacks
;
812 struct anv_debug_report_callback destroy_debug_cb
;
815 VkResult
anv_init_wsi(struct anv_physical_device
*physical_device
);
816 void anv_finish_wsi(struct anv_physical_device
*physical_device
);
818 bool anv_instance_extension_supported(const char *name
);
819 uint32_t anv_physical_device_api_version(struct anv_physical_device
*dev
);
820 bool anv_physical_device_extension_supported(struct anv_physical_device
*dev
,
824 VK_LOADER_DATA _loader_data
;
826 struct anv_device
* device
;
828 struct anv_state_pool
* pool
;
831 struct anv_pipeline_cache
{
832 struct anv_device
* device
;
833 pthread_mutex_t mutex
;
835 struct hash_table
* cache
;
838 struct anv_pipeline_bind_map
;
840 void anv_pipeline_cache_init(struct anv_pipeline_cache
*cache
,
841 struct anv_device
*device
,
843 void anv_pipeline_cache_finish(struct anv_pipeline_cache
*cache
);
845 struct anv_shader_bin
*
846 anv_pipeline_cache_search(struct anv_pipeline_cache
*cache
,
847 const void *key
, uint32_t key_size
);
848 struct anv_shader_bin
*
849 anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache
*cache
,
850 const void *key_data
, uint32_t key_size
,
851 const void *kernel_data
, uint32_t kernel_size
,
852 const struct brw_stage_prog_data
*prog_data
,
853 uint32_t prog_data_size
,
854 const struct anv_pipeline_bind_map
*bind_map
);
857 VK_LOADER_DATA _loader_data
;
859 VkAllocationCallbacks alloc
;
861 struct anv_instance
* instance
;
863 struct gen_device_info info
;
864 struct isl_device isl_dev
;
867 bool can_chain_batches
;
868 bool robust_buffer_access
;
870 struct anv_bo_pool batch_bo_pool
;
872 struct anv_bo_cache bo_cache
;
874 struct anv_state_pool dynamic_state_pool
;
875 struct anv_state_pool instruction_state_pool
;
876 struct anv_state_pool surface_state_pool
;
878 struct anv_bo workaround_bo
;
879 struct anv_bo trivial_batch_bo
;
881 struct anv_pipeline_cache blorp_shader_cache
;
882 struct blorp_context blorp
;
884 struct anv_state border_colors
;
886 struct anv_queue queue
;
888 struct anv_scratch_pool scratch_pool
;
890 uint32_t default_mocs
;
892 pthread_mutex_t mutex
;
893 pthread_cond_t queue_submit
;
898 anv_state_flush(struct anv_device
*device
, struct anv_state state
)
900 if (device
->info
.has_llc
)
903 gen_flush_range(state
.map
, state
.alloc_size
);
906 void anv_device_init_blorp(struct anv_device
*device
);
907 void anv_device_finish_blorp(struct anv_device
*device
);
909 VkResult
anv_device_execbuf(struct anv_device
*device
,
910 struct drm_i915_gem_execbuffer2
*execbuf
,
911 struct anv_bo
**execbuf_bos
);
912 VkResult
anv_device_query_status(struct anv_device
*device
);
913 VkResult
anv_device_bo_busy(struct anv_device
*device
, struct anv_bo
*bo
);
914 VkResult
anv_device_wait(struct anv_device
*device
, struct anv_bo
*bo
,
917 void* anv_gem_mmap(struct anv_device
*device
,
918 uint32_t gem_handle
, uint64_t offset
, uint64_t size
, uint32_t flags
);
919 void anv_gem_munmap(void *p
, uint64_t size
);
920 uint32_t anv_gem_create(struct anv_device
*device
, uint64_t size
);
921 void anv_gem_close(struct anv_device
*device
, uint32_t gem_handle
);
922 uint32_t anv_gem_userptr(struct anv_device
*device
, void *mem
, size_t size
);
923 int anv_gem_busy(struct anv_device
*device
, uint32_t gem_handle
);
924 int anv_gem_wait(struct anv_device
*device
, uint32_t gem_handle
, int64_t *timeout_ns
);
925 int anv_gem_execbuffer(struct anv_device
*device
,
926 struct drm_i915_gem_execbuffer2
*execbuf
);
927 int anv_gem_set_tiling(struct anv_device
*device
, uint32_t gem_handle
,
928 uint32_t stride
, uint32_t tiling
);
929 int anv_gem_create_context(struct anv_device
*device
);
930 int anv_gem_destroy_context(struct anv_device
*device
, int context
);
931 int anv_gem_get_context_param(int fd
, int context
, uint32_t param
,
933 int anv_gem_get_param(int fd
, uint32_t param
);
934 bool anv_gem_get_bit6_swizzle(int fd
, uint32_t tiling
);
935 int anv_gem_get_aperture(int fd
, uint64_t *size
);
936 bool anv_gem_supports_48b_addresses(int fd
);
937 int anv_gem_gpu_get_reset_stats(struct anv_device
*device
,
938 uint32_t *active
, uint32_t *pending
);
939 int anv_gem_handle_to_fd(struct anv_device
*device
, uint32_t gem_handle
);
940 uint32_t anv_gem_fd_to_handle(struct anv_device
*device
, int fd
);
941 int anv_gem_set_caching(struct anv_device
*device
, uint32_t gem_handle
, uint32_t caching
);
942 int anv_gem_set_domain(struct anv_device
*device
, uint32_t gem_handle
,
943 uint32_t read_domains
, uint32_t write_domain
);
944 int anv_gem_sync_file_merge(struct anv_device
*device
, int fd1
, int fd2
);
945 uint32_t anv_gem_syncobj_create(struct anv_device
*device
, uint32_t flags
);
946 void anv_gem_syncobj_destroy(struct anv_device
*device
, uint32_t handle
);
947 int anv_gem_syncobj_handle_to_fd(struct anv_device
*device
, uint32_t handle
);
948 uint32_t anv_gem_syncobj_fd_to_handle(struct anv_device
*device
, int fd
);
949 int anv_gem_syncobj_export_sync_file(struct anv_device
*device
,
951 int anv_gem_syncobj_import_sync_file(struct anv_device
*device
,
952 uint32_t handle
, int fd
);
953 void anv_gem_syncobj_reset(struct anv_device
*device
, uint32_t handle
);
954 bool anv_gem_supports_syncobj_wait(int fd
);
955 int anv_gem_syncobj_wait(struct anv_device
*device
,
956 uint32_t *handles
, uint32_t num_handles
,
957 int64_t abs_timeout_ns
, bool wait_all
);
959 VkResult
anv_bo_init_new(struct anv_bo
*bo
, struct anv_device
*device
, uint64_t size
);
961 struct anv_reloc_list
{
963 uint32_t array_length
;
964 struct drm_i915_gem_relocation_entry
* relocs
;
965 struct anv_bo
** reloc_bos
;
968 VkResult
anv_reloc_list_init(struct anv_reloc_list
*list
,
969 const VkAllocationCallbacks
*alloc
);
970 void anv_reloc_list_finish(struct anv_reloc_list
*list
,
971 const VkAllocationCallbacks
*alloc
);
973 VkResult
anv_reloc_list_add(struct anv_reloc_list
*list
,
974 const VkAllocationCallbacks
*alloc
,
975 uint32_t offset
, struct anv_bo
*target_bo
,
978 struct anv_batch_bo
{
979 /* Link in the anv_cmd_buffer.owned_batch_bos list */
980 struct list_head link
;
984 /* Bytes actually consumed in this batch BO */
987 struct anv_reloc_list relocs
;
991 const VkAllocationCallbacks
* alloc
;
997 struct anv_reloc_list
* relocs
;
999 /* This callback is called (with the associated user data) in the event
1000 * that the batch runs out of space.
1002 VkResult (*extend_cb
)(struct anv_batch
*, void *);
1006 * Current error status of the command buffer. Used to track inconsistent
1007 * or incomplete command buffer states that are the consequence of run-time
1008 * errors such as out of memory scenarios. We want to track this in the
1009 * batch because the command buffer object is not visible to some parts
1015 void *anv_batch_emit_dwords(struct anv_batch
*batch
, int num_dwords
);
1016 void anv_batch_emit_batch(struct anv_batch
*batch
, struct anv_batch
*other
);
1017 uint64_t anv_batch_emit_reloc(struct anv_batch
*batch
,
1018 void *location
, struct anv_bo
*bo
, uint32_t offset
);
1019 VkResult
anv_device_submit_simple_batch(struct anv_device
*device
,
1020 struct anv_batch
*batch
);
1022 static inline VkResult
1023 anv_batch_set_error(struct anv_batch
*batch
, VkResult error
)
1025 assert(error
!= VK_SUCCESS
);
1026 if (batch
->status
== VK_SUCCESS
)
1027 batch
->status
= error
;
1028 return batch
->status
;
1032 anv_batch_has_error(struct anv_batch
*batch
)
1034 return batch
->status
!= VK_SUCCESS
;
1037 struct anv_address
{
1042 static inline uint64_t
1043 _anv_combine_address(struct anv_batch
*batch
, void *location
,
1044 const struct anv_address address
, uint32_t delta
)
1046 if (address
.bo
== NULL
) {
1047 return address
.offset
+ delta
;
1049 assert(batch
->start
<= location
&& location
< batch
->end
);
1051 return anv_batch_emit_reloc(batch
, location
, address
.bo
, address
.offset
+ delta
);
1055 #define __gen_address_type struct anv_address
1056 #define __gen_user_data struct anv_batch
1057 #define __gen_combine_address _anv_combine_address
1059 /* Wrapper macros needed to work around preprocessor argument issues. In
1060 * particular, arguments don't get pre-evaluated if they are concatenated.
1061 * This means that, if you pass GENX(3DSTATE_PS) into the emit macro, the
1062 * GENX macro won't get evaluated if the emit macro contains "cmd ## foo".
1063 * We can work around this easily enough with these helpers.
1065 #define __anv_cmd_length(cmd) cmd ## _length
1066 #define __anv_cmd_length_bias(cmd) cmd ## _length_bias
1067 #define __anv_cmd_header(cmd) cmd ## _header
1068 #define __anv_cmd_pack(cmd) cmd ## _pack
1069 #define __anv_reg_num(reg) reg ## _num
1071 #define anv_pack_struct(dst, struc, ...) do { \
1072 struct struc __template = { \
1075 __anv_cmd_pack(struc)(NULL, dst, &__template); \
1076 VG(VALGRIND_CHECK_MEM_IS_DEFINED(dst, __anv_cmd_length(struc) * 4)); \
1079 #define anv_batch_emitn(batch, n, cmd, ...) ({ \
1080 void *__dst = anv_batch_emit_dwords(batch, n); \
1082 struct cmd __template = { \
1083 __anv_cmd_header(cmd), \
1084 .DWordLength = n - __anv_cmd_length_bias(cmd), \
1087 __anv_cmd_pack(cmd)(batch, __dst, &__template); \
1092 #define anv_batch_emit_merge(batch, dwords0, dwords1) \
1096 STATIC_ASSERT(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1)); \
1097 dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0)); \
1100 for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++) \
1101 dw[i] = (dwords0)[i] | (dwords1)[i]; \
1102 VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, ARRAY_SIZE(dwords0) * 4));\
1105 #define anv_batch_emit(batch, cmd, name) \
1106 for (struct cmd name = { __anv_cmd_header(cmd) }, \
1107 *_dst = anv_batch_emit_dwords(batch, __anv_cmd_length(cmd)); \
1108 __builtin_expect(_dst != NULL, 1); \
1109 ({ __anv_cmd_pack(cmd)(batch, _dst, &name); \
1110 VG(VALGRIND_CHECK_MEM_IS_DEFINED(_dst, __anv_cmd_length(cmd) * 4)); \
1114 #define GEN7_MOCS (struct GEN7_MEMORY_OBJECT_CONTROL_STATE) { \
1115 .GraphicsDataTypeGFDT = 0, \
1116 .LLCCacheabilityControlLLCCC = 0, \
1117 .L3CacheabilityControlL3CC = 1, \
1120 #define GEN75_MOCS (struct GEN75_MEMORY_OBJECT_CONTROL_STATE) { \
1121 .LLCeLLCCacheabilityControlLLCCC = 0, \
1122 .L3CacheabilityControlL3CC = 1, \
1125 #define GEN8_MOCS (struct GEN8_MEMORY_OBJECT_CONTROL_STATE) { \
1126 .MemoryTypeLLCeLLCCacheabilityControl = WB, \
1127 .TargetCache = L3DefertoPATforLLCeLLCselection, \
1128 .AgeforQUADLRU = 0 \
1131 /* Skylake: MOCS is now an index into an array of 62 different caching
1132 * configurations programmed by the kernel.
1135 #define GEN9_MOCS (struct GEN9_MEMORY_OBJECT_CONTROL_STATE) { \
1136 /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
1137 .IndextoMOCSTables = 2 \
1140 #define GEN9_MOCS_PTE { \
1141 /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
1142 .IndextoMOCSTables = 1 \
1145 /* Cannonlake MOCS defines are duplicates of Skylake MOCS defines. */
1146 #define GEN10_MOCS (struct GEN10_MEMORY_OBJECT_CONTROL_STATE) { \
1147 /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
1148 .IndextoMOCSTables = 2 \
1151 #define GEN10_MOCS_PTE { \
1152 /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
1153 .IndextoMOCSTables = 1 \
1156 struct anv_device_memory
{
1158 struct anv_memory_type
* type
;
1159 VkDeviceSize map_size
;
1164 * Header for Vertex URB Entry (VUE)
1166 struct anv_vue_header
{
1168 uint32_t RTAIndex
; /* RenderTargetArrayIndex */
1169 uint32_t ViewportIndex
;
1173 struct anv_descriptor_set_binding_layout
{
1175 /* The type of the descriptors in this binding */
1176 VkDescriptorType type
;
1179 /* Number of array elements in this binding */
1180 uint16_t array_size
;
1182 /* Index into the flattend descriptor set */
1183 uint16_t descriptor_index
;
1185 /* Index into the dynamic state array for a dynamic buffer */
1186 int16_t dynamic_offset_index
;
1188 /* Index into the descriptor set buffer views */
1189 int16_t buffer_index
;
1192 /* Index into the binding table for the associated surface */
1193 int16_t surface_index
;
1195 /* Index into the sampler table for the associated sampler */
1196 int16_t sampler_index
;
1198 /* Index into the image table for the associated image */
1199 int16_t image_index
;
1200 } stage
[MESA_SHADER_STAGES
];
1202 /* Immutable samplers (or NULL if no immutable samplers) */
1203 struct anv_sampler
**immutable_samplers
;
1206 struct anv_descriptor_set_layout
{
1207 /* Number of bindings in this descriptor set */
1208 uint16_t binding_count
;
1210 /* Total size of the descriptor set with room for all array entries */
1213 /* Shader stages affected by this descriptor set */
1214 uint16_t shader_stages
;
1216 /* Number of buffers in this descriptor set */
1217 uint16_t buffer_count
;
1219 /* Number of dynamic offsets used by this descriptor set */
1220 uint16_t dynamic_offset_count
;
1222 /* Bindings in this descriptor set */
1223 struct anv_descriptor_set_binding_layout binding
[0];
1226 struct anv_descriptor
{
1227 VkDescriptorType type
;
1231 VkImageLayout layout
;
1232 struct anv_image_view
*image_view
;
1233 struct anv_sampler
*sampler
;
1237 struct anv_buffer
*buffer
;
1242 struct anv_buffer_view
*buffer_view
;
1246 struct anv_descriptor_set
{
1247 const struct anv_descriptor_set_layout
*layout
;
1249 uint32_t buffer_count
;
1250 struct anv_buffer_view
*buffer_views
;
1251 struct anv_descriptor descriptors
[0];
1254 struct anv_buffer_view
{
1255 enum isl_format format
; /**< VkBufferViewCreateInfo::format */
1257 uint32_t offset
; /**< Offset into bo. */
1258 uint64_t range
; /**< VkBufferViewCreateInfo::range */
1260 struct anv_state surface_state
;
1261 struct anv_state storage_surface_state
;
1262 struct anv_state writeonly_storage_surface_state
;
1264 struct brw_image_param storage_image_param
;
1267 struct anv_push_descriptor_set
{
1268 struct anv_descriptor_set set
;
1270 /* Put this field right behind anv_descriptor_set so it fills up the
1271 * descriptors[0] field. */
1272 struct anv_descriptor descriptors
[MAX_PUSH_DESCRIPTORS
];
1273 struct anv_buffer_view buffer_views
[MAX_PUSH_DESCRIPTORS
];
1276 struct anv_descriptor_pool
{
1281 struct anv_state_stream surface_state_stream
;
1282 void *surface_state_free_list
;
1287 enum anv_descriptor_template_entry_type
{
1288 ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_IMAGE
,
1289 ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_BUFFER
,
1290 ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_BUFFER_VIEW
1293 struct anv_descriptor_template_entry
{
1294 /* The type of descriptor in this entry */
1295 VkDescriptorType type
;
1297 /* Binding in the descriptor set */
1300 /* Offset at which to write into the descriptor set binding */
1301 uint32_t array_element
;
1303 /* Number of elements to write into the descriptor set binding */
1304 uint32_t array_count
;
1306 /* Offset into the user provided data */
1309 /* Stride between elements into the user provided data */
1313 struct anv_descriptor_update_template
{
1314 /* The descriptor set this template corresponds to. This value is only
1315 * valid if the template was created with the templateType
1316 * VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR.
1320 /* Number of entries in this template */
1321 uint32_t entry_count
;
1323 /* Entries of the template */
1324 struct anv_descriptor_template_entry entries
[0];
1328 anv_descriptor_set_binding_layout_get_hw_size(const struct anv_descriptor_set_binding_layout
*binding
);
1331 anv_descriptor_set_layout_size(const struct anv_descriptor_set_layout
*layout
);
1334 anv_descriptor_set_write_image_view(struct anv_descriptor_set
*set
,
1335 const struct gen_device_info
* const devinfo
,
1336 const VkDescriptorImageInfo
* const info
,
1337 VkDescriptorType type
,
1342 anv_descriptor_set_write_buffer_view(struct anv_descriptor_set
*set
,
1343 VkDescriptorType type
,
1344 struct anv_buffer_view
*buffer_view
,
1349 anv_descriptor_set_write_buffer(struct anv_descriptor_set
*set
,
1350 struct anv_device
*device
,
1351 struct anv_state_stream
*alloc_stream
,
1352 VkDescriptorType type
,
1353 struct anv_buffer
*buffer
,
1356 VkDeviceSize offset
,
1357 VkDeviceSize range
);
1360 anv_descriptor_set_write_template(struct anv_descriptor_set
*set
,
1361 struct anv_device
*device
,
1362 struct anv_state_stream
*alloc_stream
,
1363 const struct anv_descriptor_update_template
*template,
1367 anv_descriptor_set_create(struct anv_device
*device
,
1368 struct anv_descriptor_pool
*pool
,
1369 const struct anv_descriptor_set_layout
*layout
,
1370 struct anv_descriptor_set
**out_set
);
1373 anv_descriptor_set_destroy(struct anv_device
*device
,
1374 struct anv_descriptor_pool
*pool
,
1375 struct anv_descriptor_set
*set
);
1377 #define ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS UINT8_MAX
1379 struct anv_pipeline_binding
{
1380 /* The descriptor set this surface corresponds to. The special value of
1381 * ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS indicates that the offset refers
1382 * to a color attachment and not a regular descriptor.
1386 /* Binding in the descriptor set */
1389 /* Index in the binding */
1392 /* Plane in the binding index */
1395 /* Input attachment index (relative to the subpass) */
1396 uint8_t input_attachment_index
;
1398 /* For a storage image, whether it is write-only */
1402 struct anv_pipeline_layout
{
1404 struct anv_descriptor_set_layout
*layout
;
1405 uint32_t dynamic_offset_start
;
1411 bool has_dynamic_offsets
;
1412 } stage
[MESA_SHADER_STAGES
];
1414 unsigned char sha1
[20];
1418 struct anv_device
* device
;
1421 VkBufferUsageFlags usage
;
1423 /* Set when bound */
1425 VkDeviceSize offset
;
1428 static inline uint64_t
1429 anv_buffer_get_range(struct anv_buffer
*buffer
, uint64_t offset
, uint64_t range
)
1431 assert(offset
<= buffer
->size
);
1432 if (range
== VK_WHOLE_SIZE
) {
1433 return buffer
->size
- offset
;
1435 assert(range
<= buffer
->size
);
1440 enum anv_cmd_dirty_bits
{
1441 ANV_CMD_DIRTY_DYNAMIC_VIEWPORT
= 1 << 0, /* VK_DYNAMIC_STATE_VIEWPORT */
1442 ANV_CMD_DIRTY_DYNAMIC_SCISSOR
= 1 << 1, /* VK_DYNAMIC_STATE_SCISSOR */
1443 ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH
= 1 << 2, /* VK_DYNAMIC_STATE_LINE_WIDTH */
1444 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS
= 1 << 3, /* VK_DYNAMIC_STATE_DEPTH_BIAS */
1445 ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS
= 1 << 4, /* VK_DYNAMIC_STATE_BLEND_CONSTANTS */
1446 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS
= 1 << 5, /* VK_DYNAMIC_STATE_DEPTH_BOUNDS */
1447 ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK
= 1 << 6, /* VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK */
1448 ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK
= 1 << 7, /* VK_DYNAMIC_STATE_STENCIL_WRITE_MASK */
1449 ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE
= 1 << 8, /* VK_DYNAMIC_STATE_STENCIL_REFERENCE */
1450 ANV_CMD_DIRTY_DYNAMIC_ALL
= (1 << 9) - 1,
1451 ANV_CMD_DIRTY_PIPELINE
= 1 << 9,
1452 ANV_CMD_DIRTY_INDEX_BUFFER
= 1 << 10,
1453 ANV_CMD_DIRTY_RENDER_TARGETS
= 1 << 11,
1455 typedef uint32_t anv_cmd_dirty_mask_t
;
1457 enum anv_pipe_bits
{
1458 ANV_PIPE_DEPTH_CACHE_FLUSH_BIT
= (1 << 0),
1459 ANV_PIPE_STALL_AT_SCOREBOARD_BIT
= (1 << 1),
1460 ANV_PIPE_STATE_CACHE_INVALIDATE_BIT
= (1 << 2),
1461 ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT
= (1 << 3),
1462 ANV_PIPE_VF_CACHE_INVALIDATE_BIT
= (1 << 4),
1463 ANV_PIPE_DATA_CACHE_FLUSH_BIT
= (1 << 5),
1464 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT
= (1 << 10),
1465 ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT
= (1 << 11),
1466 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT
= (1 << 12),
1467 ANV_PIPE_DEPTH_STALL_BIT
= (1 << 13),
1468 ANV_PIPE_CS_STALL_BIT
= (1 << 20),
1470 /* This bit does not exist directly in PIPE_CONTROL. Instead it means that
1471 * a flush has happened but not a CS stall. The next time we do any sort
1472 * of invalidation we need to insert a CS stall at that time. Otherwise,
1473 * we would have to CS stall on every flush which could be bad.
1475 ANV_PIPE_NEEDS_CS_STALL_BIT
= (1 << 21),
1478 #define ANV_PIPE_FLUSH_BITS ( \
1479 ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | \
1480 ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
1481 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT)
1483 #define ANV_PIPE_STALL_BITS ( \
1484 ANV_PIPE_STALL_AT_SCOREBOARD_BIT | \
1485 ANV_PIPE_DEPTH_STALL_BIT | \
1486 ANV_PIPE_CS_STALL_BIT)
1488 #define ANV_PIPE_INVALIDATE_BITS ( \
1489 ANV_PIPE_STATE_CACHE_INVALIDATE_BIT | \
1490 ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT | \
1491 ANV_PIPE_VF_CACHE_INVALIDATE_BIT | \
1492 ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
1493 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT | \
1494 ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT)
1496 static inline enum anv_pipe_bits
1497 anv_pipe_flush_bits_for_access_flags(VkAccessFlags flags
)
1499 enum anv_pipe_bits pipe_bits
= 0;
1502 for_each_bit(b
, flags
) {
1503 switch ((VkAccessFlagBits
)(1 << b
)) {
1504 case VK_ACCESS_SHADER_WRITE_BIT
:
1505 pipe_bits
|= ANV_PIPE_DATA_CACHE_FLUSH_BIT
;
1507 case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT
:
1508 pipe_bits
|= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT
;
1510 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT
:
1511 pipe_bits
|= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT
;
1513 case VK_ACCESS_TRANSFER_WRITE_BIT
:
1514 pipe_bits
|= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT
;
1515 pipe_bits
|= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT
;
1518 break; /* Nothing to do */
1525 static inline enum anv_pipe_bits
1526 anv_pipe_invalidate_bits_for_access_flags(VkAccessFlags flags
)
1528 enum anv_pipe_bits pipe_bits
= 0;
1531 for_each_bit(b
, flags
) {
1532 switch ((VkAccessFlagBits
)(1 << b
)) {
1533 case VK_ACCESS_INDIRECT_COMMAND_READ_BIT
:
1534 case VK_ACCESS_INDEX_READ_BIT
:
1535 case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT
:
1536 pipe_bits
|= ANV_PIPE_VF_CACHE_INVALIDATE_BIT
;
1538 case VK_ACCESS_UNIFORM_READ_BIT
:
1539 pipe_bits
|= ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT
;
1540 pipe_bits
|= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT
;
1542 case VK_ACCESS_SHADER_READ_BIT
:
1543 case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT
:
1544 case VK_ACCESS_TRANSFER_READ_BIT
:
1545 pipe_bits
|= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT
;
1548 break; /* Nothing to do */
1555 #define VK_IMAGE_ASPECT_ANY_COLOR_BIT ( \
1556 VK_IMAGE_ASPECT_COLOR_BIT | \
1557 VK_IMAGE_ASPECT_PLANE_0_BIT_KHR | \
1558 VK_IMAGE_ASPECT_PLANE_1_BIT_KHR | \
1559 VK_IMAGE_ASPECT_PLANE_2_BIT_KHR)
1560 #define VK_IMAGE_ASPECT_PLANES_BITS ( \
1561 VK_IMAGE_ASPECT_PLANE_0_BIT_KHR | \
1562 VK_IMAGE_ASPECT_PLANE_1_BIT_KHR | \
1563 VK_IMAGE_ASPECT_PLANE_2_BIT_KHR)
1565 struct anv_vertex_binding
{
1566 struct anv_buffer
* buffer
;
1567 VkDeviceSize offset
;
1570 struct anv_push_constants
{
1571 /* Current allocated size of this push constants data structure.
1572 * Because a decent chunk of it may not be used (images on SKL, for
1573 * instance), we won't actually allocate the entire structure up-front.
1577 /* Push constant data provided by the client through vkPushConstants */
1578 uint8_t client_data
[MAX_PUSH_CONSTANTS_SIZE
];
1580 /* Image data for image_load_store on pre-SKL */
1581 struct brw_image_param images
[MAX_IMAGES
];
1584 struct anv_dynamic_state
{
1587 VkViewport viewports
[MAX_VIEWPORTS
];
1592 VkRect2D scissors
[MAX_SCISSORS
];
1603 float blend_constants
[4];
1613 } stencil_compare_mask
;
1618 } stencil_write_mask
;
1623 } stencil_reference
;
1626 extern const struct anv_dynamic_state default_dynamic_state
;
1628 void anv_dynamic_state_copy(struct anv_dynamic_state
*dest
,
1629 const struct anv_dynamic_state
*src
,
1630 uint32_t copy_mask
);
1632 struct anv_surface_state
{
1633 struct anv_state state
;
1634 /** Address of the surface referred to by this state
1636 * This address is relative to the start of the BO.
1639 /* Address of the aux surface, if any
1641 * This field is 0 if and only if no aux surface exists.
1643 * This address is relative to the start of the BO. On gen7, the bottom 12
1644 * bits of this address include extra aux information.
1646 uint64_t aux_address
;
1650 * Attachment state when recording a renderpass instance.
1652 * The clear value is valid only if there exists a pending clear.
1654 struct anv_attachment_state
{
1655 enum isl_aux_usage aux_usage
;
1656 enum isl_aux_usage input_aux_usage
;
1657 struct anv_surface_state color
;
1658 struct anv_surface_state input
;
1660 VkImageLayout current_layout
;
1661 VkImageAspectFlags pending_clear_aspects
;
1663 VkClearValue clear_value
;
1664 bool clear_color_is_zero_one
;
1665 bool clear_color_is_zero
;
1668 /** State required while building cmd buffer */
1669 struct anv_cmd_state
{
1670 /* PIPELINE_SELECT.PipelineSelection */
1671 uint32_t current_pipeline
;
1672 const struct gen_l3_config
* current_l3_config
;
1674 anv_cmd_dirty_mask_t dirty
;
1675 anv_cmd_dirty_mask_t compute_dirty
;
1676 enum anv_pipe_bits pending_pipe_bits
;
1677 uint32_t num_workgroups_offset
;
1678 struct anv_bo
*num_workgroups_bo
;
1679 VkShaderStageFlags descriptors_dirty
;
1680 VkShaderStageFlags push_constants_dirty
;
1681 uint32_t scratch_size
;
1682 struct anv_pipeline
* pipeline
;
1683 struct anv_pipeline
* compute_pipeline
;
1684 struct anv_framebuffer
* framebuffer
;
1685 struct anv_render_pass
* pass
;
1686 struct anv_subpass
* subpass
;
1687 VkRect2D render_area
;
1688 uint32_t restart_index
;
1689 struct anv_vertex_binding vertex_bindings
[MAX_VBS
];
1690 struct anv_descriptor_set
* descriptors
[MAX_SETS
];
1691 uint32_t dynamic_offsets
[MAX_DYNAMIC_BUFFERS
];
1692 VkShaderStageFlags push_constant_stages
;
1693 struct anv_push_constants
* push_constants
[MESA_SHADER_STAGES
];
1694 struct anv_state binding_tables
[MESA_SHADER_STAGES
];
1695 struct anv_state samplers
[MESA_SHADER_STAGES
];
1696 struct anv_dynamic_state dynamic
;
1699 struct anv_push_descriptor_set
* push_descriptors
[MAX_SETS
];
1702 * Whether or not the gen8 PMA fix is enabled. We ensure that, at the top
1703 * of any command buffer it is disabled by disabling it in EndCommandBuffer
1704 * and before invoking the secondary in ExecuteCommands.
1706 bool pma_fix_enabled
;
1709 * Whether or not we know for certain that HiZ is enabled for the current
1710 * subpass. If, for whatever reason, we are unsure as to whether HiZ is
1711 * enabled or not, this will be false.
1716 * Array length is anv_cmd_state::pass::attachment_count. Array content is
1717 * valid only when recording a render pass instance.
1719 struct anv_attachment_state
* attachments
;
1722 * Surface states for color render targets. These are stored in a single
1723 * flat array. For depth-stencil attachments, the surface state is simply
1726 struct anv_state render_pass_states
;
1729 * A null surface state of the right size to match the framebuffer. This
1730 * is one of the states in render_pass_states.
1732 struct anv_state null_surface_state
;
1735 struct anv_buffer
* index_buffer
;
1736 uint32_t index_type
; /**< 3DSTATE_INDEX_BUFFER.IndexFormat */
1737 uint32_t index_offset
;
1741 struct anv_cmd_pool
{
1742 VkAllocationCallbacks alloc
;
1743 struct list_head cmd_buffers
;
1746 #define ANV_CMD_BUFFER_BATCH_SIZE 8192
1748 enum anv_cmd_buffer_exec_mode
{
1749 ANV_CMD_BUFFER_EXEC_MODE_PRIMARY
,
1750 ANV_CMD_BUFFER_EXEC_MODE_EMIT
,
1751 ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT
,
1752 ANV_CMD_BUFFER_EXEC_MODE_CHAIN
,
1753 ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN
,
1756 struct anv_cmd_buffer
{
1757 VK_LOADER_DATA _loader_data
;
1759 struct anv_device
* device
;
1761 struct anv_cmd_pool
* pool
;
1762 struct list_head pool_link
;
1764 struct anv_batch batch
;
1766 /* Fields required for the actual chain of anv_batch_bo's.
1768 * These fields are initialized by anv_cmd_buffer_init_batch_bo_chain().
1770 struct list_head batch_bos
;
1771 enum anv_cmd_buffer_exec_mode exec_mode
;
1773 /* A vector of anv_batch_bo pointers for every batch or surface buffer
1774 * referenced by this command buffer
1776 * initialized by anv_cmd_buffer_init_batch_bo_chain()
1778 struct u_vector seen_bbos
;
1780 /* A vector of int32_t's for every block of binding tables.
1782 * initialized by anv_cmd_buffer_init_batch_bo_chain()
1784 struct u_vector bt_block_states
;
1787 struct anv_reloc_list surface_relocs
;
1788 /** Last seen surface state block pool center bo offset */
1789 uint32_t last_ss_pool_center
;
1791 /* Serial for tracking buffer completion */
1794 /* Stream objects for storing temporary data */
1795 struct anv_state_stream surface_state_stream
;
1796 struct anv_state_stream dynamic_state_stream
;
1798 VkCommandBufferUsageFlags usage_flags
;
1799 VkCommandBufferLevel level
;
1801 struct anv_cmd_state state
;
1804 VkResult
anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer
*cmd_buffer
);
1805 void anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer
*cmd_buffer
);
1806 void anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer
*cmd_buffer
);
1807 void anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer
*cmd_buffer
);
1808 void anv_cmd_buffer_add_secondary(struct anv_cmd_buffer
*primary
,
1809 struct anv_cmd_buffer
*secondary
);
1810 void anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer
*cmd_buffer
);
1811 VkResult
anv_cmd_buffer_execbuf(struct anv_device
*device
,
1812 struct anv_cmd_buffer
*cmd_buffer
,
1813 const VkSemaphore
*in_semaphores
,
1814 uint32_t num_in_semaphores
,
1815 const VkSemaphore
*out_semaphores
,
1816 uint32_t num_out_semaphores
,
1819 VkResult
anv_cmd_buffer_reset(struct anv_cmd_buffer
*cmd_buffer
);
1822 anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer
*cmd_buffer
,
1823 gl_shader_stage stage
, uint32_t size
);
1824 #define anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, field) \
1825 anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, \
1826 (offsetof(struct anv_push_constants, field) + \
1827 sizeof(cmd_buffer->state.push_constants[0]->field)))
1829 struct anv_state
anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer
*cmd_buffer
,
1830 const void *data
, uint32_t size
, uint32_t alignment
);
1831 struct anv_state
anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer
*cmd_buffer
,
1832 uint32_t *a
, uint32_t *b
,
1833 uint32_t dwords
, uint32_t alignment
);
1836 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer
*cmd_buffer
);
1838 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer
*cmd_buffer
,
1839 uint32_t entries
, uint32_t *state_offset
);
1841 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer
*cmd_buffer
);
1843 anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer
*cmd_buffer
,
1844 uint32_t size
, uint32_t alignment
);
1847 anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer
*cmd_buffer
);
1849 void gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer
*cmd_buffer
);
1850 void gen8_cmd_buffer_emit_depth_viewport(struct anv_cmd_buffer
*cmd_buffer
,
1851 bool depth_clamp_enable
);
1852 void gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer
*cmd_buffer
);
1854 void anv_cmd_buffer_setup_attachments(struct anv_cmd_buffer
*cmd_buffer
,
1855 struct anv_render_pass
*pass
,
1856 struct anv_framebuffer
*framebuffer
,
1857 const VkClearValue
*clear_values
);
1859 void anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer
*cmd_buffer
);
1862 anv_cmd_buffer_push_constants(struct anv_cmd_buffer
*cmd_buffer
,
1863 gl_shader_stage stage
);
1865 anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer
*cmd_buffer
);
1867 void anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer
*cmd_buffer
);
1868 void anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer
*cmd_buffer
);
1870 const struct anv_image_view
*
1871 anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer
*cmd_buffer
);
1874 anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer
*cmd_buffer
,
1875 uint32_t num_entries
,
1876 uint32_t *state_offset
,
1877 struct anv_state
*bt_state
);
1879 void anv_cmd_buffer_dump(struct anv_cmd_buffer
*cmd_buffer
);
1881 enum anv_fence_type
{
1882 ANV_FENCE_TYPE_NONE
= 0,
1884 ANV_FENCE_TYPE_SYNCOBJ
,
1887 enum anv_bo_fence_state
{
1888 /** Indicates that this is a new (or newly reset fence) */
1889 ANV_BO_FENCE_STATE_RESET
,
1891 /** Indicates that this fence has been submitted to the GPU but is still
1892 * (as far as we know) in use by the GPU.
1894 ANV_BO_FENCE_STATE_SUBMITTED
,
1896 ANV_BO_FENCE_STATE_SIGNALED
,
1899 struct anv_fence_impl
{
1900 enum anv_fence_type type
;
1903 /** Fence implementation for BO fences
1905 * These fences use a BO and a set of CPU-tracked state flags. The BO
1906 * is added to the object list of the last execbuf call in a QueueSubmit
1907 * and is marked EXEC_WRITE. The state flags track when the BO has been
1908 * submitted to the kernel. We need to do this because Vulkan lets you
1909 * wait on a fence that has not yet been submitted and I915_GEM_BUSY
1910 * will say it's idle in this case.
1914 enum anv_bo_fence_state state
;
1917 /** DRM syncobj handle for syncobj-based fences */
struct anv_fence {
   /* Permanent fence state.  Every fence has some form of permanent state
    * (type != ANV_FENCE_TYPE_NONE).  This may be a BO to fence on (for
    * cross-process fences) or it could just be a dummy for use internally.
    */
   struct anv_fence_impl permanent;

   /* Temporary fence state.  A fence *may* have temporary state.  That state
    * is added to the fence by an import operation and is reset back to
    * ANV_FENCE_TYPE_NONE when the fence is reset.  A fence with temporary
    * state cannot be signaled because the fence must already be signaled
    * before the temporary state can be exported from the fence in the other
    * process and imported here.
    */
   struct anv_fence_impl temporary;
};
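/* Illustrative sketch (not part of the driver API): the temporary state, when
 * present, takes precedence over the permanent state, so code that operates
 * on a fence typically resolves the implementation roughly like this
 * (helper name hypothetical):
 *
 *    static inline struct anv_fence_impl *
 *    anv_fence_get_impl(struct anv_fence *fence)
 *    {
 *       return fence->temporary.type != ANV_FENCE_TYPE_NONE ?
 *              &fence->temporary : &fence->permanent;
 *    }
 */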
struct anv_event {
   uint64_t semaphore;
   struct anv_state state;
};
enum anv_semaphore_type {
   ANV_SEMAPHORE_TYPE_NONE = 0,
   ANV_SEMAPHORE_TYPE_DUMMY,
   ANV_SEMAPHORE_TYPE_BO,
   ANV_SEMAPHORE_TYPE_SYNC_FILE,
   ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ,
};
struct anv_semaphore_impl {
   enum anv_semaphore_type type;

   union {
      /* A BO representing this semaphore when type == ANV_SEMAPHORE_TYPE_BO.
       * This BO will be added to the object list on any execbuf2 calls for
       * which this semaphore is used as a wait or signal fence.  When used as
       * a signal fence, the EXEC_OBJECT_WRITE flag will be set.
       */
      struct anv_bo *bo;

      /* The sync file descriptor when type == ANV_SEMAPHORE_TYPE_SYNC_FILE.
       * If the semaphore is in the unsignaled state due to either just being
       * created or because it has been used for a wait, fd will be -1.
       */
      int fd;

      /* Sync object handle when type == ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ.
       * Unlike GEM BOs, DRM sync objects aren't deduplicated by the kernel on
       * import so we don't need to bother with a userspace cache.
       */
      uint32_t syncobj;
   };
};
struct anv_semaphore {
   /* Permanent semaphore state.  Every semaphore has some form of permanent
    * state (type != ANV_SEMAPHORE_TYPE_NONE).  This may be a BO to fence on
    * (for cross-process semaphores) or it could just be a dummy for use
    * internally.
    */
   struct anv_semaphore_impl permanent;

   /* Temporary semaphore state.  A semaphore *may* have temporary state.
    * That state is added to the semaphore by an import operation and is reset
    * back to ANV_SEMAPHORE_TYPE_NONE when the semaphore is waited on.  A
    * semaphore with temporary state cannot be signaled because the semaphore
    * must already be signaled before the temporary state can be exported from
    * the semaphore in the other process and imported here.
    */
   struct anv_semaphore_impl temporary;
};
void anv_semaphore_reset_temporary(struct anv_device *device,
                                   struct anv_semaphore *semaphore);
struct anv_shader_module {
   unsigned char sha1[20];
   uint32_t size;
   char data[0];
};
static inline gl_shader_stage
vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
{
   assert(__builtin_popcount(vk_stage) == 1);
   return ffs(vk_stage) - 1;
}

static inline VkShaderStageFlagBits
mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
{
   return (1 << mesa_stage);
}
#define ANV_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)

#define anv_foreach_stage(stage, stage_bits)                         \
   for (gl_shader_stage stage,                                       \
        __tmp = (gl_shader_stage)((stage_bits) & ANV_STAGE_MASK);    \
        stage = __builtin_ffs(__tmp) - 1, __tmp;                     \
        __tmp &= ~(1 << (stage)))
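/* Illustrative usage (sketch only): iterating over every shader stage that is
 * set in a VkShaderStageFlags mask, e.g. to dirty per-stage state:
 *
 *    anv_foreach_stage(s, pipeline->active_stages) {
 *       cmd_buffer->state.push_constants_dirty |= mesa_to_vk_shader_stage(s);
 *    }
 *
 * The push_constants_dirty member is used here purely as an example.
 */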
struct anv_pipeline_bind_map {
   uint32_t surface_count;
   uint32_t sampler_count;
   uint32_t image_count;

   struct anv_pipeline_binding *surface_to_descriptor;
   struct anv_pipeline_binding *sampler_to_descriptor;
};
struct anv_shader_bin_key {
   uint32_t size;
   uint8_t data[0];
};

struct anv_shader_bin {
   uint32_t ref_cnt;

   const struct anv_shader_bin_key *key;

   struct anv_state kernel;
   uint32_t kernel_size;

   const struct brw_stage_prog_data *prog_data;
   uint32_t prog_data_size;

   struct anv_pipeline_bind_map bind_map;

   /* Prog data follows, then params, then the key, all aligned to 8-bytes */
};
struct anv_shader_bin *
anv_shader_bin_create(struct anv_device *device,
                      const void *key, uint32_t key_size,
                      const void *kernel, uint32_t kernel_size,
                      const struct brw_stage_prog_data *prog_data,
                      uint32_t prog_data_size, const void *prog_data_param,
                      const struct anv_pipeline_bind_map *bind_map);

void
anv_shader_bin_destroy(struct anv_device *device, struct anv_shader_bin *shader);
static inline void
anv_shader_bin_ref(struct anv_shader_bin *shader)
{
   assert(shader && shader->ref_cnt >= 1);
   p_atomic_inc(&shader->ref_cnt);
}

static inline void
anv_shader_bin_unref(struct anv_device *device, struct anv_shader_bin *shader)
{
   assert(shader && shader->ref_cnt >= 1);
   if (p_atomic_dec_zero(&shader->ref_cnt))
      anv_shader_bin_destroy(device, shader);
}
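/* Illustrative usage (sketch only): a cache or pipeline that stores a pointer
 * to a shader binary takes a reference while it holds it and drops it on
 * destruction:
 *
 *    anv_shader_bin_ref(bin);
 *    pipeline->shaders[stage] = bin;
 *    ...
 *    anv_shader_bin_unref(device, pipeline->shaders[stage]);
 */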
struct anv_pipeline {
   struct anv_device *device;
   struct anv_batch batch;
   uint32_t batch_data[512];
   struct anv_reloc_list batch_relocs;
   uint32_t dynamic_state_mask;
   struct anv_dynamic_state dynamic_state;

   struct anv_subpass *subpass;
   struct anv_pipeline_layout *layout;

   bool needs_data_cache;

   struct anv_shader_bin *shaders[MESA_SHADER_STAGES];

   const struct gen_l3_config *l3_config;
   uint32_t total_size;

   VkShaderStageFlags active_stages;
   struct anv_state blend_state;

   uint32_t binding_stride[MAX_VBS];
   bool instancing_enable[MAX_VBS];
   bool primitive_restart;

   uint32_t cs_right_mask;

   bool depth_test_enable;
   bool writes_stencil;
   bool stencil_test_enable;
   bool depth_clamp_enable;
   bool sample_shading_enable;

   struct {
      uint32_t depth_stencil_state[3];
   } gen7;

   struct {
      uint32_t wm_depth_stencil[3];
   } gen8;

   struct {
      uint32_t wm_depth_stencil[4];
   } gen9;

   uint32_t interface_descriptor_data[8];
};
static inline bool
anv_pipeline_has_stage(const struct anv_pipeline *pipeline,
                       gl_shader_stage stage)
{
   return (pipeline->active_stages & mesa_to_vk_shader_stage(stage)) != 0;
}
#define ANV_DECL_GET_PROG_DATA_FUNC(prefix, stage)                   \
static inline const struct brw_##prefix##_prog_data *                \
get_##prefix##_prog_data(const struct anv_pipeline *pipeline)        \
{                                                                    \
   if (anv_pipeline_has_stage(pipeline, stage)) {                    \
      return (const struct brw_##prefix##_prog_data *)               \
             pipeline->shaders[stage]->prog_data;                    \
   } else {                                                          \
      return NULL;                                                   \
   }                                                                 \
}

ANV_DECL_GET_PROG_DATA_FUNC(vs,  MESA_SHADER_VERTEX)
ANV_DECL_GET_PROG_DATA_FUNC(tcs, MESA_SHADER_TESS_CTRL)
ANV_DECL_GET_PROG_DATA_FUNC(tes, MESA_SHADER_TESS_EVAL)
ANV_DECL_GET_PROG_DATA_FUNC(gs,  MESA_SHADER_GEOMETRY)
ANV_DECL_GET_PROG_DATA_FUNC(wm,  MESA_SHADER_FRAGMENT)
ANV_DECL_GET_PROG_DATA_FUNC(cs,  MESA_SHADER_COMPUTE)
static inline const struct brw_vue_prog_data *
anv_pipeline_get_last_vue_prog_data(const struct anv_pipeline *pipeline)
{
   if (anv_pipeline_has_stage(pipeline, MESA_SHADER_GEOMETRY))
      return &get_gs_prog_data(pipeline)->base;
   else if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
      return &get_tes_prog_data(pipeline)->base;
   else
      return &get_vs_prog_data(pipeline)->base;
}
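/* Illustrative usage (sketch only): code that sizes the URB wants the VUE map
 * of whichever geometry stage runs last, without caring which stages are
 * enabled:
 *
 *    const struct brw_vue_prog_data *last_vue =
 *       anv_pipeline_get_last_vue_prog_data(pipeline);
 *    unsigned entry_size = last_vue->urb_entry_size;   // field assumed here
 */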
VkResult
anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
                  struct anv_pipeline_cache *cache,
                  const VkGraphicsPipelineCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *alloc);

VkResult
anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkComputePipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info);
struct anv_format_plane {
   enum isl_format isl_format:16;
   struct isl_swizzle swizzle;

   /* Whether this plane contains chroma channels */
   bool has_chroma;

   /* For downscaling of YUV planes */
   uint8_t denominator_scales[2];

   /* How to map sampled ycbcr planes to a single 4 component element. */
   struct isl_swizzle ycbcr_swizzle;
};

struct anv_format {
   struct anv_format_plane planes[3];
   uint8_t n_planes;
   bool can_ycbcr;
};
static inline uint32_t
anv_image_aspect_to_plane(VkImageAspectFlags image_aspects,
                          VkImageAspectFlags aspect_mask)
{
   switch (aspect_mask) {
   case VK_IMAGE_ASPECT_COLOR_BIT:
   case VK_IMAGE_ASPECT_DEPTH_BIT:
   case VK_IMAGE_ASPECT_PLANE_0_BIT_KHR:
      return 0;

   case VK_IMAGE_ASPECT_STENCIL_BIT:
      if ((image_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) == 0)
         return 0;
      /* Fall-through */
   case VK_IMAGE_ASPECT_PLANE_1_BIT_KHR:
      return 1;

   case VK_IMAGE_ASPECT_PLANE_2_BIT_KHR:
      return 2;

   default:
      /* Purposefully assert with depth/stencil aspects. */
      unreachable("invalid image aspect");
   }
}
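/* Illustrative usage (sketch only): finding the plane that backs the stencil
 * aspect of a combined depth/stencil image before touching its surface:
 *
 *    uint32_t plane = anv_image_aspect_to_plane(image->aspects,
 *                                               VK_IMAGE_ASPECT_STENCIL_BIT);
 *    const struct anv_surface *surf = &image->planes[plane].surface;
 */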
static inline uint32_t
anv_image_aspect_get_planes(VkImageAspectFlags aspect_mask)
{
   uint32_t planes = 0;

   if (aspect_mask & (VK_IMAGE_ASPECT_COLOR_BIT |
                      VK_IMAGE_ASPECT_DEPTH_BIT |
                      VK_IMAGE_ASPECT_STENCIL_BIT |
                      VK_IMAGE_ASPECT_PLANE_0_BIT_KHR))
      planes++;
   if (aspect_mask & VK_IMAGE_ASPECT_PLANE_1_BIT_KHR)
      planes++;
   if (aspect_mask & VK_IMAGE_ASPECT_PLANE_2_BIT_KHR)
      planes++;

   return planes;
}
static inline VkImageAspectFlags
anv_plane_to_aspect(VkImageAspectFlags image_aspects,
                    uint32_t plane)
{
   if (image_aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT) {
      if (_mesa_bitcount(image_aspects) > 1)
         return VK_IMAGE_ASPECT_PLANE_0_BIT_KHR << plane;
      return VK_IMAGE_ASPECT_COLOR_BIT;
   }
   if (image_aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
      return VK_IMAGE_ASPECT_DEPTH_BIT << plane;
   assert(image_aspects == VK_IMAGE_ASPECT_STENCIL_BIT);
   return VK_IMAGE_ASPECT_STENCIL_BIT;
}
#define anv_foreach_image_aspect_bit(b, image, aspects) \
   for_each_bit(b, anv_image_expand_aspects(image, aspects))

const struct anv_format *
anv_get_format(VkFormat format);
static inline uint32_t
anv_get_format_planes(VkFormat vk_format)
{
   const struct anv_format *format = anv_get_format(vk_format);

   return format != NULL ? format->n_planes : 0;
}
struct anv_format_plane
anv_get_format_plane(const struct gen_device_info *devinfo, VkFormat vk_format,
                     VkImageAspectFlags aspect, VkImageTiling tiling);

static inline enum isl_format
anv_get_isl_format(const struct gen_device_info *devinfo, VkFormat vk_format,
                   VkImageAspectFlags aspect, VkImageTiling tiling)
{
   return anv_get_format_plane(devinfo, vk_format, aspect, tiling).isl_format;
}
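/* Illustrative usage (sketch only): looking up the hardware format used for
 * the color aspect of a linear RGBA8 image:
 *
 *    enum isl_format fmt =
 *       anv_get_isl_format(&device->info, VK_FORMAT_R8G8B8A8_UNORM,
 *                          VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_TILING_LINEAR);
 */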
static inline struct isl_swizzle
anv_swizzle_for_render(struct isl_swizzle swizzle)
{
   /* Sometimes the swizzle will have alpha map to one.  We do this to fake
    * RGB as RGBA for texturing.
    */
   assert(swizzle.a == ISL_CHANNEL_SELECT_ONE ||
          swizzle.a == ISL_CHANNEL_SELECT_ALPHA);

   /* But it doesn't matter what we render to that channel */
   swizzle.a = ISL_CHANNEL_SELECT_ALPHA;

   return swizzle;
}
void
anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm);
/**
 * Subsurface of an anv_image.
 */
struct anv_surface {
   /** Valid only if isl_surf::size > 0. */
   struct isl_surf isl;

   /**
    * Offset from VkImage's base address, as bound by vkBindImageMemory().
    */
   uint32_t offset;
};
struct anv_image {
   /* The original VkFormat provided by the client.  This may not match any
    * of the actual surface formats.
    */
   VkFormat vk_format;
   const struct anv_format *format;

   VkImageAspectFlags aspects;

   uint32_t levels;
   uint32_t array_size;
   uint32_t samples;        /**< VkImageCreateInfo::samples */

   VkImageUsageFlags usage; /**< Superset of VkImageCreateInfo::usage. */
   VkImageTiling tiling;    /**< VkImageCreateInfo::tiling */
   /* Whether the image is made of several underlying buffer objects rather a
    * single one with different offsets.
    */
   bool disjoint;

   /**
    * Image subsurfaces
    *
    * For each foo, anv_image::planes[x].surface is valid if and only if
    * anv_image::aspects has a x aspect. Refer to anv_image_aspect_to_plane()
    * to figure the number associated with a given aspect.
    *
    * The hardware requires that the depth buffer and stencil buffer be
    * separate surfaces. From Vulkan's perspective, though, depth and stencil
    * reside in the same VkImage. To satisfy both the hardware and Vulkan, we
    * allocate the depth and stencil buffers as separate surfaces in the same
    * bo.
    *
    * Memory layout :
    *
    * -----------------------
    * |      surface0       |   /|\
    * -----------------------    |
    * |   shadow surface0   |    |
    * -----------------------    | Plane 0
    * |    aux surface0     |    |
    * -----------------------    |
    * | fast clear colors0  |   \|/
    * -----------------------
    * |      surface1       |   /|\
    * -----------------------    |
    * |   shadow surface1   |    |
    * -----------------------    | Plane 1
    * |    aux surface1     |    |
    * -----------------------    |
    * | fast clear colors1  |   \|/
    * -----------------------
    * |        ...          |
    * -----------------------
    */
   struct {
      /**
       * Offset of the entire plane (whenever the image is disjoint this is
       * set to 0).
       */
      uint32_t offset;
      struct anv_surface surface;

      /**
       * A surface which shadows the main surface and may have different
       * tiling. This is used for sampling using a tiling that isn't supported
       * for other operations.
       */
      struct anv_surface shadow_surface;

      /**
       * For color images, this is the aux usage for this image when not used
       * as a color attachment.
       *
       * For depth/stencil images, this is set to ISL_AUX_USAGE_HIZ if the
       * image has a HiZ buffer.
       */
      enum isl_aux_usage aux_usage;

      struct anv_surface aux_surface;

      /**
       * Offset of the fast clear state (used to compute the
       * fast_clear_state_offset of the following planes).
       */
      uint32_t fast_clear_state_offset;

      /**
       * BO associated with this plane, set when bound.
       */
      struct anv_bo *bo;
      VkDeviceSize bo_offset;
   } planes[3];
};
/* Returns the number of auxiliary buffer levels attached to an image. */
static inline uint8_t
anv_image_aux_levels(const struct anv_image * const image,
                     VkImageAspectFlagBits aspect)
{
   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
   return image->planes[plane].aux_surface.isl.size > 0 ?
          image->planes[plane].aux_surface.isl.levels : 0;
}
/* Returns the number of auxiliary buffer layers attached to an image. */
static inline uint32_t
anv_image_aux_layers(const struct anv_image * const image,
                     VkImageAspectFlagBits aspect,
                     const uint8_t miplevel)
{
   /* The miplevel must exist in the main buffer. */
   assert(miplevel < image->levels);

   if (miplevel >= anv_image_aux_levels(image, aspect)) {
      /* There are no layers with auxiliary data because the miplevel has no
       * auxiliary data.
       */
      return 0;
   }

   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
   return MAX2(image->planes[plane].aux_surface.isl.logical_level0_px.array_len,
               image->planes[plane].aux_surface.isl.logical_level0_px.depth >> miplevel);
}
static inline unsigned
anv_fast_clear_state_entry_size(const struct anv_device *device)
{
   /*
    *   +--------------------------------------------+
    *   | clear value dword(s) | needs resolve dword |
    *   +--------------------------------------------+
    */

   /* Ensure that the needs resolve dword is in fact dword-aligned to enable
    * GPU memcpy operations.
    */
   assert(device->isl_dev.ss.clear_value_size % 4 == 0);
   return device->isl_dev.ss.clear_value_size + 4;
}
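/* Illustrative sketch (not driver code): with the entry layout above, the
 * fast clear state of miplevel `level` for a given plane would start at
 *
 *    image->planes[plane].fast_clear_state_offset +
 *       level * anv_fast_clear_state_entry_size(device)
 *
 * with the "needs resolve" dword sitting clear_value_size bytes into the
 * entry.  The exact addressing is an assumption for illustration only.
 */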
/* Returns true if a HiZ-enabled depth buffer can be sampled from. */
static inline bool
anv_can_sample_with_hiz(const struct gen_device_info * const devinfo,
                        const struct anv_image *image)
{
   if (!(image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT))
      return false;

   if (devinfo->gen < 8)
      return false;

   return image->samples == 1;
}
void
anv_gen8_hiz_op_resolve(struct anv_cmd_buffer *cmd_buffer,
                        const struct anv_image *image,
                        enum blorp_hiz_op op);
void
anv_ccs_resolve(struct anv_cmd_buffer * const cmd_buffer,
                const struct anv_state surface_state,
                const struct anv_image * const image,
                VkImageAspectFlagBits aspect,
                const uint8_t level, const uint32_t layer_count,
                const enum blorp_fast_clear_op op);
void
anv_image_fast_clear(struct anv_cmd_buffer *cmd_buffer,
                     const struct anv_image *image,
                     VkImageAspectFlagBits aspect,
                     const uint32_t base_level, const uint32_t level_count,
                     const uint32_t base_layer, uint32_t layer_count);
void
anv_image_copy_to_shadow(struct anv_cmd_buffer *cmd_buffer,
                         const struct anv_image *image,
                         uint32_t base_level, uint32_t level_count,
                         uint32_t base_layer, uint32_t layer_count);
enum isl_aux_usage
anv_layout_to_aux_usage(const struct gen_device_info * const devinfo,
                        const struct anv_image *image,
                        const VkImageAspectFlagBits aspect,
                        const VkImageLayout layout);
/* This is defined as a macro so that it works for both
 * VkImageSubresourceRange and VkImageSubresourceLayers
 */
#define anv_get_layerCount(_image, _range) \
   ((_range)->layerCount == VK_REMAINING_ARRAY_LAYERS ? \
    (_image)->array_size - (_range)->baseArrayLayer : (_range)->layerCount)
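/* Illustrative usage (sketch only): both forms below expand to the same
 * clamped layer count, which is why this is a macro rather than a function:
 *
 *    uint32_t a = anv_get_layerCount(image, &range);    // VkImageSubresourceRange
 *    uint32_t b = anv_get_layerCount(image, &layers);   // VkImageSubresourceLayers
 */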
static inline uint32_t
anv_get_levelCount(const struct anv_image *image,
                   const VkImageSubresourceRange *range)
{
   return range->levelCount == VK_REMAINING_MIP_LEVELS ?
          image->levels - range->baseMipLevel : range->levelCount;
}
static inline VkImageAspectFlags
anv_image_expand_aspects(const struct anv_image *image,
                         VkImageAspectFlags aspects)
{
   /* If the underlying image has color plane aspects and
    * VK_IMAGE_ASPECT_COLOR_BIT has been requested, then return the aspects of
    * the underlying image.
    */
   if ((image->aspects & VK_IMAGE_ASPECT_PLANES_BITS) != 0 &&
       aspects == VK_IMAGE_ASPECT_COLOR_BIT)
      return image->aspects;

   return aspects;
}
static inline bool
anv_image_aspects_compatible(VkImageAspectFlags aspects1,
                             VkImageAspectFlags aspects2)
{
   if (aspects1 == aspects2)
      return true;

   /* Color aspects are compatible if both masks select the same number of
    * planes.
    */
   if ((aspects1 & VK_IMAGE_ASPECT_ANY_COLOR_BIT) != 0 &&
       (aspects2 & VK_IMAGE_ASPECT_ANY_COLOR_BIT) != 0 &&
       _mesa_bitcount(aspects1) == _mesa_bitcount(aspects2))
      return true;

   return false;
}
struct anv_image_view {
   const struct anv_image *image; /**< VkImageViewCreateInfo::image */

   VkImageAspectFlags aspect_mask;

   VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */

   struct {
      uint32_t image_plane;

      struct isl_view isl;

      /**
       * RENDER_SURFACE_STATE when using image as a sampler surface with an
       * image layout of SHADER_READ_ONLY_OPTIMAL or
       * DEPTH_STENCIL_READ_ONLY_OPTIMAL.
       */
      struct anv_surface_state optimal_sampler_surface_state;

      /**
       * RENDER_SURFACE_STATE when using image as a sampler surface with an
       * image layout of GENERAL.
       */
      struct anv_surface_state general_sampler_surface_state;

      /**
       * RENDER_SURFACE_STATE when using image as a storage image. Separate
       * states for write-only and readable, using the real format for
       * write-only and the lowered format for readable.
       */
      struct anv_surface_state storage_surface_state;
      struct anv_surface_state writeonly_storage_surface_state;

      struct brw_image_param storage_image_param;
   } planes[3];
};
enum anv_image_view_state_flags {
   ANV_IMAGE_VIEW_STATE_STORAGE_WRITE_ONLY   = (1 << 0),
   ANV_IMAGE_VIEW_STATE_TEXTURE_OPTIMAL      = (1 << 1),
};
void anv_image_fill_surface_state(struct anv_device *device,
                                  const struct anv_image *image,
                                  VkImageAspectFlagBits aspect,
                                  const struct isl_view *view,
                                  isl_surf_usage_flags_t view_usage,
                                  enum isl_aux_usage aux_usage,
                                  const union isl_color_value *clear_color,
                                  enum anv_image_view_state_flags flags,
                                  struct anv_surface_state *state_inout,
                                  struct brw_image_param *image_param_out);
struct anv_image_create_info {
   const VkImageCreateInfo *vk_info;

   /** An opt-in bitmask which filters an ISL-mapping of the Vulkan tiling. */
   isl_tiling_flags_t isl_tiling_flags;

   uint32_t stride;
};
VkResult anv_image_create(VkDevice _device,
                          const struct anv_image_create_info *info,
                          const VkAllocationCallbacks *alloc,
                          VkImage *pImage);

const struct anv_surface *
anv_image_get_surface_for_aspect_mask(const struct anv_image *image,
                                      VkImageAspectFlags aspect_mask);

enum isl_format
anv_isl_format_for_descriptor_type(VkDescriptorType type);
static inline struct VkExtent3D
anv_sanitize_image_extent(const VkImageType imageType,
                          const struct VkExtent3D imageExtent)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkExtent3D) { imageExtent.width, 1, 1 };
   case VK_IMAGE_TYPE_2D:
      return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
   case VK_IMAGE_TYPE_3D:
      return imageExtent;
   default:
      unreachable("invalid image type");
   }
}
static inline struct VkOffset3D
anv_sanitize_image_offset(const VkImageType imageType,
                          const struct VkOffset3D imageOffset)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkOffset3D) { imageOffset.x, 0, 0 };
   case VK_IMAGE_TYPE_2D:
      return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
   case VK_IMAGE_TYPE_3D:
      return imageOffset;
   default:
      unreachable("invalid image type");
   }
}
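/* Illustrative usage (sketch only): normalizing a client-provided extent so a
 * 1D copy never carries a bogus height or depth:
 *
 *    VkExtent3D extent =
 *       anv_sanitize_image_extent(VK_IMAGE_TYPE_1D, pRegion->imageExtent);
 *
 * pRegion here stands for a hypothetical VkBufferImageCopy pointer.
 */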
void anv_fill_buffer_surface_state(struct anv_device *device,
                                   struct anv_state state,
                                   enum isl_format format,
                                   uint32_t offset, uint32_t range,
                                   uint32_t stride);
struct anv_ycbcr_conversion {
   const struct anv_format *format;
   VkSamplerYcbcrModelConversionKHR ycbcr_model;
   VkSamplerYcbcrRangeKHR ycbcr_range;
   VkComponentSwizzle mapping[4];
   VkChromaLocationKHR chroma_offsets[2];
   VkFilter chroma_filter;
   bool chroma_reconstruction;
};
struct anv_sampler {
   uint32_t state[3][4];

   struct anv_ycbcr_conversion *conversion;
};
struct anv_framebuffer {
   uint32_t attachment_count;
   struct anv_image_view *attachments[0];
};
struct anv_subpass {
   uint32_t attachment_count;

   /**
    * A pointer to all attachment references used in this subpass.
    * Only valid if ::attachment_count > 0.
    */
   VkAttachmentReference *attachments;
   uint32_t input_count;
   VkAttachmentReference *input_attachments;
   uint32_t color_count;
   VkAttachmentReference *color_attachments;
   VkAttachmentReference *resolve_attachments;

   VkAttachmentReference depth_stencil_attachment;

   uint32_t view_mask;

   /** Subpass has a depth/stencil self-dependency */
   bool has_ds_self_dep;

   /** Subpass has at least one resolve attachment */
   bool has_resolve;
};
static inline unsigned
anv_subpass_view_count(const struct anv_subpass *subpass)
{
   return MAX2(1, _mesa_bitcount(subpass->view_mask));
}
struct anv_render_pass_attachment {
   /* TODO: Consider using VkAttachmentDescription instead of storing each of
    * its members individually.
    */
   VkFormat format;
   uint32_t samples;
   VkImageUsageFlags usage;
   VkAttachmentLoadOp load_op;
   VkAttachmentStoreOp store_op;
   VkAttachmentLoadOp stencil_load_op;
   VkImageLayout initial_layout;
   VkImageLayout final_layout;
   VkImageLayout first_subpass_layout;

   /* The subpass id in which the attachment will be used last. */
   uint32_t last_subpass_idx;
};
struct anv_render_pass {
   uint32_t attachment_count;
   uint32_t subpass_count;
   /* An array of subpass_count+1 flushes, one per subpass boundary */
   enum anv_pipe_bits *subpass_flushes;
   struct anv_render_pass_attachment *attachments;
   struct anv_subpass subpasses[0];
};
#define ANV_PIPELINE_STATISTICS_MASK 0x000007ff

struct anv_query_pool {
   VkQueryPipelineStatisticFlags pipeline_statistics;
   /** Stride between slots, in bytes */
   uint32_t stride;
   /** Number of slots in this query pool */
   uint32_t slots;
};
void *anv_lookup_entrypoint(const struct gen_device_info *devinfo,
                            const char *name);

void anv_dump_image_to_ppm(struct anv_device *device,
                           struct anv_image *image, unsigned miplevel,
                           unsigned array_layer, VkImageAspectFlagBits aspect,
                           const char *filename);
enum anv_dump_action {
   ANV_DUMP_FRAMEBUFFERS_BIT = 0x1,
};

void anv_dump_start(struct anv_device *device, enum anv_dump_action actions);
void anv_dump_finish(void);

void anv_dump_add_framebuffer(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_framebuffer *fb);
static inline uint32_t
anv_get_subpass_id(const struct anv_cmd_state * const cmd_state)
{
   /* This function must be called from within a subpass. */
   assert(cmd_state->pass && cmd_state->subpass);

   const uint32_t subpass_id = cmd_state->subpass - cmd_state->pass->subpasses;

   /* The id of this subpass shouldn't exceed the number of subpasses in this
    * render pass minus 1.
    */
   assert(subpass_id < cmd_state->pass->subpass_count);

   return subpass_id;
}
#define ANV_DEFINE_HANDLE_CASTS(__anv_type, __VkType)                \
                                                                     \
   static inline struct __anv_type *                                 \
   __anv_type ## _from_handle(__VkType _handle)                      \
   {                                                                 \
      return (struct __anv_type *) _handle;                          \
   }                                                                 \
                                                                     \
   static inline __VkType                                            \
   __anv_type ## _to_handle(struct __anv_type *_obj)                 \
   {                                                                 \
      return (__VkType) _obj;                                        \
   }

#define ANV_DEFINE_NONDISP_HANDLE_CASTS(__anv_type, __VkType)        \
                                                                     \
   static inline struct __anv_type *                                 \
   __anv_type ## _from_handle(__VkType _handle)                      \
   {                                                                 \
      return (struct __anv_type *)(uintptr_t) _handle;               \
   }                                                                 \
                                                                     \
   static inline __VkType                                            \
   __anv_type ## _to_handle(struct __anv_type *_obj)                 \
   {                                                                 \
      return (__VkType)(uintptr_t) _obj;                             \
   }

#define ANV_FROM_HANDLE(__anv_type, __name, __handle) \
   struct __anv_type *__name = __anv_type ## _from_handle(__handle)
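/* Illustrative usage (sketch only): entrypoints unwrap their handles with
 * these macros before touching driver-private state.  The entrypoint name
 * below is made up for the example:
 *
 *    VKAPI_ATTR void VKAPI_CALL anv_FrobDevice(VkDevice _device)
 *    {
 *       ANV_FROM_HANDLE(anv_device, device, _device);
 *       ...
 *    }
 */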
ANV_DEFINE_HANDLE_CASTS(anv_cmd_buffer, VkCommandBuffer)
ANV_DEFINE_HANDLE_CASTS(anv_device, VkDevice)
ANV_DEFINE_HANDLE_CASTS(anv_instance, VkInstance)
ANV_DEFINE_HANDLE_CASTS(anv_physical_device, VkPhysicalDevice)
ANV_DEFINE_HANDLE_CASTS(anv_queue, VkQueue)

ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_cmd_pool, VkCommandPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer, VkBuffer)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer_view, VkBufferView)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_pool, VkDescriptorPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set, VkDescriptorSet)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set_layout, VkDescriptorSetLayout)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_update_template, VkDescriptorUpdateTemplateKHR)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_device_memory, VkDeviceMemory)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_fence, VkFence)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_event, VkEvent)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_framebuffer, VkFramebuffer)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image, VkImage)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image_view, VkImageView)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_cache, VkPipelineCache)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline, VkPipeline)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_layout, VkPipelineLayout)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_query_pool, VkQueryPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_render_pass, VkRenderPass)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_sampler, VkSampler)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_semaphore, VkSemaphore)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_shader_module, VkShaderModule)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_debug_report_callback, VkDebugReportCallbackEXT)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_ycbcr_conversion, VkSamplerYcbcrConversionKHR)
/* Gen-specific function declarations */
#ifdef genX
#  include "anv_genX.h"
#else
#  define genX(x) gen7_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen75_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen8_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen9_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen10_##x
#  include "anv_genX.h"
#  undef genX
#endif

#endif /* ANV_PRIVATE_H */