/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))

#include "common/gen_device_info.h"
#include "blorp/blorp.h"
#include "compiler/brw_compiler.h"
#include "util/macros.h"
#include "util/list.h"
#include "util/u_vector.h"
#include "util/vk_alloc.h"

/* Pre-declarations needed for WSI entrypoints */
typedef struct xcb_connection_t xcb_connection_t;
typedef uint32_t xcb_visualid_t;
typedef uint32_t xcb_window_t;

struct anv_buffer_view;
struct anv_image_view;

#include <vulkan/vulkan.h>
#include <vulkan/vulkan_intel.h>
#include <vulkan/vk_icd.h>

#include "anv_entrypoints.h"
#include "common/gen_debug.h"
#include "wsi_common.h"
/* Allowing different clear colors requires us to perform a depth resolve at
 * the end of certain render passes. This is because while slow clears store
 * the clear color in the HiZ buffer, fast clears (without a resolve) don't.
 * See the PRMs for examples describing when additional resolves would be
 * necessary. To enable fast clears without requiring extra resolves, we set
 * the clear value to a globally-defined one. We could allow different values
 * if the user doesn't expect coherent data during or after a render pass
 * (VK_ATTACHMENT_STORE_OP_DONT_CARE), but such users (aside from the CTS)
 * don't seem to exist yet. In almost all Vulkan applications tested thus far,
 * 1.0f seems to be the only value used. The only application that doesn't set
 * this value does so through the use of a seemingly uninitialized clear
 * value.
 */
#define ANV_HZ_FC_VAL 1.0f
#define MAX_VIEWPORTS 16
#define MAX_SCISSORS  16
#define MAX_PUSH_CONSTANTS_SIZE 128
#define MAX_DYNAMIC_BUFFERS 16
#define MAX_PUSH_DESCRIPTORS 32 /* Minimum requirement */

#define ANV_SVGS_VB_INDEX    MAX_VBS
#define ANV_DRAWID_VB_INDEX (MAX_VBS + 1)

#define anv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
static inline uint32_t
align_down_npot_u32(uint32_t v, uint32_t a)
{
   return v - (v % a);
}
static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

static inline uint64_t
align_u64(uint64_t v, uint64_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

static inline int32_t
align_i32(int32_t v, int32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

/** Alignment must be a power of 2. */
static inline bool
anv_is_aligned(uintmax_t n, uintmax_t a)
{
   assert(a == (a & -a));
   return (n & (a - 1)) == 0;
}

static inline uint32_t
anv_minify(uint32_t n, uint32_t levels)
{
   if (unlikely(n == 0))
      return 0;
   else
      return MAX2(n >> levels, 1);
}
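/* Illustrative note (not part of the original header): for a power-of-two
 * alignment, the rounding in align_u32() above is a pure bit trick. With
 * v = 13 and a = 16:
 *
 *    (13 + 16 - 1) & ~(16 - 1)  ==  28 & ~15  ==  16
 *
 * and an already-aligned value maps to itself: (32 + 15) & ~15 == 32. The
 * assert(a != 0 && a == (a & -a)) is what guarantees that ~(a - 1) is a
 * valid mask, i.e. that the alignment is a power of two.
 */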
static inline float
anv_clamp_f(float f, float min, float max)
{
   assert(min < max);

   if (f < min)
      return min;
   else if (f > max)
      return max;
   else
      return f;
}

static inline bool
anv_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
{
   if (*inout_mask & clear_mask) {
      *inout_mask &= ~clear_mask;
      return true;
   } else {
      return false;
   }
}

static inline union isl_color_value
vk_to_isl_color(VkClearColorValue color)
{
   return (union isl_color_value) {
      .u32 = {
         color.uint32[0],
         color.uint32[1],
         color.uint32[2],
         color.uint32[3],
      },
   };
}
#define for_each_bit(b, dword)                     \
   for (uint32_t __dword = (dword);                \
        (b) = __builtin_ffs(__dword) - 1, __dword; \
        __dword &= ~(1 << (b)))

#define typed_memcpy(dest, src, count) ({                  \
   STATIC_ASSERT(sizeof(*src) == sizeof(*dest));           \
   memcpy((dest), (src), (count) * sizeof(*(src)));        \
})
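/* Usage sketch (illustrative, not from the original source): for_each_bit()
 * walks the set bits of a mask from least- to most-significant, binding each
 * bit index to `b` in turn.
 *
 *    uint32_t b;
 *    for_each_bit(b, 0x14)    // visits b = 2, then b = 4
 *       handle_bit(b);        // handle_bit() is a hypothetical callee
 */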
/* Whenever we generate an error, pass it through this function. Useful for
 * debugging, where we can break on it. Only call at error site, not when
 * propagating errors. Might be useful to plug in a stack trace here.
 */
VkResult __vk_errorf(VkResult error, const char *file, int line, const char *format, ...);

#ifdef DEBUG
#define vk_error(error) __vk_errorf(error, __FILE__, __LINE__, NULL);
#define vk_errorf(error, format, ...) __vk_errorf(error, __FILE__, __LINE__, format, ## __VA_ARGS__);
#define anv_debug(format, ...) fprintf(stderr, "debug: " format, ##__VA_ARGS__)
#else
#define vk_error(error) error
#define vk_errorf(error, format, ...) error
#define anv_debug(format, ...)
#endif
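/* Usage sketch (illustrative, not from the original source): error returns
 * are wrapped in vk_error()/vk_errorf() so that a debug build logs (and can
 * break at) the call site that first produced the error, e.g.
 *
 *    if (map == MAP_FAILED)
 *       return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
 */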
/**
 * Warn on ignored extension structs.
 *
 * The Vulkan spec requires us to ignore unsupported or unknown structs in
 * a pNext chain. In debug mode, emitting warnings for ignored structs may
 * help us discover structs that we should not have ignored.
 *
 * From the Vulkan 1.0.38 spec:
 *
 *    Any component of the implementation (the loader, any enabled layers,
 *    and drivers) must skip over, without processing (other than reading the
 *    sType and pNext members) any chained structures with sType values not
 *    defined by extensions supported by that component.
 */
#define anv_debug_ignored_stype(sType) \
   anv_debug("debug: %s: ignored VkStructureType %u\n", __func__, (sType))

void __anv_finishme(const char *file, int line, const char *format, ...)
   anv_printflike(3, 4);
void __anv_perf_warn(const char *file, int line, const char *format, ...)
   anv_printflike(3, 4);
void anv_loge(const char *format, ...) anv_printflike(1, 2);
void anv_loge_v(const char *format, va_list va);
/**
 * Print a FINISHME message, including its source location.
 */
#define anv_finishme(format, ...) \
   do { \
      static bool reported = false; \
      if (!reported) { \
         __anv_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__); \
         reported = true; \
      } \
   } while (0)

/**
 * Print a perf warning message. Set INTEL_DEBUG=perf to see these.
 */
#define anv_perf_warn(format, ...) \
   do { \
      static bool reported = false; \
      if (!reported && unlikely(INTEL_DEBUG & DEBUG_PERF)) { \
         __anv_perf_warn(__FILE__, __LINE__, format, ##__VA_ARGS__); \
         reported = true; \
      } \
   } while (0)

/* A non-fatal assert. Useful for debugging. */
#ifdef DEBUG
#define anv_assert(x) ({ \
   if (unlikely(!(x))) \
      fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x); \
})
#else
#define anv_assert(x)
#endif
/* A dynamically growable, circular buffer. Elements are added at head and
 * removed from tail. head and tail are free-running uint32_t indices and we
 * only compute the modulo with size when accessing the array. This way,
 * number of bytes in the queue is always head - tail, even in case of
 * wraparound.
 */

struct anv_bo {
   uint32_t gem_handle;

   /* Index into the current validation list. This is used by the
    * validation list building algorithm to track which buffers are already
    * in the validation list so that we can ensure uniqueness.
    */
   uint32_t index;

   /* Last known offset. This value is provided by the kernel when we
    * execbuf and is used as the presumed offset for the next bunch of
    * relocations.
    */
   uint64_t offset;

   uint64_t size;
   void *map;

   /** Flags to pass to the kernel through drm_i915_exec_object2::flags */
   uint32_t flags;
};

static inline void
anv_bo_init(struct anv_bo *bo, uint32_t gem_handle, uint64_t size)
{
   bo->gem_handle = gem_handle;
   bo->index = 0;
   bo->offset = -1;
   bo->size = size;
   bo->map = NULL;
   bo->flags = 0;
}
/* Represents a lock-free linked list of "free" things. This is used by
 * both the block pool and the state pools. Unfortunately, in order to
 * solve the ABA problem, we can't use a single uint32_t head.
 */
union anv_free_list {
   struct {
      int32_t offset;

      /* A simple count that is incremented every time the head changes. */
      uint32_t count;
   };
   uint64_t u64;
};

#define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { 1, 0 } })
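/* Illustrative note (not part of the original header): the count word is
 * what defeats ABA. If a thread reads head A, another thread pops A and B
 * and then pushes A back, a compare-and-swap on the offset alone would still
 * succeed even though the list changed underneath us. Because every update
 * also bumps count, a 64-bit compare-and-swap of the whole { offset, count }
 * pair fails in that scenario and the operation is retried.
 */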
struct anv_block_state {
   union {
      struct {
         uint32_t next;
         uint32_t end;
      };
      uint64_t u64;
   };
};

struct anv_block_pool {
   struct anv_device *device;

   struct anv_bo bo;

   /* The offset from the start of the bo to the "center" of the block
    * pool. Pointers to allocated blocks are given by
    * bo.map + center_bo_offset + offsets.
    */
   uint32_t center_bo_offset;

   /* Current memory map of the block pool. This pointer may or may not
    * point to the actual beginning of the block pool memory. If
    * anv_block_pool_alloc_back has ever been called, then this pointer
    * will point to the "center" position of the buffer and all offsets
    * (negative or positive) given out by the block pool alloc functions
    * will be valid relative to this pointer.
    *
    * In particular, map == bo.map + center_offset
    */
   void *map;

   /**
    * Array of mmaps and gem handles owned by the block pool, reclaimed when
    * the block pool is destroyed.
    */
   struct u_vector mmap_cleanups;

   union anv_free_list free_list;
   struct anv_block_state state;

   union anv_free_list back_free_list;
   struct anv_block_state back_state;
};

/* Block pools are backed by a fixed-size 1GB memfd */
#define BLOCK_POOL_MEMFD_SIZE (1ul << 30)

/* The center of the block pool is also the middle of the memfd. This may
 * change in the future if we decide differently for some reason.
 */
#define BLOCK_POOL_MEMFD_CENTER (BLOCK_POOL_MEMFD_SIZE / 2)
static inline uint32_t
anv_block_pool_size(struct anv_block_pool *pool)
{
   return pool->state.end + pool->back_state.end;
}
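/* Illustrative note (not part of the original header): offsets handed out by
 * the block pool may be negative (allocations that grow backwards from the
 * center), so pointers are always computed relative to the center of the
 * mapping. Assuming, for example, a center_bo_offset of 512 KiB, a back
 * allocation at offset -4096 resolves to
 *
 *    pool->map + (-4096)  ==  bo.map + center_bo_offset - 4096
 *
 * and anv_block_pool_size() above is simply the bytes handed out in both
 * directions: state.end grows forward and back_state.end grows backward.
 */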
struct anv_fixed_size_state_pool {
   union anv_free_list free_list;
   struct anv_block_state block;
};

#define ANV_MIN_STATE_SIZE_LOG2 6
#define ANV_MAX_STATE_SIZE_LOG2 20

#define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2 + 1)

struct anv_state_pool {
   struct anv_block_pool *block_pool;
   struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];
};

struct anv_state_stream_block;

struct anv_state_stream {
   struct anv_block_pool *block_pool;

   /* The current working block */
   struct anv_state_stream_block *block;

   /* Offset at which the current block starts */
   uint32_t start;

   /* Offset at which to allocate the next state */
   uint32_t next;

   /* Offset at which the current block ends */
   uint32_t end;
};

#define CACHELINE_SIZE 64
#define CACHELINE_MASK 63
static inline void
anv_clflush_range(void *start, size_t size)
{
   void *p = (void *) (((uintptr_t) start) & ~CACHELINE_MASK);
   void *end = start + size;

   while (p < end) {
      __builtin_ia32_clflush(p);
      p += CACHELINE_SIZE;
   }
}

static inline void
anv_flush_range(void *start, size_t size)
{
   __builtin_ia32_mfence();
   anv_clflush_range(start, size);
}

static inline void
anv_invalidate_range(void *start, size_t size)
{
   anv_clflush_range(start, size);
   __builtin_ia32_mfence();
}
VkResult anv_block_pool_init(struct anv_block_pool *pool,
                             struct anv_device *device, uint32_t block_size);
void anv_block_pool_finish(struct anv_block_pool *pool);
int32_t anv_block_pool_alloc(struct anv_block_pool *pool);
int32_t anv_block_pool_alloc_back(struct anv_block_pool *pool);
void anv_block_pool_free(struct anv_block_pool *pool, int32_t offset);
void anv_state_pool_init(struct anv_state_pool *pool,
                         struct anv_block_pool *block_pool);
void anv_state_pool_finish(struct anv_state_pool *pool);
struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,
                                      size_t state_size, size_t alignment);
void anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state);
void anv_state_stream_init(struct anv_state_stream *stream,
                           struct anv_block_pool *block_pool);
void anv_state_stream_finish(struct anv_state_stream *stream);
struct anv_state anv_state_stream_alloc(struct anv_state_stream *stream,
                                        uint32_t size, uint32_t alignment);
477 * Implements a pool of re-usable BOs. The interface is identical to that
478 * of block_pool except that each block is its own BO.
481 struct anv_device
*device
;
486 void anv_bo_pool_init(struct anv_bo_pool
*pool
, struct anv_device
*device
);
487 void anv_bo_pool_finish(struct anv_bo_pool
*pool
);
488 VkResult
anv_bo_pool_alloc(struct anv_bo_pool
*pool
, struct anv_bo
*bo
,
490 void anv_bo_pool_free(struct anv_bo_pool
*pool
, const struct anv_bo
*bo
);
492 struct anv_scratch_bo
{
497 struct anv_scratch_pool
{
498 /* Indexed by Per-Thread Scratch Space number (the hardware value) and stage */
499 struct anv_scratch_bo bos
[16][MESA_SHADER_STAGES
];
502 void anv_scratch_pool_init(struct anv_device
*device
,
503 struct anv_scratch_pool
*pool
);
504 void anv_scratch_pool_finish(struct anv_device
*device
,
505 struct anv_scratch_pool
*pool
);
506 struct anv_bo
*anv_scratch_pool_alloc(struct anv_device
*device
,
507 struct anv_scratch_pool
*pool
,
508 gl_shader_stage stage
,
509 unsigned per_thread_scratch
);
struct anv_physical_device {
   VK_LOADER_DATA _loader_data;

   struct anv_instance *instance;
   struct gen_device_info info;

   /** Amount of "GPU memory" we want to advertise
    *
    * Clearly, this value is bogus since Intel is a UMA architecture. On
    * gen7 platforms, we are limited by GTT size unless we want to implement
    * fine-grained tracking and GTT splitting. On Broadwell and above we are
    * practically unlimited. However, we will never report more than 3/4 of
    * the total system RAM to try to avoid running out of RAM.
    */
   uint64_t heap_size;

   bool supports_48bit_addresses;
   struct brw_compiler *compiler;
   struct isl_device isl_dev;
   int cmd_parser_version;

   uint32_t subslice_total;

   uint8_t uuid[VK_UUID_SIZE];

   struct wsi_device wsi_device;
};
struct anv_instance {
   VK_LOADER_DATA _loader_data;

   VkAllocationCallbacks alloc;

   int physicalDeviceCount;
   struct anv_physical_device physicalDevice;
};

VkResult anv_init_wsi(struct anv_physical_device *physical_device);
void anv_finish_wsi(struct anv_physical_device *physical_device);
556 VK_LOADER_DATA _loader_data
;
558 struct anv_device
* device
;
560 struct anv_state_pool
* pool
;
563 struct anv_pipeline_cache
{
564 struct anv_device
* device
;
565 pthread_mutex_t mutex
;
567 struct hash_table
* cache
;
570 struct anv_pipeline_bind_map
;
572 void anv_pipeline_cache_init(struct anv_pipeline_cache
*cache
,
573 struct anv_device
*device
,
575 void anv_pipeline_cache_finish(struct anv_pipeline_cache
*cache
);
577 struct anv_shader_bin
*
578 anv_pipeline_cache_search(struct anv_pipeline_cache
*cache
,
579 const void *key
, uint32_t key_size
);
580 struct anv_shader_bin
*
581 anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache
*cache
,
582 const void *key_data
, uint32_t key_size
,
583 const void *kernel_data
, uint32_t kernel_size
,
584 const struct brw_stage_prog_data
*prog_data
,
585 uint32_t prog_data_size
,
586 const struct anv_pipeline_bind_map
*bind_map
);
589 VK_LOADER_DATA _loader_data
;
591 VkAllocationCallbacks alloc
;
593 struct anv_instance
* instance
;
595 struct gen_device_info info
;
596 struct isl_device isl_dev
;
599 bool can_chain_batches
;
600 bool robust_buffer_access
;
602 struct anv_bo_pool batch_bo_pool
;
604 struct anv_block_pool dynamic_state_block_pool
;
605 struct anv_state_pool dynamic_state_pool
;
607 struct anv_block_pool instruction_block_pool
;
608 struct anv_state_pool instruction_state_pool
;
610 struct anv_block_pool surface_state_block_pool
;
611 struct anv_state_pool surface_state_pool
;
613 struct anv_bo workaround_bo
;
615 struct anv_pipeline_cache blorp_shader_cache
;
616 struct blorp_context blorp
;
618 struct anv_state border_colors
;
620 struct anv_queue queue
;
622 struct anv_scratch_pool scratch_pool
;
624 uint32_t default_mocs
;
626 pthread_mutex_t mutex
;
627 pthread_cond_t queue_submit
;
static inline void
anv_state_flush(struct anv_device *device, struct anv_state state)
{
   if (device->info.has_llc)
      return;

   anv_flush_range(state.map, state.alloc_size);
}

void anv_device_init_blorp(struct anv_device *device);
void anv_device_finish_blorp(struct anv_device *device);
VkResult anv_device_execbuf(struct anv_device *device,
                            struct drm_i915_gem_execbuffer2 *execbuf,
                            struct anv_bo **execbuf_bos);
VkResult anv_device_query_status(struct anv_device *device);
VkResult anv_device_bo_busy(struct anv_device *device, struct anv_bo *bo);
VkResult anv_device_wait(struct anv_device *device, struct anv_bo *bo,
                         int64_t timeout);
651 void* anv_gem_mmap(struct anv_device
*device
,
652 uint32_t gem_handle
, uint64_t offset
, uint64_t size
, uint32_t flags
);
653 void anv_gem_munmap(void *p
, uint64_t size
);
654 uint32_t anv_gem_create(struct anv_device
*device
, size_t size
);
655 void anv_gem_close(struct anv_device
*device
, uint32_t gem_handle
);
656 uint32_t anv_gem_userptr(struct anv_device
*device
, void *mem
, size_t size
);
657 int anv_gem_busy(struct anv_device
*device
, uint32_t gem_handle
);
658 int anv_gem_wait(struct anv_device
*device
, uint32_t gem_handle
, int64_t *timeout_ns
);
659 int anv_gem_execbuffer(struct anv_device
*device
,
660 struct drm_i915_gem_execbuffer2
*execbuf
);
661 int anv_gem_set_tiling(struct anv_device
*device
, uint32_t gem_handle
,
662 uint32_t stride
, uint32_t tiling
);
663 int anv_gem_create_context(struct anv_device
*device
);
664 int anv_gem_destroy_context(struct anv_device
*device
, int context
);
665 int anv_gem_get_context_param(int fd
, int context
, uint32_t param
,
667 int anv_gem_get_param(int fd
, uint32_t param
);
668 bool anv_gem_get_bit6_swizzle(int fd
, uint32_t tiling
);
669 int anv_gem_get_aperture(int fd
, uint64_t *size
);
670 bool anv_gem_supports_48b_addresses(int fd
);
671 int anv_gem_gpu_get_reset_stats(struct anv_device
*device
,
672 uint32_t *active
, uint32_t *pending
);
673 int anv_gem_handle_to_fd(struct anv_device
*device
, uint32_t gem_handle
);
674 uint32_t anv_gem_fd_to_handle(struct anv_device
*device
, int fd
);
675 int anv_gem_set_caching(struct anv_device
*device
, uint32_t gem_handle
, uint32_t caching
);
676 int anv_gem_set_domain(struct anv_device
*device
, uint32_t gem_handle
,
677 uint32_t read_domains
, uint32_t write_domain
);
679 VkResult
anv_bo_init_new(struct anv_bo
*bo
, struct anv_device
*device
, uint64_t size
);
681 struct anv_reloc_list
{
684 struct drm_i915_gem_relocation_entry
* relocs
;
685 struct anv_bo
** reloc_bos
;
688 VkResult
anv_reloc_list_init(struct anv_reloc_list
*list
,
689 const VkAllocationCallbacks
*alloc
);
690 void anv_reloc_list_finish(struct anv_reloc_list
*list
,
691 const VkAllocationCallbacks
*alloc
);
693 VkResult
anv_reloc_list_add(struct anv_reloc_list
*list
,
694 const VkAllocationCallbacks
*alloc
,
695 uint32_t offset
, struct anv_bo
*target_bo
,
698 struct anv_batch_bo
{
699 /* Link in the anv_cmd_buffer.owned_batch_bos list */
700 struct list_head link
;
704 /* Bytes actually consumed in this batch BO */
707 struct anv_reloc_list relocs
;
711 const VkAllocationCallbacks
* alloc
;
717 struct anv_reloc_list
* relocs
;
719 /* This callback is called (with the associated user data) in the event
720 * that the batch runs out of space.
722 VkResult (*extend_cb
)(struct anv_batch
*, void *);
726 * Current error status of the command buffer. Used to track inconsistent
727 * or incomplete command buffer states that are the consequence of run-time
728 * errors such as out of memory scenarios. We want to track this in the
729 * batch because the command buffer object is not visible to some parts
735 void *anv_batch_emit_dwords(struct anv_batch
*batch
, int num_dwords
);
736 void anv_batch_emit_batch(struct anv_batch
*batch
, struct anv_batch
*other
);
737 uint64_t anv_batch_emit_reloc(struct anv_batch
*batch
,
738 void *location
, struct anv_bo
*bo
, uint32_t offset
);
739 VkResult
anv_device_submit_simple_batch(struct anv_device
*device
,
740 struct anv_batch
*batch
);
static inline VkResult
anv_batch_set_error(struct anv_batch *batch, VkResult error)
{
   assert(error != VK_SUCCESS);
   if (batch->status == VK_SUCCESS)
      batch->status = error;
   return batch->status;
}

static inline bool
anv_batch_has_error(struct anv_batch *batch)
{
   return batch->status != VK_SUCCESS;
}
static inline uint64_t
_anv_combine_address(struct anv_batch *batch, void *location,
                     const struct anv_address address, uint32_t delta)
{
   if (address.bo == NULL) {
      return address.offset + delta;
   } else {
      assert(batch->start <= location && location < batch->end);

      return anv_batch_emit_reloc(batch, location, address.bo, address.offset + delta);
   }
}

#define __gen_address_type struct anv_address
#define __gen_user_data struct anv_batch
#define __gen_combine_address _anv_combine_address
/* Wrapper macros needed to work around preprocessor argument issues. In
 * particular, arguments don't get pre-evaluated if they are concatenated.
 * This means that, if you pass GENX(3DSTATE_PS) into the emit macro, the
 * GENX macro won't get evaluated if the emit macro contains "cmd ## foo".
 * We can work around this easily enough with these helpers.
 */
#define __anv_cmd_length(cmd) cmd ## _length
#define __anv_cmd_length_bias(cmd) cmd ## _length_bias
#define __anv_cmd_header(cmd) cmd ## _header
#define __anv_cmd_pack(cmd) cmd ## _pack
#define __anv_reg_num(reg) reg ## _num

#define anv_pack_struct(dst, struc, ...) do {                              \
      struct struc __template = {                                          \
         __VA_ARGS__                                                       \
      };                                                                   \
      __anv_cmd_pack(struc)(NULL, dst, &__template);                       \
      VG(VALGRIND_CHECK_MEM_IS_DEFINED(dst, __anv_cmd_length(struc) * 4)); \
   } while (0)

#define anv_batch_emitn(batch, n, cmd, ...) ({             \
      void *__dst = anv_batch_emit_dwords(batch, n);       \
      if (__dst) {                                         \
         struct cmd __template = {                         \
            __anv_cmd_header(cmd),                         \
            .DWordLength = n - __anv_cmd_length_bias(cmd), \
            __VA_ARGS__                                    \
         };                                                \
         __anv_cmd_pack(cmd)(batch, __dst, &__template);   \
      }                                                    \
      __dst;                                               \
   })

#define anv_batch_emit_merge(batch, dwords0, dwords1)                 \
   do {                                                               \
      uint32_t *dw;                                                   \
                                                                      \
      STATIC_ASSERT(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1));      \
      dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0));       \
      if (!dw)                                                        \
         break;                                                       \
      for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++)              \
         dw[i] = (dwords0)[i] | (dwords1)[i];                         \
      VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, ARRAY_SIZE(dwords0) * 4)); \
   } while (0)

#define anv_batch_emit(batch, cmd, name)                             \
   for (struct cmd name = { __anv_cmd_header(cmd) },                 \
        *_dst = anv_batch_emit_dwords(batch, __anv_cmd_length(cmd)); \
        __builtin_expect(_dst != NULL, 1);                           \
        ({ __anv_cmd_pack(cmd)(batch, _dst, &name);                  \
           VG(VALGRIND_CHECK_MEM_IS_DEFINED(_dst, __anv_cmd_length(cmd) * 4)); \
           _dst = NULL;                                              \
         }))
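/* Usage sketch (illustrative, not from the original source): anv_batch_emit()
 * is shaped so the caller fills in the template struct inside a block and the
 * pack into the batch happens when the block exits, e.g.
 *
 *    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 *       pc.CommandStreamerStallEnable = true;
 *    }
 */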
834 #define GEN7_MOCS (struct GEN7_MEMORY_OBJECT_CONTROL_STATE) { \
835 .GraphicsDataTypeGFDT = 0, \
836 .LLCCacheabilityControlLLCCC = 0, \
837 .L3CacheabilityControlL3CC = 1, \
840 #define GEN75_MOCS (struct GEN75_MEMORY_OBJECT_CONTROL_STATE) { \
841 .LLCeLLCCacheabilityControlLLCCC = 0, \
842 .L3CacheabilityControlL3CC = 1, \
845 #define GEN8_MOCS (struct GEN8_MEMORY_OBJECT_CONTROL_STATE) { \
846 .MemoryTypeLLCeLLCCacheabilityControl = WB, \
847 .TargetCache = L3DefertoPATforLLCeLLCselection, \
851 /* Skylake: MOCS is now an index into an array of 62 different caching
852 * configurations programmed by the kernel.
855 #define GEN9_MOCS (struct GEN9_MEMORY_OBJECT_CONTROL_STATE) { \
856 /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
857 .IndextoMOCSTables = 2 \
860 #define GEN9_MOCS_PTE { \
861 /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
862 .IndextoMOCSTables = 1 \
865 struct anv_device_memory
{
868 VkDeviceSize map_size
;
873 * Header for Vertex URB Entry (VUE)
875 struct anv_vue_header
{
877 uint32_t RTAIndex
; /* RenderTargetArrayIndex */
878 uint32_t ViewportIndex
;
882 struct anv_descriptor_set_binding_layout
{
884 /* The type of the descriptors in this binding */
885 VkDescriptorType type
;
888 /* Number of array elements in this binding */
   /* Index into the flattened descriptor set */
   uint16_t descriptor_index;
894 /* Index into the dynamic state array for a dynamic buffer */
895 int16_t dynamic_offset_index
;
897 /* Index into the descriptor set buffer views */
898 int16_t buffer_index
;
901 /* Index into the binding table for the associated surface */
902 int16_t surface_index
;
904 /* Index into the sampler table for the associated sampler */
905 int16_t sampler_index
;
907 /* Index into the image table for the associated image */
909 } stage
[MESA_SHADER_STAGES
];
911 /* Immutable samplers (or NULL if no immutable samplers) */
912 struct anv_sampler
**immutable_samplers
;
915 struct anv_descriptor_set_layout
{
916 /* Number of bindings in this descriptor set */
917 uint16_t binding_count
;
919 /* Total size of the descriptor set with room for all array entries */
922 /* Shader stages affected by this descriptor set */
923 uint16_t shader_stages
;
925 /* Number of buffers in this descriptor set */
926 uint16_t buffer_count
;
928 /* Number of dynamic offsets used by this descriptor set */
929 uint16_t dynamic_offset_count
;
931 /* Bindings in this descriptor set */
932 struct anv_descriptor_set_binding_layout binding
[0];
935 struct anv_descriptor
{
936 VkDescriptorType type
;
940 struct anv_image_view
*image_view
;
941 struct anv_sampler
*sampler
;
943 /* Used to determine whether or not we need the surface state to have
944 * the auxiliary buffer enabled.
946 enum isl_aux_usage aux_usage
;
950 struct anv_buffer
*buffer
;
955 struct anv_buffer_view
*buffer_view
;
959 struct anv_descriptor_set
{
960 const struct anv_descriptor_set_layout
*layout
;
962 uint32_t buffer_count
;
963 struct anv_buffer_view
*buffer_views
;
964 struct anv_descriptor descriptors
[0];
967 struct anv_buffer_view
{
968 enum isl_format format
; /**< VkBufferViewCreateInfo::format */
970 uint32_t offset
; /**< Offset into bo. */
971 uint64_t range
; /**< VkBufferViewCreateInfo::range */
973 struct anv_state surface_state
;
974 struct anv_state storage_surface_state
;
975 struct anv_state writeonly_storage_surface_state
;
977 struct brw_image_param storage_image_param
;
980 struct anv_push_descriptor_set
{
981 struct anv_descriptor_set set
;
983 /* Put this field right behind anv_descriptor_set so it fills up the
984 * descriptors[0] field. */
985 struct anv_descriptor descriptors
[MAX_PUSH_DESCRIPTORS
];
987 struct anv_buffer_view buffer_views
[MAX_PUSH_DESCRIPTORS
];
990 struct anv_descriptor_pool
{
995 struct anv_state_stream surface_state_stream
;
996 void *surface_state_free_list
;
1001 enum anv_descriptor_template_entry_type
{
1002 ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_IMAGE
,
1003 ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_BUFFER
,
1004 ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_BUFFER_VIEW
1007 struct anv_descriptor_template_entry
{
1008 /* The type of descriptor in this entry */
1009 VkDescriptorType type
;
1011 /* Binding in the descriptor set */
1014 /* Offset at which to write into the descriptor set binding */
1015 uint32_t array_element
;
1017 /* Number of elements to write into the descriptor set binding */
1018 uint32_t array_count
;
1020 /* Offset into the user provided data */
1023 /* Stride between elements into the user provided data */
1027 struct anv_descriptor_update_template
{
1028 /* The descriptor set this template corresponds to. This value is only
1029 * valid if the template was created with the templateType
1030 * VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR.
1034 /* Number of entries in this template */
1035 uint32_t entry_count
;
1037 /* Entries of the template */
1038 struct anv_descriptor_template_entry entries
[0];
1042 anv_descriptor_set_layout_size(const struct anv_descriptor_set_layout
*layout
);
1045 anv_descriptor_set_write_image_view(struct anv_descriptor_set
*set
,
1046 const struct gen_device_info
* const devinfo
,
1047 const VkDescriptorImageInfo
* const info
,
1048 VkDescriptorType type
,
1053 anv_descriptor_set_write_buffer_view(struct anv_descriptor_set
*set
,
1054 VkDescriptorType type
,
1055 struct anv_buffer_view
*buffer_view
,
1060 anv_descriptor_set_write_buffer(struct anv_descriptor_set
*set
,
1061 struct anv_device
*device
,
1062 struct anv_state_stream
*alloc_stream
,
1063 VkDescriptorType type
,
1064 struct anv_buffer
*buffer
,
1067 VkDeviceSize offset
,
1068 VkDeviceSize range
);
1071 anv_descriptor_set_write_template(struct anv_descriptor_set
*set
,
1072 struct anv_device
*device
,
1073 struct anv_state_stream
*alloc_stream
,
1074 const struct anv_descriptor_update_template
*template,
1078 anv_descriptor_set_create(struct anv_device
*device
,
1079 struct anv_descriptor_pool
*pool
,
1080 const struct anv_descriptor_set_layout
*layout
,
1081 struct anv_descriptor_set
**out_set
);
1084 anv_descriptor_set_destroy(struct anv_device
*device
,
1085 struct anv_descriptor_pool
*pool
,
1086 struct anv_descriptor_set
*set
);
1088 #define ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS UINT8_MAX
1090 struct anv_pipeline_binding
{
1091 /* The descriptor set this surface corresponds to. The special value of
1092 * ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS indicates that the offset refers
1093 * to a color attachment and not a regular descriptor.
1097 /* Binding in the descriptor set */
1100 /* Index in the binding */
1103 /* Input attachment index (relative to the subpass) */
1104 uint8_t input_attachment_index
;
1106 /* For a storage image, whether it is write-only */
1110 struct anv_pipeline_layout
{
1112 struct anv_descriptor_set_layout
*layout
;
1113 uint32_t dynamic_offset_start
;
1119 bool has_dynamic_offsets
;
1120 } stage
[MESA_SHADER_STAGES
];
1122 unsigned char sha1
[20];
1126 struct anv_device
* device
;
1129 VkBufferUsageFlags usage
;
1131 /* Set when bound */
1133 VkDeviceSize offset
;
static inline uint64_t
anv_buffer_get_range(struct anv_buffer *buffer, uint64_t offset, uint64_t range)
{
   assert(offset <= buffer->size);
   if (range == VK_WHOLE_SIZE) {
      return buffer->size - offset;
   } else {
      assert(range <= buffer->size);
      return range;
   }
}
1148 enum anv_cmd_dirty_bits
{
1149 ANV_CMD_DIRTY_DYNAMIC_VIEWPORT
= 1 << 0, /* VK_DYNAMIC_STATE_VIEWPORT */
1150 ANV_CMD_DIRTY_DYNAMIC_SCISSOR
= 1 << 1, /* VK_DYNAMIC_STATE_SCISSOR */
1151 ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH
= 1 << 2, /* VK_DYNAMIC_STATE_LINE_WIDTH */
1152 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS
= 1 << 3, /* VK_DYNAMIC_STATE_DEPTH_BIAS */
1153 ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS
= 1 << 4, /* VK_DYNAMIC_STATE_BLEND_CONSTANTS */
1154 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS
= 1 << 5, /* VK_DYNAMIC_STATE_DEPTH_BOUNDS */
1155 ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK
= 1 << 6, /* VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK */
1156 ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK
= 1 << 7, /* VK_DYNAMIC_STATE_STENCIL_WRITE_MASK */
1157 ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE
= 1 << 8, /* VK_DYNAMIC_STATE_STENCIL_REFERENCE */
1158 ANV_CMD_DIRTY_DYNAMIC_ALL
= (1 << 9) - 1,
1159 ANV_CMD_DIRTY_PIPELINE
= 1 << 9,
1160 ANV_CMD_DIRTY_INDEX_BUFFER
= 1 << 10,
1161 ANV_CMD_DIRTY_RENDER_TARGETS
= 1 << 11,
1163 typedef uint32_t anv_cmd_dirty_mask_t
;
1165 enum anv_pipe_bits
{
1166 ANV_PIPE_DEPTH_CACHE_FLUSH_BIT
= (1 << 0),
1167 ANV_PIPE_STALL_AT_SCOREBOARD_BIT
= (1 << 1),
1168 ANV_PIPE_STATE_CACHE_INVALIDATE_BIT
= (1 << 2),
1169 ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT
= (1 << 3),
1170 ANV_PIPE_VF_CACHE_INVALIDATE_BIT
= (1 << 4),
1171 ANV_PIPE_DATA_CACHE_FLUSH_BIT
= (1 << 5),
1172 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT
= (1 << 10),
1173 ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT
= (1 << 11),
1174 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT
= (1 << 12),
1175 ANV_PIPE_DEPTH_STALL_BIT
= (1 << 13),
1176 ANV_PIPE_CS_STALL_BIT
= (1 << 20),
1178 /* This bit does not exist directly in PIPE_CONTROL. Instead it means that
1179 * a flush has happened but not a CS stall. The next time we do any sort
1180 * of invalidation we need to insert a CS stall at that time. Otherwise,
1181 * we would have to CS stall on every flush which could be bad.
1183 ANV_PIPE_NEEDS_CS_STALL_BIT
= (1 << 21),
1186 #define ANV_PIPE_FLUSH_BITS ( \
1187 ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | \
1188 ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
1189 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT)
1191 #define ANV_PIPE_STALL_BITS ( \
1192 ANV_PIPE_STALL_AT_SCOREBOARD_BIT | \
1193 ANV_PIPE_DEPTH_STALL_BIT | \
1194 ANV_PIPE_CS_STALL_BIT)
1196 #define ANV_PIPE_INVALIDATE_BITS ( \
1197 ANV_PIPE_STATE_CACHE_INVALIDATE_BIT | \
1198 ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT | \
1199 ANV_PIPE_VF_CACHE_INVALIDATE_BIT | \
1200 ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
1201 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT | \
1202 ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT)
static inline enum anv_pipe_bits
anv_pipe_flush_bits_for_access_flags(VkAccessFlags flags)
{
   enum anv_pipe_bits pipe_bits = 0;

   unsigned b;
   for_each_bit(b, flags) {
      switch ((VkAccessFlagBits)(1 << b)) {
      case VK_ACCESS_SHADER_WRITE_BIT:
         pipe_bits |= ANV_PIPE_DATA_CACHE_FLUSH_BIT;
         break;
      case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
         pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
         break;
      case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
         pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
         break;
      case VK_ACCESS_TRANSFER_WRITE_BIT:
         pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
         pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
         break;
      default:
         break; /* Nothing to do */
      }
   }

   return pipe_bits;
}

static inline enum anv_pipe_bits
anv_pipe_invalidate_bits_for_access_flags(VkAccessFlags flags)
{
   enum anv_pipe_bits pipe_bits = 0;

   unsigned b;
   for_each_bit(b, flags) {
      switch ((VkAccessFlagBits)(1 << b)) {
      case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
      case VK_ACCESS_INDEX_READ_BIT:
      case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
         pipe_bits |= ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
         break;
      case VK_ACCESS_UNIFORM_READ_BIT:
         pipe_bits |= ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
         pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
         break;
      case VK_ACCESS_SHADER_READ_BIT:
      case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
      case VK_ACCESS_TRANSFER_READ_BIT:
         pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
         break;
      default:
         break; /* Nothing to do */
      }
   }

   return pipe_bits;
}
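/* Usage sketch (illustrative, not from the original source): a barrier
 * implementation would typically accumulate these bits from the source and
 * destination access masks and emit a single PIPE_CONTROL later, e.g.
 *
 *    cmd_buffer->state.pending_pipe_bits |=
 *       anv_pipe_flush_bits_for_access_flags(src_access_mask) |
 *       anv_pipe_invalidate_bits_for_access_flags(dst_access_mask);
 */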
1263 struct anv_vertex_binding
{
1264 struct anv_buffer
* buffer
;
1265 VkDeviceSize offset
;
1268 struct anv_push_constants
{
1269 /* Current allocated size of this push constants data structure.
1270 * Because a decent chunk of it may not be used (images on SKL, for
1271 * instance), we won't actually allocate the entire structure up-front.
1275 /* Push constant data provided by the client through vkPushConstants */
1276 uint8_t client_data
[MAX_PUSH_CONSTANTS_SIZE
];
1278 /* Our hardware only provides zero-based vertex and instance id so, in
1279 * order to satisfy the vulkan requirements, we may have to push one or
1280 * both of these into the shader.
1282 uint32_t base_vertex
;
1283 uint32_t base_instance
;
1285 /* Image data for image_load_store on pre-SKL */
1286 struct brw_image_param images
[MAX_IMAGES
];
1289 struct anv_dynamic_state
{
1292 VkViewport viewports
[MAX_VIEWPORTS
];
1297 VkRect2D scissors
[MAX_SCISSORS
];
1308 float blend_constants
[4];
1318 } stencil_compare_mask
;
1323 } stencil_write_mask
;
1328 } stencil_reference
;
1331 extern const struct anv_dynamic_state default_dynamic_state
;
1333 void anv_dynamic_state_copy(struct anv_dynamic_state
*dest
,
1334 const struct anv_dynamic_state
*src
,
1335 uint32_t copy_mask
);
1338 * Attachment state when recording a renderpass instance.
1340 * The clear value is valid only if there exists a pending clear.
1342 struct anv_attachment_state
{
1343 enum isl_aux_usage aux_usage
;
1344 enum isl_aux_usage input_aux_usage
;
1345 struct anv_state color_rt_state
;
1346 struct anv_state input_att_state
;
1348 VkImageLayout current_layout
;
1349 VkImageAspectFlags pending_clear_aspects
;
1351 VkClearValue clear_value
;
1352 bool clear_color_is_zero_one
;
1355 /** State required while building cmd buffer */
1356 struct anv_cmd_state
{
1357 /* PIPELINE_SELECT.PipelineSelection */
1358 uint32_t current_pipeline
;
1359 const struct gen_l3_config
* current_l3_config
;
1361 anv_cmd_dirty_mask_t dirty
;
1362 anv_cmd_dirty_mask_t compute_dirty
;
1363 enum anv_pipe_bits pending_pipe_bits
;
1364 uint32_t num_workgroups_offset
;
1365 struct anv_bo
*num_workgroups_bo
;
1366 VkShaderStageFlags descriptors_dirty
;
1367 VkShaderStageFlags push_constants_dirty
;
1368 uint32_t scratch_size
;
1369 struct anv_pipeline
* pipeline
;
1370 struct anv_pipeline
* compute_pipeline
;
1371 struct anv_framebuffer
* framebuffer
;
1372 struct anv_render_pass
* pass
;
1373 struct anv_subpass
* subpass
;
1374 VkRect2D render_area
;
1375 uint32_t restart_index
;
1376 struct anv_vertex_binding vertex_bindings
[MAX_VBS
];
1377 struct anv_descriptor_set
* descriptors
[MAX_SETS
];
1378 uint32_t dynamic_offsets
[MAX_DYNAMIC_BUFFERS
];
1379 VkShaderStageFlags push_constant_stages
;
1380 struct anv_push_constants
* push_constants
[MESA_SHADER_STAGES
];
1381 struct anv_state binding_tables
[MESA_SHADER_STAGES
];
1382 struct anv_state samplers
[MESA_SHADER_STAGES
];
1383 struct anv_dynamic_state dynamic
;
1386 struct anv_push_descriptor_set push_descriptor
;
1389 * Whether or not the gen8 PMA fix is enabled. We ensure that, at the top
1390 * of any command buffer it is disabled by disabling it in EndCommandBuffer
1391 * and before invoking the secondary in ExecuteCommands.
1393 bool pma_fix_enabled
;
1396 * Whether or not we know for certain that HiZ is enabled for the current
1397 * subpass. If, for whatever reason, we are unsure as to whether HiZ is
1398 * enabled or not, this will be false.
1403 * Array length is anv_cmd_state::pass::attachment_count. Array content is
1404 * valid only when recording a render pass instance.
1406 struct anv_attachment_state
* attachments
;
1409 * Surface states for color render targets. These are stored in a single
1410 * flat array. For depth-stencil attachments, the surface state is simply
1413 struct anv_state render_pass_states
;
1416 * A null surface state of the right size to match the framebuffer. This
1417 * is one of the states in render_pass_states.
1419 struct anv_state null_surface_state
;
1422 struct anv_buffer
* index_buffer
;
1423 uint32_t index_type
; /**< 3DSTATE_INDEX_BUFFER.IndexFormat */
1424 uint32_t index_offset
;
1428 struct anv_cmd_pool
{
1429 VkAllocationCallbacks alloc
;
1430 struct list_head cmd_buffers
;
1433 #define ANV_CMD_BUFFER_BATCH_SIZE 8192
1435 enum anv_cmd_buffer_exec_mode
{
1436 ANV_CMD_BUFFER_EXEC_MODE_PRIMARY
,
1437 ANV_CMD_BUFFER_EXEC_MODE_EMIT
,
1438 ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT
,
1439 ANV_CMD_BUFFER_EXEC_MODE_CHAIN
,
1440 ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN
,
1443 struct anv_cmd_buffer
{
1444 VK_LOADER_DATA _loader_data
;
1446 struct anv_device
* device
;
1448 struct anv_cmd_pool
* pool
;
1449 struct list_head pool_link
;
1451 struct anv_batch batch
;
1453 /* Fields required for the actual chain of anv_batch_bo's.
1455 * These fields are initialized by anv_cmd_buffer_init_batch_bo_chain().
1457 struct list_head batch_bos
;
1458 enum anv_cmd_buffer_exec_mode exec_mode
;
1460 /* A vector of anv_batch_bo pointers for every batch or surface buffer
1461 * referenced by this command buffer
1463 * initialized by anv_cmd_buffer_init_batch_bo_chain()
1465 struct u_vector seen_bbos
;
1467 /* A vector of int32_t's for every block of binding tables.
1469 * initialized by anv_cmd_buffer_init_batch_bo_chain()
1471 struct u_vector bt_blocks
;
1474 struct anv_reloc_list surface_relocs
;
1475 /** Last seen surface state block pool center bo offset */
1476 uint32_t last_ss_pool_center
;
1478 /* Serial for tracking buffer completion */
1481 /* Stream objects for storing temporary data */
1482 struct anv_state_stream surface_state_stream
;
1483 struct anv_state_stream dynamic_state_stream
;
1485 VkCommandBufferUsageFlags usage_flags
;
1486 VkCommandBufferLevel level
;
1488 struct anv_cmd_state state
;
1491 VkResult
anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer
*cmd_buffer
);
1492 void anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer
*cmd_buffer
);
1493 void anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer
*cmd_buffer
);
1494 void anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer
*cmd_buffer
);
1495 void anv_cmd_buffer_add_secondary(struct anv_cmd_buffer
*primary
,
1496 struct anv_cmd_buffer
*secondary
);
1497 void anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer
*cmd_buffer
);
1498 VkResult
anv_cmd_buffer_execbuf(struct anv_device
*device
,
1499 struct anv_cmd_buffer
*cmd_buffer
);
1501 VkResult
anv_cmd_buffer_reset(struct anv_cmd_buffer
*cmd_buffer
);
1504 anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer
*cmd_buffer
,
1505 gl_shader_stage stage
, uint32_t size
);
1506 #define anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, field) \
1507 anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, \
1508 (offsetof(struct anv_push_constants, field) + \
1509 sizeof(cmd_buffer->state.push_constants[0]->field)))
1511 struct anv_state
anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer
*cmd_buffer
,
1512 const void *data
, uint32_t size
, uint32_t alignment
);
1513 struct anv_state
anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer
*cmd_buffer
,
1514 uint32_t *a
, uint32_t *b
,
1515 uint32_t dwords
, uint32_t alignment
);
1518 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer
*cmd_buffer
);
1520 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer
*cmd_buffer
,
1521 uint32_t entries
, uint32_t *state_offset
);
1523 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer
*cmd_buffer
);
1525 anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer
*cmd_buffer
,
1526 uint32_t size
, uint32_t alignment
);
1529 anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer
*cmd_buffer
);
1531 void gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer
*cmd_buffer
);
1532 void gen8_cmd_buffer_emit_depth_viewport(struct anv_cmd_buffer
*cmd_buffer
,
1533 bool depth_clamp_enable
);
1534 void gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer
*cmd_buffer
);
1536 void anv_cmd_buffer_setup_attachments(struct anv_cmd_buffer
*cmd_buffer
,
1537 struct anv_render_pass
*pass
,
1538 struct anv_framebuffer
*framebuffer
,
1539 const VkClearValue
*clear_values
);
1541 void anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer
*cmd_buffer
);
1544 anv_cmd_buffer_push_constants(struct anv_cmd_buffer
*cmd_buffer
,
1545 gl_shader_stage stage
);
1547 anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer
*cmd_buffer
);
1549 void anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer
*cmd_buffer
);
1550 void anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer
*cmd_buffer
);
1552 const struct anv_image_view
*
1553 anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer
*cmd_buffer
);
1556 anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer
*cmd_buffer
,
1557 uint32_t num_entries
,
1558 uint32_t *state_offset
,
1559 struct anv_state
*bt_state
);
1561 void anv_cmd_buffer_dump(struct anv_cmd_buffer
*cmd_buffer
);
1563 enum anv_fence_state
{
1564 /** Indicates that this is a new (or newly reset fence) */
1565 ANV_FENCE_STATE_RESET
,
1567 /** Indicates that this fence has been submitted to the GPU but is still
1568 * (as far as we know) in use by the GPU.
1570 ANV_FENCE_STATE_SUBMITTED
,
1572 ANV_FENCE_STATE_SIGNALED
,
1577 struct drm_i915_gem_execbuffer2 execbuf
;
1578 struct drm_i915_gem_exec_object2 exec2_objects
[1];
1579 enum anv_fence_state state
;
1584 struct anv_state state
;
1587 struct anv_shader_module
{
1588 unsigned char sha1
[20];
1593 void anv_hash_shader(unsigned char *hash
, const void *key
, size_t key_size
,
1594 struct anv_shader_module
*module
,
1595 const char *entrypoint
,
1596 const struct anv_pipeline_layout
*pipeline_layout
,
1597 const VkSpecializationInfo
*spec_info
);
static inline gl_shader_stage
vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
{
   assert(__builtin_popcount(vk_stage) == 1);
   return ffs(vk_stage) - 1;
}

static inline VkShaderStageFlagBits
mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
{
   return (1 << mesa_stage);
}

#define ANV_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)

#define anv_foreach_stage(stage, stage_bits)                      \
   for (gl_shader_stage stage,                                    \
        __tmp = (gl_shader_stage)((stage_bits) & ANV_STAGE_MASK); \
        stage = __builtin_ffs(__tmp) - 1, __tmp;                  \
        __tmp &= ~(1 << (stage)))
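/* Illustrative note (not part of the original header): the Vulkan stage bits
 * are defined in the same order as gl_shader_stage, which is what makes the
 * ffs()-based conversion above valid. For example, VK_SHADER_STAGE_FRAGMENT_BIT
 * is 0x10, so ffs(0x10) - 1 == 4 == MESA_SHADER_FRAGMENT.
 */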
1620 struct anv_pipeline_bind_map
{
1621 uint32_t surface_count
;
1622 uint32_t sampler_count
;
1623 uint32_t image_count
;
1625 struct anv_pipeline_binding
* surface_to_descriptor
;
1626 struct anv_pipeline_binding
* sampler_to_descriptor
;
1629 struct anv_shader_bin_key
{
1634 struct anv_shader_bin
{
1637 const struct anv_shader_bin_key
*key
;
1639 struct anv_state kernel
;
1640 uint32_t kernel_size
;
1642 const struct brw_stage_prog_data
*prog_data
;
1643 uint32_t prog_data_size
;
1645 struct anv_pipeline_bind_map bind_map
;
1647 /* Prog data follows, then params, then the key, all aligned to 8-bytes */
1650 struct anv_shader_bin
*
1651 anv_shader_bin_create(struct anv_device
*device
,
1652 const void *key
, uint32_t key_size
,
1653 const void *kernel
, uint32_t kernel_size
,
1654 const struct brw_stage_prog_data
*prog_data
,
1655 uint32_t prog_data_size
, const void *prog_data_param
,
1656 const struct anv_pipeline_bind_map
*bind_map
);
void
anv_shader_bin_destroy(struct anv_device *device, struct anv_shader_bin *shader);

static inline void
anv_shader_bin_ref(struct anv_shader_bin *shader)
{
   assert(shader && shader->ref_cnt >= 1);
   __sync_fetch_and_add(&shader->ref_cnt, 1);
}

static inline void
anv_shader_bin_unref(struct anv_device *device, struct anv_shader_bin *shader)
{
   assert(shader && shader->ref_cnt >= 1);
   if (__sync_fetch_and_add(&shader->ref_cnt, -1) == 1)
      anv_shader_bin_destroy(device, shader);
}
1676 struct anv_pipeline
{
1677 struct anv_device
* device
;
1678 struct anv_batch batch
;
1679 uint32_t batch_data
[512];
1680 struct anv_reloc_list batch_relocs
;
1681 uint32_t dynamic_state_mask
;
1682 struct anv_dynamic_state dynamic_state
;
1684 struct anv_pipeline_layout
* layout
;
1686 bool needs_data_cache
;
1688 struct anv_shader_bin
* shaders
[MESA_SHADER_STAGES
];
1691 const struct gen_l3_config
* l3_config
;
1692 uint32_t total_size
;
1695 VkShaderStageFlags active_stages
;
1696 struct anv_state blend_state
;
1699 uint32_t binding_stride
[MAX_VBS
];
1700 bool instancing_enable
[MAX_VBS
];
1701 bool primitive_restart
;
1704 uint32_t cs_right_mask
;
1707 bool depth_test_enable
;
1708 bool writes_stencil
;
1709 bool stencil_test_enable
;
1710 bool depth_clamp_enable
;
1711 bool sample_shading_enable
;
1716 uint32_t depth_stencil_state
[3];
1722 uint32_t wm_depth_stencil
[3];
1726 uint32_t wm_depth_stencil
[4];
1729 uint32_t interface_descriptor_data
[8];
static inline bool
anv_pipeline_has_stage(const struct anv_pipeline *pipeline,
                       gl_shader_stage stage)
{
   return (pipeline->active_stages & mesa_to_vk_shader_stage(stage)) != 0;
}

#define ANV_DECL_GET_PROG_DATA_FUNC(prefix, stage)             \
static inline const struct brw_##prefix##_prog_data *          \
get_##prefix##_prog_data(const struct anv_pipeline *pipeline)  \
{                                                               \
   if (anv_pipeline_has_stage(pipeline, stage)) {               \
      return (const struct brw_##prefix##_prog_data *)          \
             pipeline->shaders[stage]->prog_data;               \
   } else {                                                     \
      return NULL;                                              \
   }                                                            \
}

ANV_DECL_GET_PROG_DATA_FUNC(vs, MESA_SHADER_VERTEX)
ANV_DECL_GET_PROG_DATA_FUNC(tcs, MESA_SHADER_TESS_CTRL)
ANV_DECL_GET_PROG_DATA_FUNC(tes, MESA_SHADER_TESS_EVAL)
ANV_DECL_GET_PROG_DATA_FUNC(gs, MESA_SHADER_GEOMETRY)
ANV_DECL_GET_PROG_DATA_FUNC(wm, MESA_SHADER_FRAGMENT)
ANV_DECL_GET_PROG_DATA_FUNC(cs, MESA_SHADER_COMPUTE)

static inline const struct brw_vue_prog_data *
anv_pipeline_get_last_vue_prog_data(const struct anv_pipeline *pipeline)
{
   if (anv_pipeline_has_stage(pipeline, MESA_SHADER_GEOMETRY))
      return &get_gs_prog_data(pipeline)->base;
   else if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
      return &get_tes_prog_data(pipeline)->base;
   else
      return &get_vs_prog_data(pipeline)->base;
}
1770 anv_pipeline_init(struct anv_pipeline
*pipeline
, struct anv_device
*device
,
1771 struct anv_pipeline_cache
*cache
,
1772 const VkGraphicsPipelineCreateInfo
*pCreateInfo
,
1773 const VkAllocationCallbacks
*alloc
);
1776 anv_pipeline_compile_cs(struct anv_pipeline
*pipeline
,
1777 struct anv_pipeline_cache
*cache
,
1778 const VkComputePipelineCreateInfo
*info
,
1779 struct anv_shader_module
*module
,
1780 const char *entrypoint
,
1781 const VkSpecializationInfo
*spec_info
);
1784 enum isl_format isl_format
:16;
1785 struct isl_swizzle swizzle
;
1789 anv_get_format(const struct gen_device_info
*devinfo
, VkFormat format
,
1790 VkImageAspectFlags aspect
, VkImageTiling tiling
);
1792 static inline enum isl_format
1793 anv_get_isl_format(const struct gen_device_info
*devinfo
, VkFormat vk_format
,
1794 VkImageAspectFlags aspect
, VkImageTiling tiling
)
1796 return anv_get_format(devinfo
, vk_format
, aspect
, tiling
).isl_format
;
static inline struct isl_swizzle
anv_swizzle_for_render(struct isl_swizzle swizzle)
{
   /* Sometimes the swizzle will have alpha map to one. We do this to fake
    * RGB as RGBA for texturing
    */
   assert(swizzle.a == ISL_CHANNEL_SELECT_ONE ||
          swizzle.a == ISL_CHANNEL_SELECT_ALPHA);

   /* But it doesn't matter what we render to that channel */
   swizzle.a = ISL_CHANNEL_SELECT_ALPHA;

   return swizzle;
}

void anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm);
1818 * Subsurface of an anv_image.
1820 struct anv_surface
{
1821 /** Valid only if isl_surf::size > 0. */
1822 struct isl_surf isl
;
1825 * Offset from VkImage's base address, as bound by vkBindImageMemory().
1832 /* The original VkFormat provided by the client. This may not match any
1833 * of the actual surface formats.
1836 VkImageAspectFlags aspects
;
1839 uint32_t array_size
;
1840 uint32_t samples
; /**< VkImageCreateInfo::samples */
1841 VkImageUsageFlags usage
; /**< Superset of VkImageCreateInfo::usage. */
1842 VkImageTiling tiling
; /** VkImageCreateInfo::tiling */
1847 /* Set when bound */
1849 VkDeviceSize offset
;
1854 * For each foo, anv_image::foo_surface is valid if and only if
1855 * anv_image::aspects has a foo aspect.
1857 * The hardware requires that the depth buffer and stencil buffer be
1858 * separate surfaces. From Vulkan's perspective, though, depth and stencil
1859 * reside in the same VkImage. To satisfy both the hardware and Vulkan, we
1860 * allocate the depth and stencil buffers as separate surfaces in the same
1864 struct anv_surface color_surface
;
1867 struct anv_surface depth_surface
;
1868 struct anv_surface stencil_surface
;
1873 * For color images, this is the aux usage for this image when not used as a
1876 * For depth/stencil images, this is set to ISL_AUX_USAGE_HIZ if the image
1879 enum isl_aux_usage aux_usage
;
1881 struct anv_surface aux_surface
;
/* Returns true if a HiZ-enabled depth buffer can be sampled from. */
static inline bool
anv_can_sample_with_hiz(const struct gen_device_info * const devinfo,
                        const VkImageAspectFlags aspect_mask,
                        const uint32_t samples)
{
   /* Validate the inputs. */
   assert(devinfo && aspect_mask && samples);
   return devinfo->gen >= 8 && (aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) &&
          samples == 1;
}

void
anv_gen8_hiz_op_resolve(struct anv_cmd_buffer *cmd_buffer,
                        const struct anv_image *image,
                        enum blorp_hiz_op op);
1902 anv_layout_to_aux_usage(const struct gen_device_info
* const devinfo
,
1903 const struct anv_image
*image
,
1904 const VkImageAspectFlags aspects
,
1905 const VkImageLayout layout
);
/* This is defined as a macro so that it works for both
 * VkImageSubresourceRange and VkImageSubresourceLayers
 */
#define anv_get_layerCount(_image, _range) \
   ((_range)->layerCount == VK_REMAINING_ARRAY_LAYERS ? \
    (_image)->array_size - (_range)->baseArrayLayer : (_range)->layerCount)

static inline uint32_t
anv_get_levelCount(const struct anv_image *image,
                   const VkImageSubresourceRange *range)
{
   return range->levelCount == VK_REMAINING_MIP_LEVELS ?
          image->levels - range->baseMipLevel : range->levelCount;
}
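/* Illustrative note (not part of the original header): for an image with
 * levels == 10 and a range of { .baseMipLevel = 2,
 * .levelCount = VK_REMAINING_MIP_LEVELS }, anv_get_levelCount() resolves to
 * 10 - 2 == 8, i.e. every level from the base level to the last one.
 */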
1923 struct anv_image_view
{
1924 const struct anv_image
*image
; /**< VkImageViewCreateInfo::image */
1926 uint32_t offset
; /**< Offset into bo. */
1928 struct isl_view isl
;
1930 VkImageAspectFlags aspect_mask
;
1932 VkExtent3D extent
; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */
1934 /** RENDER_SURFACE_STATE when using image as a sampler surface. */
1935 struct anv_state sampler_surface_state
;
1938 * RENDER_SURFACE_STATE when using image as a sampler surface with the
1939 * auxiliary buffer disabled.
1941 struct anv_state no_aux_sampler_surface_state
;
1944 * RENDER_SURFACE_STATE when using image as a storage image. Separate states
1945 * for write-only and readable, using the real format for write-only and the
1946 * lowered format for readable.
1948 struct anv_state storage_surface_state
;
1949 struct anv_state writeonly_storage_surface_state
;
1951 struct brw_image_param storage_image_param
;
1954 struct anv_image_create_info
{
1955 const VkImageCreateInfo
*vk_info
;
1957 /** An opt-in bitmask which filters an ISL-mapping of the Vulkan tiling. */
1958 isl_tiling_flags_t isl_tiling_flags
;
1963 VkResult
anv_image_create(VkDevice _device
,
1964 const struct anv_image_create_info
*info
,
1965 const VkAllocationCallbacks
* alloc
,
1968 const struct anv_surface
*
1969 anv_image_get_surface_for_aspect_mask(const struct anv_image
*image
,
1970 VkImageAspectFlags aspect_mask
);
1973 anv_isl_format_for_descriptor_type(VkDescriptorType type
);
static inline struct VkExtent3D
anv_sanitize_image_extent(const VkImageType imageType,
                          const struct VkExtent3D imageExtent)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkExtent3D) { imageExtent.width, 1, 1 };
   case VK_IMAGE_TYPE_2D:
      return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
   case VK_IMAGE_TYPE_3D:
      return imageExtent;
   default:
      unreachable("invalid image type");
   }
}

static inline struct VkOffset3D
anv_sanitize_image_offset(const VkImageType imageType,
                          const struct VkOffset3D imageOffset)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkOffset3D) { imageOffset.x, 0, 0 };
   case VK_IMAGE_TYPE_2D:
      return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
   case VK_IMAGE_TYPE_3D:
      return imageOffset;
   default:
      unreachable("invalid image type");
   }
}
2008 void anv_fill_buffer_surface_state(struct anv_device
*device
,
2009 struct anv_state state
,
2010 enum isl_format format
,
2011 uint32_t offset
, uint32_t range
,
2014 void anv_image_view_fill_image_param(struct anv_device
*device
,
2015 struct anv_image_view
*view
,
2016 struct brw_image_param
*param
);
2017 void anv_buffer_view_fill_image_param(struct anv_device
*device
,
2018 struct anv_buffer_view
*view
,
2019 struct brw_image_param
*param
);
2021 struct anv_sampler
{
2025 struct anv_framebuffer
{
2030 uint32_t attachment_count
;
2031 struct anv_image_view
* attachments
[0];
2034 struct anv_subpass
{
2035 uint32_t attachment_count
;
2038 * A pointer to all attachment references used in this subpass.
2039 * Only valid if ::attachment_count > 0.
2041 VkAttachmentReference
* attachments
;
2042 uint32_t input_count
;
2043 VkAttachmentReference
* input_attachments
;
2044 uint32_t color_count
;
2045 VkAttachmentReference
* color_attachments
;
2046 VkAttachmentReference
* resolve_attachments
;
2048 VkAttachmentReference depth_stencil_attachment
;
2050 /** Subpass has a depth/stencil self-dependency */
2051 bool has_ds_self_dep
;
2053 /** Subpass has at least one resolve attachment */
2057 enum anv_subpass_usage
{
2058 ANV_SUBPASS_USAGE_DRAW
= (1 << 0),
2059 ANV_SUBPASS_USAGE_INPUT
= (1 << 1),
2060 ANV_SUBPASS_USAGE_RESOLVE_SRC
= (1 << 2),
2061 ANV_SUBPASS_USAGE_RESOLVE_DST
= (1 << 3),
2064 struct anv_render_pass_attachment
{
2065 /* TODO: Consider using VkAttachmentDescription instead of storing each of
2066 * its members individually.
2070 VkImageUsageFlags usage
;
2071 VkAttachmentLoadOp load_op
;
2072 VkAttachmentStoreOp store_op
;
2073 VkAttachmentLoadOp stencil_load_op
;
2074 VkImageLayout initial_layout
;
2075 VkImageLayout final_layout
;
2077 /* An array, indexed by subpass id, of how the attachment will be used. */
2078 enum anv_subpass_usage
* subpass_usage
;
2080 /* The subpass id in which the attachment will be used last. */
2081 uint32_t last_subpass_idx
;
2084 struct anv_render_pass
{
2085 uint32_t attachment_count
;
2086 uint32_t subpass_count
;
2087 VkAttachmentReference
* subpass_attachments
;
2088 enum anv_subpass_usage
* subpass_usages
;
2089 struct anv_render_pass_attachment
* attachments
;
2090 struct anv_subpass subpasses
[0];
2093 #define ANV_PIPELINE_STATISTICS_MASK 0x000007ff
2095 struct anv_query_pool
{
2097 VkQueryPipelineStatisticFlags pipeline_statistics
;
2098 /** Stride between slots, in bytes */
2100 /** Number of slots in this query pool */
2105 void *anv_lookup_entrypoint(const struct gen_device_info
*devinfo
,
2108 void anv_dump_image_to_ppm(struct anv_device
*device
,
2109 struct anv_image
*image
, unsigned miplevel
,
2110 unsigned array_layer
, VkImageAspectFlagBits aspect
,
2111 const char *filename
);
2113 enum anv_dump_action
{
2114 ANV_DUMP_FRAMEBUFFERS_BIT
= 0x1,
2117 void anv_dump_start(struct anv_device
*device
, enum anv_dump_action actions
);
2118 void anv_dump_finish(void);
2120 void anv_dump_add_framebuffer(struct anv_cmd_buffer
*cmd_buffer
,
2121 struct anv_framebuffer
*fb
);
static inline uint32_t
anv_get_subpass_id(const struct anv_cmd_state * const cmd_state)
{
   /* This function must be called from within a subpass. */
   assert(cmd_state->pass && cmd_state->subpass);

   const uint32_t subpass_id = cmd_state->subpass - cmd_state->pass->subpasses;

   /* The id of this subpass shouldn't exceed the number of subpasses in this
    * render pass minus 1.
    */
   assert(subpass_id < cmd_state->pass->subpass_count);

   return subpass_id;
}
#define ANV_DEFINE_HANDLE_CASTS(__anv_type, __VkType)        \
                                                             \
   static inline struct __anv_type *                         \
   __anv_type ## _from_handle(__VkType _handle)              \
   {                                                         \
      return (struct __anv_type *) _handle;                  \
   }                                                         \
                                                             \
   static inline __VkType                                    \
   __anv_type ## _to_handle(struct __anv_type *_obj)         \
   {                                                         \
      return (__VkType) _obj;                                \
   }

#define ANV_DEFINE_NONDISP_HANDLE_CASTS(__anv_type, __VkType) \
                                                              \
   static inline struct __anv_type *                          \
   __anv_type ## _from_handle(__VkType _handle)               \
   {                                                          \
      return (struct __anv_type *)(uintptr_t) _handle;        \
   }                                                          \
                                                              \
   static inline __VkType                                     \
   __anv_type ## _to_handle(struct __anv_type *_obj)          \
   {                                                          \
      return (__VkType)(uintptr_t) _obj;                      \
   }

#define ANV_FROM_HANDLE(__anv_type, __name, __handle) \
   struct __anv_type *__name = __anv_type ## _from_handle(__handle)
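/* Usage sketch (illustrative, not from the original source): Vulkan
 * entrypoints use these casts to recover the driver structs behind
 * dispatchable and non-dispatchable handles, e.g.
 *
 *    ANV_FROM_HANDLE(anv_device, device, _device);
 *    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
 */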
2169 ANV_DEFINE_HANDLE_CASTS(anv_cmd_buffer
, VkCommandBuffer
)
2170 ANV_DEFINE_HANDLE_CASTS(anv_device
, VkDevice
)
2171 ANV_DEFINE_HANDLE_CASTS(anv_instance
, VkInstance
)
2172 ANV_DEFINE_HANDLE_CASTS(anv_physical_device
, VkPhysicalDevice
)
2173 ANV_DEFINE_HANDLE_CASTS(anv_queue
, VkQueue
)
2175 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_cmd_pool
, VkCommandPool
)
2176 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer
, VkBuffer
)
2177 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer_view
, VkBufferView
)
2178 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_pool
, VkDescriptorPool
)
2179 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set
, VkDescriptorSet
)
2180 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set_layout
, VkDescriptorSetLayout
)
2181 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_update_template
, VkDescriptorUpdateTemplateKHR
)
2182 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_device_memory
, VkDeviceMemory
)
2183 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_fence
, VkFence
)
2184 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_event
, VkEvent
)
2185 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_framebuffer
, VkFramebuffer
)
2186 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image
, VkImage
)
2187 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image_view
, VkImageView
);
2188 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_cache
, VkPipelineCache
)
2189 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline
, VkPipeline
)
2190 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_layout
, VkPipelineLayout
)
2191 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_query_pool
, VkQueryPool
)
2192 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_render_pass
, VkRenderPass
)
2193 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_sampler
, VkSampler
)
2194 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_shader_module
, VkShaderModule
)
/* Gen-specific function declarations */
#ifdef genX
#  include "anv_genX.h"
#else
#  define genX(x) gen7_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen75_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen8_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen9_##x
#  include "anv_genX.h"
#  undef genX
#endif

#endif /* ANV_PRIVATE_H */