/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef ANV_PRIVATE_H
#define ANV_PRIVATE_H

#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>
#include <assert.h>
#include <stdint.h>
#include <i915_drm.h>

#ifdef HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
#else
#define VG(x)
#endif
#include "common/gen_device_info.h"
#include "blorp/blorp.h"
#include "compiler/brw_compiler.h"
#include "util/macros.h"
#include "util/list.h"
#include "util/u_vector.h"
#include "util/vk_alloc.h"

/* Pre-declarations needed for WSI entrypoints */
struct wl_surface;
struct wl_display;
typedef struct xcb_connection_t xcb_connection_t;
typedef uint32_t xcb_visualid_t;
typedef uint32_t xcb_window_t;

struct anv_buffer;
struct anv_buffer_view;
struct anv_image_view;

struct gen_l3_config;

#include <vulkan/vulkan.h>
#include <vulkan/vulkan_intel.h>
#include <vulkan/vk_icd.h>

#include "anv_entrypoints.h"
#include "isl/isl.h"

#include "common/gen_debug.h"
#include "wsi_common.h"
/* Allowing different clear colors requires us to perform a depth resolve at
 * the end of certain render passes. This is because while slow clears store
 * the clear color in the HiZ buffer, fast clears (without a resolve) don't.
 * See the PRMs for examples describing when additional resolves would be
 * necessary. To enable fast clears without requiring extra resolves, we set
 * the clear value to a globally-defined one. We could allow different values
 * if the user doesn't expect coherent data during or after a render pass
 * (VK_ATTACHMENT_STORE_OP_DONT_CARE), but such users (aside from the CTS)
 * don't seem to exist yet. In almost all Vulkan applications tested thus far,
 * 1.0f seems to be the only value used. The only application that doesn't set
 * this value does so through the usage of a seemingly uninitialized clear
 * value.
 */
#define ANV_HZ_FC_VAL 1.0f

#define MAX_VBS         32
#define MAX_SETS         8
#define MAX_RTS          8
#define MAX_VIEWPORTS   16
#define MAX_SCISSORS    16
#define MAX_PUSH_CONSTANTS_SIZE 128
#define MAX_DYNAMIC_BUFFERS 16
#define MAX_IMAGES 8
#define MAX_PUSH_DESCRIPTORS 32 /* Minimum requirement */

#define ANV_SVGS_VB_INDEX    MAX_VBS
#define ANV_DRAWID_VB_INDEX (MAX_VBS + 1)

#define anv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
static inline uint32_t
align_down_npot_u32(uint32_t v, uint32_t a)
{
   return v - (v % a);
}

static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

static inline uint64_t
align_u64(uint64_t v, uint64_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

static inline int32_t
align_i32(int32_t v, int32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

/** Alignment must be a power of 2. */
static inline bool
anv_is_aligned(uintmax_t n, uintmax_t a)
{
   assert(a == (a & -a));
   return (n & (a - 1)) == 0;
}
static inline uint32_t
anv_minify(uint32_t n, uint32_t levels)
{
   if (unlikely(n == 0))
      return 0;
   else
      return MAX2(n >> levels, 1);
}

static inline float
anv_clamp_f(float f, float min, float max)
{
   assert(min < max);

   if (f < min)
      return min;
   else if (f > max)
      return max;
   else
      return f;
}

static inline bool
anv_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
{
   if (*inout_mask & clear_mask) {
      *inout_mask &= ~clear_mask;
      return true;
   } else {
      return false;
   }
}

static inline union isl_color_value
vk_to_isl_color(VkClearColorValue color)
{
   return (union isl_color_value) {
      .u32 = {
         color.uint32[0],
         color.uint32[1],
         color.uint32[2],
         color.uint32[3],
      },
   };
}
#define for_each_bit(b, dword)                          \
   for (uint32_t __dword = (dword);                     \
        (b) = __builtin_ffs(__dword) - 1, __dword;      \
        __dword &= ~(1 << (b)))

#define typed_memcpy(dest, src, count) ({ \
   STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
   memcpy((dest), (src), (count) * sizeof(*(src))); \
})
/* Whenever we generate an error, pass it through this function.  Useful for
 * debugging, where we can break on it.  Only call at error site, not when
 * propagating errors.  Might be useful to plug in a stack trace here.
 */
VkResult __vk_errorf(VkResult error, const char *file, int line, const char *format, ...);

#ifdef DEBUG
#define vk_error(error) __vk_errorf(error, __FILE__, __LINE__, NULL);
#define vk_errorf(error, format, ...) __vk_errorf(error, __FILE__, __LINE__, format, ## __VA_ARGS__);
#define anv_debug(format, ...) fprintf(stderr, "debug: " format, ##__VA_ARGS__)
#else
#define vk_error(error) error
#define vk_errorf(error, format, ...) error
#define anv_debug(format, ...)
#endif
/**
 * Warn on ignored extension structs.
 *
 * The Vulkan spec requires us to ignore unsupported or unknown structs in
 * a pNext chain.  In debug mode, emitting warnings for ignored structs may
 * help us discover structs that we should not have ignored.
 *
 *
 * From the Vulkan 1.0.38 spec:
 *
 *    Any component of the implementation (the loader, any enabled layers,
 *    and drivers) must skip over, without processing (other than reading the
 *    sType and pNext members) any chained structures with sType values not
 *    defined by extensions supported by that component.
 */
#define anv_debug_ignored_stype(sType) \
   anv_debug("debug: %s: ignored VkStructureType %u\n", __func__, (sType))

void __anv_finishme(const char *file, int line, const char *format, ...)
   anv_printflike(3, 4);
void __anv_perf_warn(const char *file, int line, const char *format, ...)
   anv_printflike(3, 4);
void anv_loge(const char *format, ...) anv_printflike(1, 2);
void anv_loge_v(const char *format, va_list va);

/**
 * Print a FINISHME message, including its source location.
 */
#define anv_finishme(format, ...) \
   do { \
      static bool reported = false; \
      if (!reported) { \
         __anv_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__); \
         reported = true; \
      } \
   } while (0)

/**
 * Print a perf warning message.  Set INTEL_DEBUG=perf to see these.
 */
#define anv_perf_warn(format, ...) \
   do { \
      static bool reported = false; \
      if (!reported && unlikely(INTEL_DEBUG & DEBUG_PERF)) { \
         __anv_perf_warn(__FILE__, __LINE__, format, ##__VA_ARGS__); \
         reported = true; \
      } \
   } while (0)

/* A non-fatal assert.  Useful for debugging. */
#ifdef DEBUG
#define anv_assert(x) ({ \
   if (unlikely(!(x))) \
      fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x); \
})
#else
#define anv_assert(x)
#endif
/* A dynamically growable, circular buffer.  Elements are added at head and
 * removed from tail.  head and tail are free-running uint32_t indices and we
 * only compute the modulo with size when accessing the array.  This way,
 * number of bytes in the queue is always head - tail, even in case of
 * wraparound.
 */

struct anv_bo {
   uint32_t gem_handle;

   /* Index into the current validation list.  This is used by the
    * validation list building algorithm to track which buffers are already
    * in the validation list so that we can ensure uniqueness.
    */
   uint32_t index;

   /* Last known offset.  This value is provided by the kernel when we
    * execbuf and is used as the presumed offset for the next bunch of
    * relocations.
    */
   uint64_t offset;

   uint64_t size;
   void *map;

   /* We need to set the WRITE flag on winsys bos so GEM will know we're
    * writing to them and synchronize uses on other rings (e.g. if the display
    * server uses the blitter ring).
    */
   bool is_winsys_bo;
};

static inline void
anv_bo_init(struct anv_bo *bo, uint32_t gem_handle, uint64_t size)
{
   bo->gem_handle = gem_handle;
   bo->index = 0;
   bo->offset = -1;
   bo->size = size;
   bo->map = NULL;
   bo->is_winsys_bo = false;
}
/* Represents a lock-free linked list of "free" things.  This is used by
 * both the block pool and the state pools.  Unfortunately, in order to
 * solve the ABA problem, we can't use a single uint32_t head.
 */
union anv_free_list {
   struct {
      int32_t offset;

      /* A simple count that is incremented every time the head changes. */
      uint32_t count;
   };
   uint64_t u64;
};

#define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { 1, 0 } })

struct anv_block_state {
   union {
      struct {
         uint32_t next;
         uint32_t end;
      };
      uint64_t u64;
   };
};
struct anv_block_pool {
   struct anv_device *device;

   struct anv_bo bo;

   /* The offset from the start of the bo to the "center" of the block
    * pool.  Pointers to allocated blocks are given by
    * bo.map + center_bo_offset + offsets.
    */
   uint32_t center_bo_offset;

   /* Current memory map of the block pool.  This pointer may or may not
    * point to the actual beginning of the block pool memory.  If
    * anv_block_pool_alloc_back has ever been called, then this pointer
    * will point to the "center" position of the buffer and all offsets
    * (negative or positive) given out by the block pool alloc functions
    * will be valid relative to this pointer.
    *
    * In particular, map == bo.map + center_offset
    */
   void *map;
   int fd;

   /**
    * Array of mmaps and gem handles owned by the block pool, reclaimed when
    * the block pool is destroyed.
    */
   struct u_vector mmap_cleanups;

   uint32_t block_size;

   union anv_free_list free_list;
   struct anv_block_state state;

   union anv_free_list back_free_list;
   struct anv_block_state back_state;
};
/* Block pools are backed by a fixed-size 2GB memfd */
#define BLOCK_POOL_MEMFD_SIZE (1ul << 31)

/* The center of the block pool is also the middle of the memfd.  This may
 * change in the future if we decide differently for some reason.
 */
#define BLOCK_POOL_MEMFD_CENTER (BLOCK_POOL_MEMFD_SIZE / 2)

static inline uint32_t
anv_block_pool_size(struct anv_block_pool *pool)
{
   return pool->state.end + pool->back_state.end;
}
struct anv_state {
   int32_t offset;
   uint32_t alloc_size;
   void *map;
};

struct anv_fixed_size_state_pool {
   size_t state_size;
   union anv_free_list free_list;
   struct anv_block_state block;
};

#define ANV_MIN_STATE_SIZE_LOG2 6
#define ANV_MAX_STATE_SIZE_LOG2 20

#define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2 + 1)

struct anv_state_pool {
   struct anv_block_pool *block_pool;
   struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];
};

struct anv_state_stream_block;

struct anv_state_stream {
   struct anv_block_pool *block_pool;

   /* The current working block */
   struct anv_state_stream_block *block;

   /* Offset at which the current block starts */
   uint32_t block_offset;

   /* Offset at which to allocate the next state */
   uint32_t next;

   /* Offset at which the current block ends */
   uint32_t end;
};
#define CACHELINE_SIZE 64
#define CACHELINE_MASK 63

static inline void
anv_clflush_range(void *start, size_t size)
{
   void *p = (void *) (((uintptr_t) start) & ~CACHELINE_MASK);
   void *end = start + size;

   while (p < end) {
      __builtin_ia32_clflush(p);
      p += CACHELINE_SIZE;
   }
}

static inline void
anv_flush_range(void *start, size_t size)
{
   __builtin_ia32_mfence();
   anv_clflush_range(start, size);
}

static inline void
anv_invalidate_range(void *start, size_t size)
{
   anv_clflush_range(start, size);
   __builtin_ia32_mfence();
}
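/* Ordering note (illustrative): anv_flush_range fences *before* clflushing
 * so prior CPU writes land before the cache lines are flushed toward memory;
 * anv_invalidate_range fences *after* so the flush completes before any
 * subsequent CPU reads of GPU-written data.  A typical (hypothetical) use:
 *
 *    memcpy(state.map, data, size);
 *    if (!device->info.has_llc)
 *       anv_flush_range(state.map, size);
 */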
VkResult anv_block_pool_init(struct anv_block_pool *pool,
                             struct anv_device *device, uint32_t block_size);
void anv_block_pool_finish(struct anv_block_pool *pool);
int32_t anv_block_pool_alloc(struct anv_block_pool *pool);
int32_t anv_block_pool_alloc_back(struct anv_block_pool *pool);
void anv_block_pool_free(struct anv_block_pool *pool, int32_t offset);
void anv_state_pool_init(struct anv_state_pool *pool,
                         struct anv_block_pool *block_pool);
void anv_state_pool_finish(struct anv_state_pool *pool);
struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,
                                      size_t state_size, size_t alignment);
void anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state);
void anv_state_stream_init(struct anv_state_stream *stream,
                           struct anv_block_pool *block_pool);
void anv_state_stream_finish(struct anv_state_stream *stream);
struct anv_state anv_state_stream_alloc(struct anv_state_stream *stream,
                                        uint32_t size, uint32_t alignment);
/**
 * Implements a pool of re-usable BOs.  The interface is identical to that
 * of block_pool except that each block is its own BO.
 */
struct anv_bo_pool {
   struct anv_device *device;

   void *free_list[16];
};

void anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device);
void anv_bo_pool_finish(struct anv_bo_pool *pool);
VkResult anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo,
                           uint32_t size);
void anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo);
struct anv_scratch_bo {
   bool exists;
   struct anv_bo bo;
};

struct anv_scratch_pool {
   /* Indexed by Per-Thread Scratch Space number (the hardware value) and stage */
   struct anv_scratch_bo bos[16][MESA_SHADER_STAGES];
};

void anv_scratch_pool_init(struct anv_device *device,
                           struct anv_scratch_pool *pool);
void anv_scratch_pool_finish(struct anv_device *device,
                             struct anv_scratch_pool *pool);
struct anv_bo *anv_scratch_pool_alloc(struct anv_device *device,
                                      struct anv_scratch_pool *pool,
                                      gl_shader_stage stage,
                                      unsigned per_thread_scratch);
struct anv_physical_device {
    VK_LOADER_DATA                              _loader_data;

    struct anv_instance *                       instance;
    uint32_t                                    chipset_id;
    char                                        path[20];
    const char *                                name;
    struct gen_device_info                      info;
    uint64_t                                    aperture_size;
    struct brw_compiler *                       compiler;
    struct isl_device                           isl_dev;
    int                                         cmd_parser_version;

    uint32_t                                    eu_total;
    uint32_t                                    subslice_total;

    uint8_t                                     uuid[VK_UUID_SIZE];

    struct wsi_device                           wsi_device;
    int                                         local_fd;
};
struct anv_instance {
    VK_LOADER_DATA                              _loader_data;

    VkAllocationCallbacks                       alloc;

    uint32_t                                    apiVersion;
    int                                         physicalDeviceCount;
    struct anv_physical_device                  physicalDevice;
};

VkResult anv_init_wsi(struct anv_physical_device *physical_device);
void anv_finish_wsi(struct anv_physical_device *physical_device);
struct anv_queue {
    VK_LOADER_DATA                              _loader_data;

    struct anv_device *                         device;

    struct anv_state_pool *                     pool;
};

struct anv_pipeline_cache {
   struct anv_device *                          device;
   pthread_mutex_t                              mutex;

   struct hash_table *                          cache;
};

struct anv_pipeline_bind_map;

void anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
                             struct anv_device *device,
                             bool cache_enabled);
void anv_pipeline_cache_finish(struct anv_pipeline_cache *cache);
struct anv_shader_bin *
anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                          const void *key, uint32_t key_size);
struct anv_shader_bin *
anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                 const void *key_data, uint32_t key_size,
                                 const void *kernel_data, uint32_t kernel_size,
                                 const struct brw_stage_prog_data *prog_data,
                                 uint32_t prog_data_size,
                                 const struct anv_pipeline_bind_map *bind_map);
struct anv_device {
    VK_LOADER_DATA                              _loader_data;

    VkAllocationCallbacks                       alloc;

    struct anv_instance *                       instance;
    uint32_t                                    chipset_id;
    struct gen_device_info                      info;
    struct isl_device                           isl_dev;
    int                                         context_id;
    int                                         fd;
    bool                                        can_chain_batches;
    bool                                        robust_buffer_access;

    struct anv_bo_pool                          batch_bo_pool;

    struct anv_block_pool                       dynamic_state_block_pool;
    struct anv_state_pool                       dynamic_state_pool;

    struct anv_block_pool                       instruction_block_pool;
    struct anv_state_pool                       instruction_state_pool;

    struct anv_block_pool                       surface_state_block_pool;
    struct anv_state_pool                       surface_state_pool;

    struct anv_bo                               workaround_bo;

    struct anv_pipeline_cache                   blorp_shader_cache;
    struct blorp_context                        blorp;

    struct anv_state                            border_colors;

    struct anv_queue                            queue;

    struct anv_scratch_pool                     scratch_pool;

    uint32_t                                    default_mocs;

    pthread_mutex_t                             mutex;
    pthread_cond_t                              queue_submit;
};
static inline void
anv_state_flush(struct anv_device *device, struct anv_state state)
{
   if (device->info.has_llc)
      return;

   anv_flush_range(state.map, state.alloc_size);
}
void anv_device_init_blorp(struct anv_device *device);
void anv_device_finish_blorp(struct anv_device *device);

VkResult anv_device_execbuf(struct anv_device *device,
                            struct drm_i915_gem_execbuffer2 *execbuf,
                            struct anv_bo **execbuf_bos);

void* anv_gem_mmap(struct anv_device *device,
                   uint32_t gem_handle, uint64_t offset, uint64_t size, uint32_t flags);
void anv_gem_munmap(void *p, uint64_t size);
uint32_t anv_gem_create(struct anv_device *device, size_t size);
void anv_gem_close(struct anv_device *device, uint32_t gem_handle);
uint32_t anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
int anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns);
int anv_gem_execbuffer(struct anv_device *device,
                       struct drm_i915_gem_execbuffer2 *execbuf);
int anv_gem_set_tiling(struct anv_device *device, uint32_t gem_handle,
                       uint32_t stride, uint32_t tiling);
int anv_gem_create_context(struct anv_device *device);
int anv_gem_destroy_context(struct anv_device *device, int context);
int anv_gem_get_param(int fd, uint32_t param);
bool anv_gem_get_bit6_swizzle(int fd, uint32_t tiling);
int anv_gem_get_aperture(int fd, uint64_t *size);
int anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle);
uint32_t anv_gem_fd_to_handle(struct anv_device *device, int fd);
int anv_gem_set_caching(struct anv_device *device, uint32_t gem_handle, uint32_t caching);
int anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
                       uint32_t read_domains, uint32_t write_domain);

VkResult anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size);
struct anv_reloc_list {
   size_t                                       num_relocs;
   size_t                                       array_length;
   struct drm_i915_gem_relocation_entry *       relocs;
   struct anv_bo **                             reloc_bos;
};

VkResult anv_reloc_list_init(struct anv_reloc_list *list,
                             const VkAllocationCallbacks *alloc);
void anv_reloc_list_finish(struct anv_reloc_list *list,
                           const VkAllocationCallbacks *alloc);

uint64_t anv_reloc_list_add(struct anv_reloc_list *list,
                            const VkAllocationCallbacks *alloc,
                            uint32_t offset, struct anv_bo *target_bo,
                            uint32_t delta);
struct anv_batch_bo {
   /* Link in the anv_cmd_buffer.owned_batch_bos list */
   struct list_head                             link;

   struct anv_bo                                bo;

   /* Bytes actually consumed in this batch BO */
   size_t                                       length;

   struct anv_reloc_list                        relocs;
};

struct anv_batch {
   const VkAllocationCallbacks *                alloc;

   void *                                       start;
   void *                                       end;
   void *                                       next;

   struct anv_reloc_list *                      relocs;

   /* This callback is called (with the associated user data) in the event
    * that the batch runs out of space.
    */
   VkResult (*extend_cb)(struct anv_batch *, void *);
   void *                                       user_data;
};

void *anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords);
void anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other);
uint64_t anv_batch_emit_reloc(struct anv_batch *batch,
                              void *location, struct anv_bo *bo, uint32_t offset);
VkResult anv_device_submit_simple_batch(struct anv_device *device,
                                        struct anv_batch *batch);
struct anv_address {
   struct anv_bo *bo;
   uint32_t offset;
};

static inline uint64_t
_anv_combine_address(struct anv_batch *batch, void *location,
                     const struct anv_address address, uint32_t delta)
{
   if (address.bo == NULL) {
      return address.offset + delta;
   } else {
      assert(batch->start <= location && location < batch->end);

      return anv_batch_emit_reloc(batch, location, address.bo, address.offset + delta);
   }
}

#define __gen_address_type struct anv_address
#define __gen_user_data struct anv_batch
#define __gen_combine_address _anv_combine_address
/* Wrapper macros needed to work around preprocessor argument issues.  In
 * particular, arguments don't get pre-evaluated if they are concatenated.
 * This means that, if you pass GENX(3DSTATE_PS) into the emit macro, the
 * GENX macro won't get evaluated if the emit macro contains "cmd ## foo".
 * We can work around this easily enough with these helpers.
 */
#define __anv_cmd_length(cmd) cmd ## _length
#define __anv_cmd_length_bias(cmd) cmd ## _length_bias
#define __anv_cmd_header(cmd) cmd ## _header
#define __anv_cmd_pack(cmd) cmd ## _pack
#define __anv_reg_num(reg) reg ## _num

#define anv_pack_struct(dst, struc, ...) do { \
      struct struc __template = { \
         __VA_ARGS__ \
      }; \
      __anv_cmd_pack(struc)(NULL, dst, &__template); \
      VG(VALGRIND_CHECK_MEM_IS_DEFINED(dst, __anv_cmd_length(struc) * 4)); \
   } while (0)

#define anv_batch_emitn(batch, n, cmd, ...) ({ \
      void *__dst = anv_batch_emit_dwords(batch, n); \
      struct cmd __template = { \
         __anv_cmd_header(cmd), \
         .DWordLength = n - __anv_cmd_length_bias(cmd), \
         __VA_ARGS__ \
      }; \
      __anv_cmd_pack(cmd)(batch, __dst, &__template); \
      __dst; \
   })

#define anv_batch_emit_merge(batch, dwords0, dwords1) \
   do { \
      uint32_t *dw; \
\
      STATIC_ASSERT(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1)); \
      dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0)); \
      for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++) \
         dw[i] = (dwords0)[i] | (dwords1)[i]; \
      VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, ARRAY_SIZE(dwords0) * 4));\
   } while (0)

#define anv_batch_emit(batch, cmd, name)                            \
   for (struct cmd name = { __anv_cmd_header(cmd) },                \
        *_dst = anv_batch_emit_dwords(batch, __anv_cmd_length(cmd)); \
        __builtin_expect(_dst != NULL, 1);                          \
        ({ __anv_cmd_pack(cmd)(batch, _dst, &name);                 \
           VG(VALGRIND_CHECK_MEM_IS_DEFINED(_dst, __anv_cmd_length(cmd) * 4)); \
           _dst = NULL;                                             \
         }))
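/* Usage sketch (illustrative): anv_batch_emit abuses a for-loop so that the
 * packet's fields can be assigned inside a braced body; the pack function
 * runs in the loop's increment expression.  The GENX(...) packet and field
 * names below follow genxml conventions and are assumptions, not taken from
 * this file:
 *
 *    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 *       pc.CommandStreamerStallEnable = true;
 *       pc.DepthCacheFlushEnable      = true;
 *    }
 */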
#define GEN7_MOCS (struct GEN7_MEMORY_OBJECT_CONTROL_STATE) {  \
   .GraphicsDataTypeGFDT                        = 0,           \
   .LLCCacheabilityControlLLCCC                 = 0,           \
   .L3CacheabilityControlL3CC                   = 1,           \
}

#define GEN75_MOCS (struct GEN75_MEMORY_OBJECT_CONTROL_STATE) {  \
   .LLCeLLCCacheabilityControlLLCCC             = 0,             \
   .L3CacheabilityControlL3CC                   = 1,             \
}

#define GEN8_MOCS (struct GEN8_MEMORY_OBJECT_CONTROL_STATE) {  \
   .MemoryTypeLLCeLLCCacheabilityControl = WB,                 \
   .TargetCache = L3DefertoPATforLLCeLLCselection,             \
   .AgeforQUADLRU = 0                                          \
}

/* Skylake: MOCS is now an index into an array of 62 different caching
 * configurations programmed by the kernel.
 */
#define GEN9_MOCS (struct GEN9_MEMORY_OBJECT_CONTROL_STATE) {  \
   /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */                 \
   .IndextoMOCSTables                           = 2            \
}

#define GEN9_MOCS_PTE {                                 \
   /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */          \
   .IndextoMOCSTables                           = 1     \
}
struct anv_device_memory {
   struct anv_bo                                bo;
   uint32_t                                     type_index;
   VkDeviceSize                                 map_size;
   void *                                       map;
};

/**
 * Header for Vertex URB Entry (VUE)
 */
struct anv_vue_header {
   uint32_t Reserved;
   uint32_t RTAIndex; /* RenderTargetArrayIndex */
   uint32_t ViewportIndex;
   float PointWidth;
};
struct anv_descriptor_set_binding_layout {
#ifndef NDEBUG
   /* The type of the descriptors in this binding */
   VkDescriptorType type;
#endif

   /* Number of array elements in this binding */
   uint16_t array_size;

   /* Index into the flattened descriptor set */
   uint16_t descriptor_index;

   /* Index into the dynamic state array for a dynamic buffer */
   int16_t dynamic_offset_index;

   /* Index into the descriptor set buffer views */
   int16_t buffer_index;

   struct {
      /* Index into the binding table for the associated surface */
      int16_t surface_index;

      /* Index into the sampler table for the associated sampler */
      int16_t sampler_index;

      /* Index into the image table for the associated image */
      int16_t image_index;
   } stage[MESA_SHADER_STAGES];

   /* Immutable samplers (or NULL if no immutable samplers) */
   struct anv_sampler **immutable_samplers;
};
struct anv_descriptor_set_layout {
   /* Number of bindings in this descriptor set */
   uint16_t binding_count;

   /* Total size of the descriptor set with room for all array entries */
   uint16_t size;

   /* Shader stages affected by this descriptor set */
   uint16_t shader_stages;

   /* Number of buffers in this descriptor set */
   uint16_t buffer_count;

   /* Number of dynamic offsets used by this descriptor set */
   uint16_t dynamic_offset_count;

   /* Bindings in this descriptor set */
   struct anv_descriptor_set_binding_layout binding[0];
};
struct anv_descriptor {
   VkDescriptorType type;

   union {
      struct {
         struct anv_image_view *image_view;
         struct anv_sampler *sampler;

         /* Used to determine whether or not we need the surface state to have
          * the auxiliary buffer enabled.
          */
         enum isl_aux_usage aux_usage;
      };

      struct anv_buffer_view *buffer_view;
   };
};
struct anv_descriptor_set {
   const struct anv_descriptor_set_layout *layout;
   uint32_t size;
   uint32_t buffer_count;
   struct anv_buffer_view *buffer_views;
   struct anv_descriptor descriptors[0];
};
struct anv_buffer_view {
   enum isl_format format; /**< VkBufferViewCreateInfo::format */
   struct anv_bo *bo;
   uint32_t offset; /**< Offset into bo. */
   uint64_t range; /**< VkBufferViewCreateInfo::range */

   struct anv_state surface_state;
   struct anv_state storage_surface_state;
   struct anv_state writeonly_storage_surface_state;

   struct brw_image_param storage_image_param;
};
struct anv_push_descriptor_set {
   struct anv_descriptor_set set;

   /* Put this field right behind anv_descriptor_set so it fills up the
    * descriptors[0] field. */
   struct anv_descriptor descriptors[MAX_PUSH_DESCRIPTORS];

   struct anv_buffer_view buffer_views[MAX_PUSH_DESCRIPTORS];
};

struct anv_descriptor_pool {
   uint32_t size;
   uint32_t next;
   uint32_t free_list;

   struct anv_state_stream surface_state_stream;
   void *surface_state_free_list;

   char data[0];
};
enum anv_descriptor_template_entry_type {
   ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_IMAGE,
   ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_BUFFER,
   ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_BUFFER_VIEW
};

struct anv_descriptor_template_entry {
   /* The type of descriptor in this entry */
   VkDescriptorType type;

   /* Binding in the descriptor set */
   uint32_t binding;

   /* Offset at which to write into the descriptor set binding */
   uint32_t array_element;

   /* Number of elements to write into the descriptor set binding */
   uint32_t array_count;

   /* Offset into the user provided data */
   size_t offset;

   /* Stride between elements into the user provided data */
   size_t stride;
};

struct anv_descriptor_update_template {
   /* The descriptor set this template corresponds to.  This value is only
    * valid if the template was created with the templateType
    * VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR.
    */
   uint8_t set;

   /* Number of entries in this template */
   uint32_t entry_count;

   /* Entries of the template */
   struct anv_descriptor_template_entry entries[0];
};
size_t
anv_descriptor_set_layout_size(const struct anv_descriptor_set_layout *layout);

void
anv_descriptor_set_write_image_view(struct anv_descriptor_set *set,
                                    const struct gen_device_info * const devinfo,
                                    const VkDescriptorImageInfo * const info,
                                    VkDescriptorType type,
                                    uint32_t binding,
                                    uint32_t element);

void
anv_descriptor_set_write_buffer_view(struct anv_descriptor_set *set,
                                     VkDescriptorType type,
                                     struct anv_buffer_view *buffer_view,
                                     uint32_t binding,
                                     uint32_t element);

void
anv_descriptor_set_write_buffer(struct anv_descriptor_set *set,
                                struct anv_device *device,
                                struct anv_state_stream *alloc_stream,
                                VkDescriptorType type,
                                struct anv_buffer *buffer,
                                uint32_t binding,
                                uint32_t element,
                                VkDeviceSize offset,
                                VkDeviceSize range);

void
anv_descriptor_set_write_template(struct anv_descriptor_set *set,
                                  struct anv_device *device,
                                  struct anv_state_stream *alloc_stream,
                                  const struct anv_descriptor_update_template *template,
                                  const void *data);

VkResult
anv_descriptor_set_create(struct anv_device *device,
                          struct anv_descriptor_pool *pool,
                          const struct anv_descriptor_set_layout *layout,
                          struct anv_descriptor_set **out_set);

void
anv_descriptor_set_destroy(struct anv_device *device,
                           struct anv_descriptor_pool *pool,
                           struct anv_descriptor_set *set);
#define ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS UINT8_MAX

struct anv_pipeline_binding {
   /* The descriptor set this surface corresponds to.  The special value of
    * ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS indicates that the offset refers
    * to a color attachment and not a regular descriptor.
    */
   uint8_t set;

   /* Binding in the descriptor set */
   uint8_t binding;

   /* Index in the binding */
   uint8_t index;

   /* Input attachment index (relative to the subpass) */
   uint8_t input_attachment_index;

   /* For a storage image, whether it is write-only */
   bool write_only;
};
struct anv_pipeline_layout {
   struct {
      struct anv_descriptor_set_layout *layout;
      uint32_t dynamic_offset_start;
   } set[MAX_SETS];

   uint32_t num_sets;

   struct {
      bool has_dynamic_offsets;
   } stage[MESA_SHADER_STAGES];

   unsigned char sha1[20];
};
struct anv_buffer {
   struct anv_device *                          device;
   VkDeviceSize                                 size;

   VkBufferUsageFlags                           usage;

   /* Set when bound */
   struct anv_bo *                              bo;
   VkDeviceSize                                 offset;
};
enum anv_cmd_dirty_bits {
   ANV_CMD_DIRTY_DYNAMIC_VIEWPORT                  = 1 << 0, /* VK_DYNAMIC_STATE_VIEWPORT */
   ANV_CMD_DIRTY_DYNAMIC_SCISSOR                   = 1 << 1, /* VK_DYNAMIC_STATE_SCISSOR */
   ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH                = 1 << 2, /* VK_DYNAMIC_STATE_LINE_WIDTH */
   ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS                = 1 << 3, /* VK_DYNAMIC_STATE_DEPTH_BIAS */
   ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS           = 1 << 4, /* VK_DYNAMIC_STATE_BLEND_CONSTANTS */
   ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS              = 1 << 5, /* VK_DYNAMIC_STATE_DEPTH_BOUNDS */
   ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK      = 1 << 6, /* VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK */
   ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK        = 1 << 7, /* VK_DYNAMIC_STATE_STENCIL_WRITE_MASK */
   ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE         = 1 << 8, /* VK_DYNAMIC_STATE_STENCIL_REFERENCE */
   ANV_CMD_DIRTY_DYNAMIC_ALL                       = (1 << 9) - 1,
   ANV_CMD_DIRTY_PIPELINE                          = 1 << 9,
   ANV_CMD_DIRTY_INDEX_BUFFER                      = 1 << 10,
   ANV_CMD_DIRTY_RENDER_TARGETS                    = 1 << 11,
};
typedef uint32_t anv_cmd_dirty_mask_t;
enum anv_pipe_bits {
   ANV_PIPE_DEPTH_CACHE_FLUSH_BIT            = (1 << 0),
   ANV_PIPE_STALL_AT_SCOREBOARD_BIT          = (1 << 1),
   ANV_PIPE_STATE_CACHE_INVALIDATE_BIT       = (1 << 2),
   ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT    = (1 << 3),
   ANV_PIPE_VF_CACHE_INVALIDATE_BIT          = (1 << 4),
   ANV_PIPE_DATA_CACHE_FLUSH_BIT             = (1 << 5),
   ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT     = (1 << 10),
   ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT = (1 << 11),
   ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT    = (1 << 12),
   ANV_PIPE_DEPTH_STALL_BIT                  = (1 << 13),
   ANV_PIPE_CS_STALL_BIT                     = (1 << 20),

   /* This bit does not exist directly in PIPE_CONTROL.  Instead it means that
    * a flush has happened but not a CS stall.  The next time we do any sort
    * of invalidation we need to insert a CS stall at that time.  Otherwise,
    * we would have to CS stall on every flush which could be bad.
    */
   ANV_PIPE_NEEDS_CS_STALL_BIT               = (1 << 21),
};

#define ANV_PIPE_FLUSH_BITS ( \
   ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | \
   ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
   ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT)

#define ANV_PIPE_STALL_BITS ( \
   ANV_PIPE_STALL_AT_SCOREBOARD_BIT | \
   ANV_PIPE_DEPTH_STALL_BIT | \
   ANV_PIPE_CS_STALL_BIT)

#define ANV_PIPE_INVALIDATE_BITS ( \
   ANV_PIPE_STATE_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_VF_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
   ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT)
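/* Usage sketch (illustrative): callers accumulate bits into
 * anv_cmd_state::pending_pipe_bits (declared below) and a gen-specific flush
 * routine later turns them into actual PIPE_CONTROLs.  A hypothetical call
 * site:
 *
 *    cmd_buffer->state.pending_pipe_bits |=
 *       ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
 */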
struct anv_vertex_binding {
   struct anv_buffer *                          buffer;
   VkDeviceSize                                 offset;
};

struct anv_push_constants {
   /* Current allocated size of this push constants data structure.
    * Because a decent chunk of it may not be used (images on SKL, for
    * instance), we won't actually allocate the entire structure up-front.
    */
   uint32_t size;

   /* Push constant data provided by the client through vkPushConstants */
   uint8_t client_data[MAX_PUSH_CONSTANTS_SIZE];

   /* Our hardware only provides zero-based vertex and instance id so, in
    * order to satisfy the vulkan requirements, we may have to push one or
    * both of these into the shader.
    */
   uint32_t base_vertex;
   uint32_t base_instance;

   /* Offsets and ranges for dynamically bound buffers */
   struct {
      uint32_t offset;
      uint32_t range;
   } dynamic[MAX_DYNAMIC_BUFFERS];

   /* Image data for image_load_store on pre-SKL */
   struct brw_image_param images[MAX_IMAGES];
};
struct anv_dynamic_state {
   struct {
      uint32_t                                  count;
      VkViewport                                viewports[MAX_VIEWPORTS];
   } viewport;

   struct {
      uint32_t                                  count;
      VkRect2D                                  scissors[MAX_SCISSORS];
   } scissor;

   float                                        line_width;

   struct {
      float                                     bias;
      float                                     clamp;
      float                                     slope;
   } depth_bias;

   float                                        blend_constants[4];

   struct {
      float                                     min;
      float                                     max;
   } depth_bounds;

   struct {
      uint32_t                                  front;
      uint32_t                                  back;
   } stencil_compare_mask;

   struct {
      uint32_t                                  front;
      uint32_t                                  back;
   } stencil_write_mask;

   struct {
      uint32_t                                  front;
      uint32_t                                  back;
   } stencil_reference;
};

extern const struct anv_dynamic_state default_dynamic_state;

void anv_dynamic_state_copy(struct anv_dynamic_state *dest,
                            const struct anv_dynamic_state *src,
                            uint32_t copy_mask);
/**
 * Attachment state when recording a renderpass instance.
 *
 * The clear value is valid only if there exists a pending clear.
 */
struct anv_attachment_state {
   enum isl_aux_usage                           aux_usage;
   enum isl_aux_usage                           input_aux_usage;
   struct anv_state                             color_rt_state;
   struct anv_state                             input_att_state;

   VkImageLayout                                current_layout;
   VkImageAspectFlags                           pending_clear_aspects;

   VkClearValue                                 clear_value;
   bool                                         clear_color_is_zero_one;
};
/** State required while building cmd buffer */
struct anv_cmd_state {
   /* PIPELINE_SELECT.PipelineSelection */
   uint32_t                                     current_pipeline;
   const struct gen_l3_config *                 current_l3_config;
   uint32_t                                     vb_dirty;
   anv_cmd_dirty_mask_t                         dirty;
   anv_cmd_dirty_mask_t                         compute_dirty;
   enum anv_pipe_bits                           pending_pipe_bits;
   uint32_t                                     num_workgroups_offset;
   struct anv_bo *                              num_workgroups_bo;
   VkShaderStageFlags                           descriptors_dirty;
   VkShaderStageFlags                           push_constants_dirty;
   uint32_t                                     scratch_size;
   struct anv_pipeline *                        pipeline;
   struct anv_pipeline *                        compute_pipeline;
   struct anv_framebuffer *                     framebuffer;
   struct anv_render_pass *                     pass;
   struct anv_subpass *                         subpass;
   VkRect2D                                     render_area;
   uint32_t                                     restart_index;
   struct anv_vertex_binding                    vertex_bindings[MAX_VBS];
   struct anv_descriptor_set *                  descriptors[MAX_SETS];
   VkShaderStageFlags                           push_constant_stages;
   struct anv_push_constants *                  push_constants[MESA_SHADER_STAGES];
   struct anv_state                             binding_tables[MESA_SHADER_STAGES];
   struct anv_state                             samplers[MESA_SHADER_STAGES];
   struct anv_dynamic_state                     dynamic;

   struct anv_push_descriptor_set               push_descriptor;

   /**
    * Whether or not the gen8 PMA fix is enabled.  We ensure that, at the top
    * of any command buffer it is disabled by disabling it in EndCommandBuffer
    * and before invoking the secondary in ExecuteCommands.
    */
   bool                                         pma_fix_enabled;

   /**
    * Whether or not we know for certain that HiZ is enabled for the current
    * subpass.  If, for whatever reason, we are unsure as to whether HiZ is
    * enabled or not, this will be false.
    */
   bool                                         hiz_enabled;

   /**
    * Array length is anv_cmd_state::pass::attachment_count. Array content is
    * valid only when recording a render pass instance.
    */
   struct anv_attachment_state *                attachments;

   /**
    * Surface states for color render targets.  These are stored in a single
    * flat array.  For depth-stencil attachments, the surface state is simply
    * left blank.
    */
   struct anv_state                             render_pass_states;

   /**
    * A null surface state of the right size to match the framebuffer.  This
    * is one of the states in render_pass_states.
    */
   struct anv_state                             null_surface_state;

   struct {
      struct anv_buffer *                       index_buffer;
      uint32_t                                  index_type; /**< 3DSTATE_INDEX_BUFFER.IndexFormat */
      uint32_t                                  index_offset;
   } gen7;
};
struct anv_cmd_pool {
   VkAllocationCallbacks                        alloc;
   struct list_head                             cmd_buffers;
};

#define ANV_CMD_BUFFER_BATCH_SIZE 8192
enum anv_cmd_buffer_exec_mode {
   ANV_CMD_BUFFER_EXEC_MODE_PRIMARY,
   ANV_CMD_BUFFER_EXEC_MODE_EMIT,
   ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT,
   ANV_CMD_BUFFER_EXEC_MODE_CHAIN,
   ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN,
};

struct anv_cmd_buffer {
   VK_LOADER_DATA                               _loader_data;

   struct anv_device *                          device;

   struct anv_cmd_pool *                        pool;
   struct list_head                             pool_link;

   struct anv_batch                             batch;

   /* Fields required for the actual chain of anv_batch_bo's.
    *
    * These fields are initialized by anv_cmd_buffer_init_batch_bo_chain().
    */
   struct list_head                             batch_bos;
   enum anv_cmd_buffer_exec_mode                exec_mode;

   /* A vector of anv_batch_bo pointers for every batch or surface buffer
    * referenced by this command buffer
    *
    * initialized by anv_cmd_buffer_init_batch_bo_chain()
    */
   struct u_vector                              seen_bbos;

   /* A vector of int32_t's for every block of binding tables.
    *
    * initialized by anv_cmd_buffer_init_batch_bo_chain()
    */
   struct u_vector                              bt_blocks;
   uint32_t                                     bt_next;

   struct anv_reloc_list                        surface_relocs;
   /** Last seen surface state block pool center bo offset */
   uint32_t                                     last_ss_pool_center;

   /* Serial for tracking buffer completion */
   uint32_t                                     serial;

   /* Stream objects for storing temporary data */
   struct anv_state_stream                      surface_state_stream;
   struct anv_state_stream                      dynamic_state_stream;

   VkCommandBufferUsageFlags                    usage_flags;
   VkCommandBufferLevel                         level;

   struct anv_cmd_state                         state;
};
VkResult anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
                                  struct anv_cmd_buffer *secondary);
void anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer);
VkResult anv_cmd_buffer_execbuf(struct anv_device *device,
                                struct anv_cmd_buffer *cmd_buffer);

VkResult anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer);

VkResult
anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
                                          gl_shader_stage stage, uint32_t size);
#define anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, field) \
   anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, \
      (offsetof(struct anv_push_constants, field) + \
       sizeof(cmd_buffer->state.push_constants[0]->field)))

struct anv_state anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
                                             const void *data, uint32_t size, uint32_t alignment);
struct anv_state anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
                                              uint32_t *a, uint32_t *b,
                                              uint32_t dwords, uint32_t alignment);

struct anv_address
anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer);
struct anv_state
anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t entries, uint32_t *state_offset);
struct anv_state
anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer);
struct anv_state
anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t size, uint32_t alignment);

VkResult
anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer);

void gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer);
void gen8_cmd_buffer_emit_depth_viewport(struct anv_cmd_buffer *cmd_buffer,
                                         bool depth_clamp_enable);
void gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer);

void anv_cmd_buffer_setup_attachments(struct anv_cmd_buffer *cmd_buffer,
                                      struct anv_render_pass *pass,
                                      struct anv_framebuffer *framebuffer,
                                      const VkClearValue *clear_values);

void anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer);

struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
                              gl_shader_stage stage);
struct anv_state
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer);

void anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer);

const struct anv_image_view *
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer);

VkResult
anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                         uint32_t num_entries,
                                         uint32_t *state_offset);

void anv_cmd_buffer_dump(struct anv_cmd_buffer *cmd_buffer);
enum anv_fence_state {
   /** Indicates that this is a new (or newly reset fence) */
   ANV_FENCE_STATE_RESET,

   /** Indicates that this fence has been submitted to the GPU but is still
    * (as far as we know) in use by the GPU.
    */
   ANV_FENCE_STATE_SUBMITTED,

   ANV_FENCE_STATE_SIGNALED,
};

struct anv_fence {
   struct anv_bo bo;
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   enum anv_fence_state state;
};

struct anv_event {
   uint64_t                                     semaphore;
   struct anv_state                             state;
};
struct anv_shader_module {
   unsigned char                                sha1[20];
   uint32_t                                     size;
   char                                         data[0];
};

void anv_hash_shader(unsigned char *hash, const void *key, size_t key_size,
                     struct anv_shader_module *module,
                     const char *entrypoint,
                     const struct anv_pipeline_layout *pipeline_layout,
                     const VkSpecializationInfo *spec_info);
static inline gl_shader_stage
vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
{
   assert(__builtin_popcount(vk_stage) == 1);
   return ffs(vk_stage) - 1;
}

static inline VkShaderStageFlagBits
mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
{
   return (1 << mesa_stage);
}

#define ANV_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)

#define anv_foreach_stage(stage, stage_bits)                         \
   for (gl_shader_stage stage,                                       \
        __tmp = (gl_shader_stage)((stage_bits) & ANV_STAGE_MASK);    \
        stage = __builtin_ffs(__tmp) - 1, __tmp;                     \
        __tmp &= ~(1 << (stage)))
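/* Usage sketch (illustrative): anv_foreach_stage visits each stage whose bit
 * is set in a VkShaderStageFlags mask:
 *
 *    anv_foreach_stage(s, VK_SHADER_STAGE_VERTEX_BIT |
 *                         VK_SHADER_STAGE_FRAGMENT_BIT) {
 *       // s is MESA_SHADER_VERTEX, then MESA_SHADER_FRAGMENT
 *    }
 */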
struct anv_pipeline_bind_map {
   uint32_t surface_count;
   uint32_t sampler_count;
   uint32_t image_count;

   struct anv_pipeline_binding *                surface_to_descriptor;
   struct anv_pipeline_binding *                sampler_to_descriptor;
};

struct anv_shader_bin_key {
   uint32_t size;
   uint8_t data[0];
};

struct anv_shader_bin {
   uint32_t ref_cnt;

   const struct anv_shader_bin_key *key;

   struct anv_state kernel;
   uint32_t kernel_size;

   const struct brw_stage_prog_data *prog_data;
   uint32_t prog_data_size;

   struct anv_pipeline_bind_map bind_map;

   /* Prog data follows, then params, then the key, all aligned to 8-bytes */
};

struct anv_shader_bin *
anv_shader_bin_create(struct anv_device *device,
                      const void *key, uint32_t key_size,
                      const void *kernel, uint32_t kernel_size,
                      const struct brw_stage_prog_data *prog_data,
                      uint32_t prog_data_size, const void *prog_data_param,
                      const struct anv_pipeline_bind_map *bind_map);

void
anv_shader_bin_destroy(struct anv_device *device, struct anv_shader_bin *shader);

static inline void
anv_shader_bin_ref(struct anv_shader_bin *shader)
{
   assert(shader->ref_cnt >= 1);
   __sync_fetch_and_add(&shader->ref_cnt, 1);
}

static inline void
anv_shader_bin_unref(struct anv_device *device, struct anv_shader_bin *shader)
{
   assert(shader->ref_cnt >= 1);
   if (__sync_fetch_and_add(&shader->ref_cnt, -1) == 1)
      anv_shader_bin_destroy(device, shader);
}
struct anv_pipeline {
   struct anv_device *                          device;
   struct anv_batch                             batch;
   uint32_t                                     batch_data[512];
   struct anv_reloc_list                        batch_relocs;
   uint32_t                                     dynamic_state_mask;
   struct anv_dynamic_state                     dynamic_state;

   struct anv_pipeline_layout *                 layout;

   bool                                         needs_data_cache;

   struct anv_shader_bin *                      shaders[MESA_SHADER_STAGES];

   struct {
      const struct gen_l3_config *              l3_config;
      uint32_t                                  total_size;
   } urb;

   VkShaderStageFlags                           active_stages;
   struct anv_state                             blend_state;

   uint32_t                                     vb_used;
   uint32_t                                     binding_stride[MAX_VBS];
   bool                                         instancing_enable[MAX_VBS];
   bool                                         primitive_restart;
   uint32_t                                     topology;

   uint32_t                                     cs_right_mask;

   bool                                         writes_depth;
   bool                                         depth_test_enable;
   bool                                         writes_stencil;
   bool                                         stencil_test_enable;
   bool                                         depth_clamp_enable;

   struct {
      uint32_t                                  sf[7];
      uint32_t                                  depth_stencil_state[3];
   } gen7;

   struct {
      uint32_t                                  sf[4];
      uint32_t                                  raster[5];
      uint32_t                                  wm_depth_stencil[3];
   } gen8;

   struct {
      uint32_t                                  wm_depth_stencil[4];
   } gen9;

   uint32_t                                     interface_descriptor_data[8];
};
static inline bool
anv_pipeline_has_stage(const struct anv_pipeline *pipeline,
                       gl_shader_stage stage)
{
   return (pipeline->active_stages & mesa_to_vk_shader_stage(stage)) != 0;
}

#define ANV_DECL_GET_PROG_DATA_FUNC(prefix, stage)                   \
static inline const struct brw_##prefix##_prog_data *                \
get_##prefix##_prog_data(const struct anv_pipeline *pipeline)        \
{                                                                    \
   if (anv_pipeline_has_stage(pipeline, stage)) {                    \
      return (const struct brw_##prefix##_prog_data *)               \
             pipeline->shaders[stage]->prog_data;                    \
   } else {                                                          \
      return NULL;                                                   \
   }                                                                 \
}

ANV_DECL_GET_PROG_DATA_FUNC(vs, MESA_SHADER_VERTEX)
ANV_DECL_GET_PROG_DATA_FUNC(tcs, MESA_SHADER_TESS_CTRL)
ANV_DECL_GET_PROG_DATA_FUNC(tes, MESA_SHADER_TESS_EVAL)
ANV_DECL_GET_PROG_DATA_FUNC(gs, MESA_SHADER_GEOMETRY)
ANV_DECL_GET_PROG_DATA_FUNC(wm, MESA_SHADER_FRAGMENT)
ANV_DECL_GET_PROG_DATA_FUNC(cs, MESA_SHADER_COMPUTE)
static inline const struct brw_vue_prog_data *
anv_pipeline_get_last_vue_prog_data(const struct anv_pipeline *pipeline)
{
   if (anv_pipeline_has_stage(pipeline, MESA_SHADER_GEOMETRY))
      return &get_gs_prog_data(pipeline)->base;
   else if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
      return &get_tes_prog_data(pipeline)->base;
   else
      return &get_vs_prog_data(pipeline)->base;
}

VkResult
anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
                  struct anv_pipeline_cache *cache,
                  const VkGraphicsPipelineCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *alloc);

VkResult
anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkComputePipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info);
struct anv_format {
   enum isl_format isl_format:16;
   struct isl_swizzle swizzle;
};

struct anv_format
anv_get_format(const struct gen_device_info *devinfo, VkFormat format,
               VkImageAspectFlags aspect, VkImageTiling tiling);

static inline enum isl_format
anv_get_isl_format(const struct gen_device_info *devinfo, VkFormat vk_format,
                   VkImageAspectFlags aspect, VkImageTiling tiling)
{
   return anv_get_format(devinfo, vk_format, aspect, tiling).isl_format;
}

static inline struct isl_swizzle
anv_swizzle_for_render(struct isl_swizzle swizzle)
{
   /* Sometimes the swizzle will have alpha map to one.  We do this to fake
    * RGB as RGBA for texturing
    */
   assert(swizzle.a == ISL_CHANNEL_SELECT_ONE ||
          swizzle.a == ISL_CHANNEL_SELECT_ALPHA);

   /* But it doesn't matter what we render to that channel */
   swizzle.a = ISL_CHANNEL_SELECT_ALPHA;

   return swizzle;
}

void
anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm);
/**
 * Subsurface of an anv_image.
 */
struct anv_surface {
   /** Valid only if isl_surf::size > 0. */
   struct isl_surf isl;

   /**
    * Offset from VkImage's base address, as bound by vkBindImageMemory().
    */
   uint32_t offset;
};

struct anv_image {
   VkImageType type;
   /* The original VkFormat provided by the client.  This may not match any
    * of the actual surface formats.
    */
   VkFormat vk_format;
   VkImageAspectFlags aspects;
   VkExtent3D extent;
   uint32_t levels;
   uint32_t array_size;
   uint32_t samples; /**< VkImageCreateInfo::samples */
   VkImageUsageFlags usage; /**< Superset of VkImageCreateInfo::usage. */
   VkImageTiling tiling; /** VkImageCreateInfo::tiling */

   VkDeviceSize size;
   uint32_t alignment;

   /* Set when bound */
   struct anv_bo *bo;
   VkDeviceSize offset;

   /**
    * Image subsurfaces
    *
    * For each foo, anv_image::foo_surface is valid if and only if
    * anv_image::aspects has a foo aspect.
    *
    * The hardware requires that the depth buffer and stencil buffer be
    * separate surfaces.  From Vulkan's perspective, though, depth and stencil
    * reside in the same VkImage.  To satisfy both the hardware and Vulkan, we
    * allocate the depth and stencil buffers as separate surfaces in the same
    * image.
    */
   union {
      struct anv_surface color_surface;

      struct {
         struct anv_surface depth_surface;
         struct anv_surface stencil_surface;
      };
   };

   /**
    * For color images, this is the aux usage for this image when not used as a
    * color attachment.
    *
    * For depth/stencil images, this is set to ISL_AUX_USAGE_HIZ if the image
    * has a HiZ buffer.
    */
   enum isl_aux_usage aux_usage;

   struct anv_surface aux_surface;
};
/* Returns true if a HiZ-enabled depth buffer can be sampled from. */
static inline bool
anv_can_sample_with_hiz(const struct gen_device_info * const devinfo,
                        const VkImageAspectFlags aspect_mask,
                        const uint32_t samples)
{
   /* Validate the inputs. */
   assert(devinfo && aspect_mask && samples);
   return devinfo->gen >= 8 && (aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) &&
          samples == 1;
}

void
anv_gen8_hiz_op_resolve(struct anv_cmd_buffer *cmd_buffer,
                        const struct anv_image *image,
                        enum blorp_hiz_op op);
enum isl_aux_usage
anv_layout_to_aux_usage(const struct gen_device_info * const devinfo,
                        const struct anv_image *image,
                        const VkImageAspectFlags aspects,
                        const VkImageLayout layout);

static inline uint32_t
anv_get_layerCount(const struct anv_image *image,
                   const VkImageSubresourceRange *range)
{
   return range->layerCount == VK_REMAINING_ARRAY_LAYERS ?
          image->array_size - range->baseArrayLayer : range->layerCount;
}

static inline uint32_t
anv_get_levelCount(const struct anv_image *image,
                   const VkImageSubresourceRange *range)
{
   return range->levelCount == VK_REMAINING_MIP_LEVELS ?
          image->levels - range->baseMipLevel : range->levelCount;
}
struct anv_image_view {
   const struct anv_image *image; /**< VkImageViewCreateInfo::image */
   struct anv_bo *bo;
   uint32_t offset; /**< Offset into bo. */

   struct isl_view isl;

   VkImageAspectFlags aspect_mask;
   VkFormat vk_format;
   VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */

   /** RENDER_SURFACE_STATE when using image as a sampler surface. */
   struct anv_state sampler_surface_state;

   /**
    * RENDER_SURFACE_STATE when using image as a sampler surface with the
    * auxiliary buffer disabled.
    */
   struct anv_state no_aux_sampler_surface_state;

   /**
    * RENDER_SURFACE_STATE when using image as a storage image. Separate states
    * for write-only and readable, using the real format for write-only and the
    * lowered format for readable.
    */
   struct anv_state storage_surface_state;
   struct anv_state writeonly_storage_surface_state;

   struct brw_image_param storage_image_param;
};
struct anv_image_create_info {
   const VkImageCreateInfo *vk_info;

   /** An opt-in bitmask which filters an ISL-mapping of the Vulkan tiling. */
   isl_tiling_flags_t isl_tiling_flags;

   uint32_t stride;
};

VkResult anv_image_create(VkDevice _device,
                          const struct anv_image_create_info *info,
                          const VkAllocationCallbacks* alloc,
                          VkImage *pImage);

const struct anv_surface *
anv_image_get_surface_for_aspect_mask(const struct anv_image *image,
                                      VkImageAspectFlags aspect_mask);

enum isl_format
anv_isl_format_for_descriptor_type(VkDescriptorType type);
static inline struct VkExtent3D
anv_sanitize_image_extent(const VkImageType imageType,
                          const struct VkExtent3D imageExtent)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkExtent3D) { imageExtent.width, 1, 1 };
   case VK_IMAGE_TYPE_2D:
      return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
   case VK_IMAGE_TYPE_3D:
      return imageExtent;
   default:
      unreachable("invalid image type");
   }
}

static inline struct VkOffset3D
anv_sanitize_image_offset(const VkImageType imageType,
                          const struct VkOffset3D imageOffset)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkOffset3D) { imageOffset.x, 0, 0 };
   case VK_IMAGE_TYPE_2D:
      return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
   case VK_IMAGE_TYPE_3D:
      return imageOffset;
   default:
      unreachable("invalid image type");
   }
}
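/* Usage sketch (illustrative): sanitizing collapses the dimensions that the
 * given image type doesn't use, even though the Vulkan structs carry them:
 *
 *    VkExtent3D e = anv_sanitize_image_extent(VK_IMAGE_TYPE_1D,
 *                                             (VkExtent3D) { 256, 7, 3 });
 *    // e == { 256, 1, 1 }
 */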
void anv_fill_buffer_surface_state(struct anv_device *device,
                                   struct anv_state state,
                                   enum isl_format format,
                                   uint32_t offset, uint32_t range,
                                   uint32_t stride);

void anv_image_view_fill_image_param(struct anv_device *device,
                                     struct anv_image_view *view,
                                     struct brw_image_param *param);
void anv_buffer_view_fill_image_param(struct anv_device *device,
                                      struct anv_buffer_view *view,
                                      struct brw_image_param *param);
struct anv_sampler {
   uint32_t state[4];
};

struct anv_framebuffer {
   uint32_t                                     width;
   uint32_t                                     height;
   uint32_t                                     layers;

   uint32_t                                     attachment_count;
   struct anv_image_view *                      attachments[0];
};

struct anv_subpass {
   uint32_t                                     attachment_count;

   /**
    * A pointer to all attachment references used in this subpass.
    * Only valid if ::attachment_count > 0.
    */
   VkAttachmentReference *                      attachments;
   uint32_t                                     input_count;
   VkAttachmentReference *                      input_attachments;
   uint32_t                                     color_count;
   VkAttachmentReference *                      color_attachments;
   VkAttachmentReference *                      resolve_attachments;

   VkAttachmentReference                        depth_stencil_attachment;

   /** Subpass has a depth/stencil self-dependency */
   bool                                         has_ds_self_dep;

   /** Subpass has at least one resolve attachment */
   bool                                         has_resolve;
};

enum anv_subpass_usage {
   ANV_SUBPASS_USAGE_DRAW =         (1 << 0),
   ANV_SUBPASS_USAGE_INPUT =        (1 << 1),
   ANV_SUBPASS_USAGE_RESOLVE_SRC =  (1 << 2),
   ANV_SUBPASS_USAGE_RESOLVE_DST =  (1 << 3),
};
struct anv_render_pass_attachment {
   /* TODO: Consider using VkAttachmentDescription instead of storing each of
    * its members individually.
    */
   VkFormat                                     format;
   uint32_t                                     samples;
   VkImageUsageFlags                            usage;
   VkAttachmentLoadOp                           load_op;
   VkAttachmentStoreOp                          store_op;
   VkAttachmentLoadOp                           stencil_load_op;
   VkImageLayout                                initial_layout;
   VkImageLayout                                final_layout;

   /* An array, indexed by subpass id, of how the attachment will be used. */
   enum anv_subpass_usage *                     subpass_usage;

   /* The subpass id in which the attachment will be used last. */
   uint32_t                                     last_subpass_idx;
};

struct anv_render_pass {
   uint32_t                                     attachment_count;
   uint32_t                                     subpass_count;
   VkAttachmentReference *                      subpass_attachments;
   enum anv_subpass_usage *                     subpass_usages;
   struct anv_render_pass_attachment *          attachments;
   struct anv_subpass                           subpasses[0];
};
struct anv_query_pool_slot {
   uint64_t begin;
   uint64_t end;
   uint64_t available;
};

struct anv_query_pool {
   VkQueryType                                  type;
   uint32_t                                     slots;
   struct anv_bo                                bo;
};
void *anv_lookup_entrypoint(const struct gen_device_info *devinfo,
                            const char *name);

void anv_dump_image_to_ppm(struct anv_device *device,
                           struct anv_image *image, unsigned miplevel,
                           unsigned array_layer, VkImageAspectFlagBits aspect,
                           const char *filename);

enum anv_dump_action {
   ANV_DUMP_FRAMEBUFFERS_BIT = 0x1,
};

void anv_dump_start(struct anv_device *device, enum anv_dump_action actions);
void anv_dump_finish(void);

void anv_dump_add_framebuffer(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_framebuffer *fb);
static inline uint32_t
anv_get_subpass_id(const struct anv_cmd_state * const cmd_state)
{
   /* This function must be called from within a subpass. */
   assert(cmd_state->pass && cmd_state->subpass);

   const uint32_t subpass_id = cmd_state->subpass - cmd_state->pass->subpasses;

   /* The id of this subpass shouldn't exceed the number of subpasses in this
    * render pass minus 1.
    */
   assert(subpass_id < cmd_state->pass->subpass_count);
   return subpass_id;
}
#define ANV_DEFINE_HANDLE_CASTS(__anv_type, __VkType)              \
                                                                   \
   static inline struct __anv_type *                               \
   __anv_type ## _from_handle(__VkType _handle)                    \
   {                                                               \
      return (struct __anv_type *) _handle;                        \
   }                                                               \
                                                                   \
   static inline __VkType                                          \
   __anv_type ## _to_handle(struct __anv_type *_obj)               \
   {                                                               \
      return (__VkType) _obj;                                      \
   }

#define ANV_DEFINE_NONDISP_HANDLE_CASTS(__anv_type, __VkType)      \
                                                                   \
   static inline struct __anv_type *                               \
   __anv_type ## _from_handle(__VkType _handle)                    \
   {                                                               \
      return (struct __anv_type *)(uintptr_t) _handle;             \
   }                                                               \
                                                                   \
   static inline __VkType                                          \
   __anv_type ## _to_handle(struct __anv_type *_obj)               \
   {                                                               \
      return (__VkType)(uintptr_t) _obj;                           \
   }

#define ANV_FROM_HANDLE(__anv_type, __name, __handle) \
   struct __anv_type *__name = __anv_type ## _from_handle(__handle)
ANV_DEFINE_HANDLE_CASTS(anv_cmd_buffer, VkCommandBuffer)
ANV_DEFINE_HANDLE_CASTS(anv_device, VkDevice)
ANV_DEFINE_HANDLE_CASTS(anv_instance, VkInstance)
ANV_DEFINE_HANDLE_CASTS(anv_physical_device, VkPhysicalDevice)
ANV_DEFINE_HANDLE_CASTS(anv_queue, VkQueue)

ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_cmd_pool, VkCommandPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer, VkBuffer)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer_view, VkBufferView)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_pool, VkDescriptorPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set, VkDescriptorSet)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set_layout, VkDescriptorSetLayout)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_update_template, VkDescriptorUpdateTemplateKHR)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_device_memory, VkDeviceMemory)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_fence, VkFence)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_event, VkEvent)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_framebuffer, VkFramebuffer)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image, VkImage)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image_view, VkImageView)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_cache, VkPipelineCache)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline, VkPipeline)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_layout, VkPipelineLayout)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_query_pool, VkQueryPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_render_pass, VkRenderPass)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_sampler, VkSampler)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_shader_module, VkShaderModule)
/* Gen-specific function declarations */
#ifdef genX
#  include "anv_genX.h"
#else
#  define genX(x) gen7_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen75_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen8_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen9_##x
#  include "anv_genX.h"
#  undef genX
#endif

#endif /* ANV_PRIVATE_H */