/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef ANV_PRIVATE_H
#define ANV_PRIVATE_H

#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>
#include <assert.h>
#include <stdint.h>
#include <i915_drm.h>

#ifdef HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
#else
#define VG(x)
#endif
#include "common/gen_device_info.h"
#include "blorp/blorp.h"
#include "brw_compiler.h"
#include "util/macros.h"
#include "util/list.h"
#include "util/u_vector.h"
#include "util/vk_alloc.h"
/* Pre-declarations needed for WSI entrypoints */
struct wl_surface;
struct wl_display;
typedef struct xcb_connection_t xcb_connection_t;
typedef uint32_t xcb_visualid_t;
typedef uint32_t xcb_window_t;
struct gen_l3_config;

#include <vulkan/vulkan.h>
#include <vulkan/vulkan_intel.h>
#include <vulkan/vk_icd.h>

#include "anv_entrypoints.h"
#include "brw_context.h"
#include "isl/isl.h"
#define MAX_VBS 32
#define MAX_SETS 8
#define MAX_RTS 8
#define MAX_VIEWPORTS 16
#define MAX_SCISSORS 16
#define MAX_PUSH_CONSTANTS_SIZE 128
#define MAX_DYNAMIC_BUFFERS 16
#define MAX_IMAGES 8

#define MAX_SAMPLES_LOG2 4 /* SKL supports 16 samples */
#define anv_noreturn __attribute__((__noreturn__))
#define anv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
static inline uint32_t
align_down_npot_u32(uint32_t v, uint32_t a)
{
   return v - (v % a);
}

static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

static inline uint64_t
align_u64(uint64_t v, uint64_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

static inline int32_t
align_i32(int32_t v, int32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

/** Alignment must be a power of 2. */
static inline bool
anv_is_aligned(uintmax_t n, uintmax_t a)
{
   assert(a == (a & -a));
   return (n & (a - 1)) == 0;
}
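
/* Illustrative examples (not from the original header): the helpers above
 * round with bit masks for power-of-two alignments and with modulo for the
 * non-power-of-two case, e.g.
 *
 *    align_u32(13, 8)           == 16
 *    align_down_npot_u32(13, 6) == 12
 *    anv_is_aligned(64, 32)     == true
 */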
static inline uint32_t
anv_minify(uint32_t n, uint32_t levels)
{
   if (unlikely(n == 0))
      return 0;
   else
      return MAX2(n >> levels, 1);
}
static inline float
anv_clamp_f(float f, float min, float max)
{
   assert(min < max);

   if (f > max)
      return max;
   else if (f < min)
      return min;
   else
      return f;
}
static inline bool
anv_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
{
   if (*inout_mask & clear_mask) {
      *inout_mask &= ~clear_mask;
      return true;
   } else {
      return false;
   }
}
#define for_each_bit(b, dword)                          \
   for (uint32_t __dword = (dword);                     \
        (b) = __builtin_ffs(__dword) - 1, __dword;      \
        __dword &= ~(1 << (b)))
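
/* Usage sketch (illustrative, not part of the original header): iterate the
 * set bits of a mask, lowest first. With dword == 0x5, the body runs with
 * b == 0 and then b == 2:
 *
 *    uint32_t b;
 *    for_each_bit(b, pipeline->vb_used)
 *       setup_vertex_buffer(b);
 *
 * setup_vertex_buffer() is a hypothetical callee, shown only for the shape
 * of the loop.
 */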
#define typed_memcpy(dest, src, count) ({ \
   static_assert(sizeof(*src) == sizeof(*dest), ""); \
   memcpy((dest), (src), (count) * sizeof(*(src))); \
})

#define zero(x) (memset(&(x), 0, sizeof(x)))
/* Define no kernel as 1, since that's an illegal offset for a kernel */
#define NO_KERNEL 1

struct anv_common {
   VkStructureType sType;
   const void *pNext;
};
/* Whenever we generate an error, pass it through this function. Useful for
 * debugging, where we can break on it. Only call at error site, not when
 * propagating errors. Might be useful to plug in a stack trace here.
 */

VkResult __vk_errorf(VkResult error, const char *file, int line,
                     const char *format, ...);

#ifdef DEBUG
#define vk_error(error) __vk_errorf(error, __FILE__, __LINE__, NULL);
#define vk_errorf(error, format, ...) __vk_errorf(error, __FILE__, __LINE__, format, ## __VA_ARGS__);
#else
#define vk_error(error) error
#define vk_errorf(error, format, ...) error
#endif
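
/* Usage sketch (illustrative): wrap the error code at the point where it is
 * first generated so a debugger can break in __vk_errorf in debug builds:
 *
 *    mem = vk_alloc2(&device->alloc, pAllocator, size, 8,
 *                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 *    if (mem == NULL)
 *       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 *
 * The vk_alloc2 call is only context; the point is returning through
 * vk_error() rather than returning the VkResult directly.
 */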
void __anv_finishme(const char *file, int line, const char *format, ...)
   anv_printflike(3, 4);
void anv_loge(const char *format, ...) anv_printflike(1, 2);
void anv_loge_v(const char *format, va_list va);

/**
 * Print a FINISHME message, including its source location.
 */
#define anv_finishme(format, ...) \
   __anv_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__);
/* A non-fatal assert. Useful for debugging. */
#ifdef DEBUG
#define anv_assert(x) ({ \
   if (unlikely(!(x))) \
      fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x); \
})
#else
#define anv_assert(x)
#endif

/**
 * If a block of code is annotated with anv_validate, then the block runs only
 * in debug builds.
 */
#ifdef DEBUG
#define anv_validate if (1)
#else
#define anv_validate if (0)
#endif
void anv_abortf(const char *format, ...) anv_noreturn anv_printflike(1, 2);
void anv_abortfv(const char *format, va_list va) anv_noreturn;

#define stub_return(v) \
   do { \
      anv_finishme("stub %s", __func__); \
      return (v); \
   } while (0)

#define stub() \
   do { \
      anv_finishme("stub %s", __func__); \
      return; \
   } while (0)
/**
 * A dynamically growable, circular buffer. Elements are added at head and
 * removed from tail. head and tail are free-running uint32_t indices and we
 * only compute the modulo with size when accessing the array. This way,
 * number of bytes in the queue is always head - tail, even in case of
 * wraparound.
 */
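
/* Illustration (not from the original header): because head and tail are
 * free-running, unsigned arithmetic keeps the count correct across uint32_t
 * wraparound. With tail == UINT32_MAX - 1 and head == 2 after wrapping,
 * head - tail still evaluates to 4.
 */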
struct anv_bo {
   uint32_t gem_handle;

   /* Index into the current validation list. This is used by the
    * validation list building algorithm to track which buffers are already
    * in the validation list so that we can ensure uniqueness.
    */
   uint32_t index;

   /* Last known offset. This value is provided by the kernel when we
    * execbuf and is used as the presumed offset for the next bunch of
    * relocations.
    */
   uint64_t offset;

   uint64_t size;
   void *map;

   /* We need to set the WRITE flag on winsys bos so GEM will know we're
    * writing to them and synchronize uses on other rings (e.g. if the display
    * server uses the blitter ring).
    */
   bool is_winsys_bo;
};
/* Represents a lock-free linked list of "free" things. This is used by
 * both the block pool and the state pools. Unfortunately, in order to
 * solve the ABA problem, we can't use a single uint32_t head.
 */
union anv_free_list {
   struct {
      int32_t offset;

      /* A simple count that is incremented every time the head changes. */
      uint32_t count;
   };
   uint64_t u64;
};

#define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { 1, 0 } })
struct anv_block_state {
   union {
      struct {
         uint32_t next;
         uint32_t end;
      };
      uint64_t u64;
   };
};
struct anv_block_pool {
   struct anv_device *device;

   struct anv_bo bo;

   /* The offset from the start of the bo to the "center" of the block
    * pool. Pointers to allocated blocks are given by
    * bo.map + center_bo_offset + offsets.
    */
   uint32_t center_bo_offset;

   /* Current memory map of the block pool. This pointer may or may not
    * point to the actual beginning of the block pool memory. If
    * anv_block_pool_alloc_back has ever been called, then this pointer
    * will point to the "center" position of the buffer and all offsets
    * (negative or positive) given out by the block pool alloc functions
    * will be valid relative to this pointer.
    *
    * In particular, map == bo.map + center_offset
    */
   void *map;
   int fd;

   /**
    * Array of mmaps and gem handles owned by the block pool, reclaimed when
    * the block pool is destroyed.
    */
   struct u_vector mmap_cleanups;

   uint32_t block_size;

   union anv_free_list free_list;
   struct anv_block_state state;

   union anv_free_list back_free_list;
   struct anv_block_state back_state;
};
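
/* Addressing sketch (illustrative, assuming the fields above): a block
 * allocated from the front of the pool has a non-negative offset, one
 * allocated from the back a negative offset, and both resolve the same way:
 *
 *    int32_t front = anv_block_pool_alloc(pool);       // front >= 0
 *    int32_t back  = anv_block_pool_alloc_back(pool);  // back < 0
 *    void *front_map = pool->map + front;
 *    void *back_map  = pool->map + back;
 *
 * where pool->map == pool->bo.map + pool->center_bo_offset.
 */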
/* Block pools are backed by a fixed-size 4GB memfd */
#define BLOCK_POOL_MEMFD_SIZE (1ull << 32)

/* The center of the block pool is also the middle of the memfd. This may
 * change in the future if we decide differently for some reason.
 */
#define BLOCK_POOL_MEMFD_CENTER (BLOCK_POOL_MEMFD_SIZE / 2)
static inline uint32_t
anv_block_pool_size(struct anv_block_pool *pool)
{
   return pool->state.end + pool->back_state.end;
}

struct anv_state {
   int32_t offset;
   uint32_t alloc_size;
   void *map;
};
struct anv_fixed_size_state_pool {
   size_t state_size;
   union anv_free_list free_list;
   struct anv_block_state block;
};

#define ANV_MIN_STATE_SIZE_LOG2 6
#define ANV_MAX_STATE_SIZE_LOG2 17

#define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2 + 1)
struct anv_state_pool {
   struct anv_block_pool *block_pool;
   struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];
};
struct anv_state_stream_block;

struct anv_state_stream {
   struct anv_block_pool *block_pool;

   /* The current working block */
   struct anv_state_stream_block *block;

   /* Offset at which the current block starts */
   uint32_t start;

   /* Offset at which to allocate the next state */
   uint32_t next;

   /* Offset at which the current block ends */
   uint32_t end;
};
#define CACHELINE_SIZE 64
#define CACHELINE_MASK 63
static inline void
anv_clflush_range(void *start, size_t size)
{
   void *p = (void *) (((uintptr_t) start) & ~CACHELINE_MASK);
   void *end = start + size;

   __builtin_ia32_mfence();
   while (p < end) {
      __builtin_ia32_clflush(p);
      p += CACHELINE_SIZE;
   }
}

static inline void
anv_state_clflush(struct anv_state state)
{
   anv_clflush_range(state.map, state.alloc_size);
}
void anv_block_pool_init(struct anv_block_pool *pool,
                         struct anv_device *device, uint32_t block_size);
void anv_block_pool_finish(struct anv_block_pool *pool);
int32_t anv_block_pool_alloc(struct anv_block_pool *pool);
int32_t anv_block_pool_alloc_back(struct anv_block_pool *pool);
void anv_block_pool_free(struct anv_block_pool *pool, int32_t offset);
void anv_state_pool_init(struct anv_state_pool *pool,
                         struct anv_block_pool *block_pool);
void anv_state_pool_finish(struct anv_state_pool *pool);
struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,
                                      size_t state_size, size_t alignment);
void anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state);
void anv_state_stream_init(struct anv_state_stream *stream,
                           struct anv_block_pool *block_pool);
void anv_state_stream_finish(struct anv_state_stream *stream);
struct anv_state anv_state_stream_alloc(struct anv_state_stream *stream,
                                        uint32_t size, uint32_t alignment);
/**
 * Implements a pool of re-usable BOs. The interface is identical to that
 * of block_pool except that each block is its own BO.
 */
struct anv_bo_pool {
   struct anv_device *device;

   void *free_list[16];
};

void anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device);
void anv_bo_pool_finish(struct anv_bo_pool *pool);
VkResult anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo,
                           uint32_t size);
void anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo);
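
/* Usage sketch (illustrative): the pool hands out whole BOs rather than
 * offsets into a shared BO:
 *
 *    struct anv_bo bo;
 *    VkResult result = anv_bo_pool_alloc(&device->batch_bo_pool, &bo, size);
 *    if (result != VK_SUCCESS)
 *       return result;
 *    ... record into bo.map ...
 *    anv_bo_pool_free(&device->batch_bo_pool, &bo);
 */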
struct anv_scratch_pool {
   /* Indexed by Per-Thread Scratch Space number (the hardware value) and stage */
   struct anv_bo bos[16][MESA_SHADER_STAGES];
};

void anv_scratch_pool_init(struct anv_device *device,
                           struct anv_scratch_pool *pool);
void anv_scratch_pool_finish(struct anv_device *device,
                             struct anv_scratch_pool *pool);
struct anv_bo *anv_scratch_pool_alloc(struct anv_device *device,
                                      struct anv_scratch_pool *pool,
                                      gl_shader_stage stage,
                                      unsigned per_thread_scratch);
extern struct anv_dispatch_table dtable;

struct anv_wsi_interface;

#define VK_ICD_WSI_PLATFORM_MAX 5
struct anv_physical_device {
   VK_LOADER_DATA _loader_data;

   struct anv_instance *instance;
   uint32_t chipset_id;
   char path[20];
   const char *name;
   struct gen_device_info info;
   uint64_t aperture_size;
   struct brw_compiler *compiler;
   struct isl_device isl_dev;
   int cmd_parser_version;

   uint32_t eu_total;
   uint32_t subslice_total;

   struct anv_wsi_interface *wsi[VK_ICD_WSI_PLATFORM_MAX];
};
struct anv_instance {
   VK_LOADER_DATA _loader_data;

   VkAllocationCallbacks alloc;

   uint32_t apiVersion;
   int physicalDeviceCount;
   struct anv_physical_device physicalDevice;
};
VkResult anv_init_wsi(struct anv_physical_device *physical_device);
void anv_finish_wsi(struct anv_physical_device *physical_device);
struct anv_queue {
   VK_LOADER_DATA _loader_data;

   struct anv_device *device;

   struct anv_state_pool *pool;
};
struct anv_pipeline_cache {
   struct anv_device *device;
   pthread_mutex_t mutex;

   struct hash_table *cache;
};

struct anv_pipeline_bind_map;
void anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
                             struct anv_device *device,
                             bool cache_enabled);
void anv_pipeline_cache_finish(struct anv_pipeline_cache *cache);

struct anv_shader_bin *
anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                          const void *key, uint32_t key_size);
struct anv_shader_bin *
anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                 const void *key_data, uint32_t key_size,
                                 const void *kernel_data, uint32_t kernel_size,
                                 const void *prog_data, uint32_t prog_data_size,
                                 const struct anv_pipeline_bind_map *bind_map);
struct anv_device {
   VK_LOADER_DATA _loader_data;

   VkAllocationCallbacks alloc;

   struct anv_instance *instance;
   uint32_t chipset_id;
   struct gen_device_info info;
   struct isl_device isl_dev;
   int context_id;
   int fd;
   bool can_chain_batches;
   bool robust_buffer_access;

   struct anv_bo_pool batch_bo_pool;

   struct anv_block_pool dynamic_state_block_pool;
   struct anv_state_pool dynamic_state_pool;

   struct anv_block_pool instruction_block_pool;
   struct anv_state_pool instruction_state_pool;

   struct anv_block_pool surface_state_block_pool;
   struct anv_state_pool surface_state_pool;

   struct anv_bo workaround_bo;

   struct anv_pipeline_cache blorp_shader_cache;
   struct blorp_context blorp;

   struct anv_state border_colors;

   struct anv_queue queue;

   struct anv_scratch_pool scratch_pool;

   uint32_t default_mocs;

   pthread_mutex_t mutex;
};
void anv_device_get_cache_uuid(void *uuid);

void anv_device_init_blorp(struct anv_device *device);
void anv_device_finish_blorp(struct anv_device *device);
void* anv_gem_mmap(struct anv_device *device,
                   uint32_t gem_handle, uint64_t offset, uint64_t size,
                   uint32_t flags);
void anv_gem_munmap(void *p, uint64_t size);
uint32_t anv_gem_create(struct anv_device *device, size_t size);
void anv_gem_close(struct anv_device *device, uint32_t gem_handle);
uint32_t anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
int anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns);
int anv_gem_execbuffer(struct anv_device *device,
                       struct drm_i915_gem_execbuffer2 *execbuf);
int anv_gem_set_tiling(struct anv_device *device, uint32_t gem_handle,
                       uint32_t stride, uint32_t tiling);
int anv_gem_create_context(struct anv_device *device);
int anv_gem_destroy_context(struct anv_device *device, int context);
int anv_gem_get_param(int fd, uint32_t param);
bool anv_gem_get_bit6_swizzle(int fd, uint32_t tiling);
int anv_gem_get_aperture(int fd, uint64_t *size);
int anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle);
uint32_t anv_gem_fd_to_handle(struct anv_device *device, int fd);
int anv_gem_set_caching(struct anv_device *device, uint32_t gem_handle, uint32_t caching);
int anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
                       uint32_t read_domains, uint32_t write_domain);

VkResult anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size);
struct anv_reloc_list {
   size_t num_relocs;
   size_t array_length;
   struct drm_i915_gem_relocation_entry *relocs;
   struct anv_bo **reloc_bos;
};
VkResult anv_reloc_list_init(struct anv_reloc_list *list,
                             const VkAllocationCallbacks *alloc);
void anv_reloc_list_finish(struct anv_reloc_list *list,
                           const VkAllocationCallbacks *alloc);

uint64_t anv_reloc_list_add(struct anv_reloc_list *list,
                            const VkAllocationCallbacks *alloc,
                            uint32_t offset, struct anv_bo *target_bo,
                            uint32_t delta);
struct anv_batch_bo {
   /* Link in the anv_cmd_buffer.owned_batch_bos list */
   struct list_head link;

   struct anv_bo bo;

   /* Bytes actually consumed in this batch BO */
   size_t length;

   /* Last seen surface state block pool bo offset */
   uint32_t last_ss_pool_bo_offset;

   struct anv_reloc_list relocs;
};
struct anv_batch {
   const VkAllocationCallbacks *alloc;

   void *start;
   void *end;
   void *next;

   struct anv_reloc_list *relocs;

   /* This callback is called (with the associated user data) in the event
    * that the batch runs out of space.
    */
   VkResult (*extend_cb)(struct anv_batch *, void *);
   void *user_data;
};
void *anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords);
void anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other);
uint64_t anv_batch_emit_reloc(struct anv_batch *batch,
                              void *location, struct anv_bo *bo, uint32_t offset);
VkResult anv_device_submit_simple_batch(struct anv_device *device,
                                        struct anv_batch *batch);
struct anv_address {
   struct anv_bo *bo;
   uint32_t offset;
};

static inline uint64_t
_anv_combine_address(struct anv_batch *batch, void *location,
                     const struct anv_address address, uint32_t delta)
{
   if (address.bo == NULL) {
      return address.offset + delta;
   } else {
      assert(batch->start <= location && location < batch->end);

      return anv_batch_emit_reloc(batch, location, address.bo,
                                  address.offset + delta);
   }
}
#define __gen_address_type struct anv_address
#define __gen_user_data struct anv_batch
#define __gen_combine_address _anv_combine_address
/* Wrapper macros needed to work around preprocessor argument issues. In
 * particular, arguments don't get pre-evaluated if they are concatenated.
 * This means that, if you pass GENX(3DSTATE_PS) into the emit macro, the
 * GENX macro won't get evaluated if the emit macro contains "cmd ## foo".
 * We can work around this easily enough with these helpers.
 */
#define __anv_cmd_length(cmd) cmd ## _length
#define __anv_cmd_length_bias(cmd) cmd ## _length_bias
#define __anv_cmd_header(cmd) cmd ## _header
#define __anv_cmd_pack(cmd) cmd ## _pack
#define __anv_reg_num(reg) reg ## _num
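
/* Illustration (not from the original header): arguments adjacent to ##
 * are not macro-expanded, so "cmd ## _length" written directly in an emit
 * macro would try to paste "_length" onto the unexpanded GENX(...). Passing
 * the argument through a helper first forces the expansion:
 *
 *    __anv_cmd_length(GENX(3DSTATE_PS))
 *       -> __anv_cmd_length(GEN8_3DSTATE_PS)   // GENX expands first
 *       -> GEN8_3DSTATE_PS_length
 */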
#define anv_pack_struct(dst, struc, ...) do { \
      struct struc __template = { \
         __VA_ARGS__ \
      }; \
      __anv_cmd_pack(struc)(NULL, dst, &__template); \
      VG(VALGRIND_CHECK_MEM_IS_DEFINED(dst, __anv_cmd_length(struc) * 4)); \
   } while (0)
#define anv_batch_emitn(batch, n, cmd, ...) ({ \
      void *__dst = anv_batch_emit_dwords(batch, n); \
      struct cmd __template = { \
         __anv_cmd_header(cmd), \
         .DWordLength = n - __anv_cmd_length_bias(cmd), \
         __VA_ARGS__ \
      }; \
      __anv_cmd_pack(cmd)(batch, __dst, &__template); \
      __dst; \
   })
#define anv_batch_emit_merge(batch, dwords0, dwords1) \
   do { \
      uint32_t *dw; \
      \
      static_assert(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1), "mismatch merge"); \
      dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0)); \
      for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++) \
         dw[i] = (dwords0)[i] | (dwords1)[i]; \
      VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, ARRAY_SIZE(dwords0) * 4)); \
   } while (0)
#define anv_batch_emit(batch, cmd, name) \
   for (struct cmd name = { __anv_cmd_header(cmd) }, \
        *_dst = anv_batch_emit_dwords(batch, __anv_cmd_length(cmd)); \
        __builtin_expect(_dst != NULL, 1); \
        ({ __anv_cmd_pack(cmd)(batch, _dst, &name); \
           VG(VALGRIND_CHECK_MEM_IS_DEFINED(_dst, __anv_cmd_length(cmd) * 4)); \
           _dst = NULL; \
         }))
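
/* Usage sketch (illustrative; GENX(PIPE_CONTROL) stands for whatever genxml
 * command is being emitted): the for-loop trick gives the command struct a
 * scoped name and packs it into the batch when the block ends:
 *
 *    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 *       pc.CommandStreamerStallEnable = true;
 *    }
 */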
#define anv_state_pool_emit(pool, cmd, align, ...) ({ \
      const uint32_t __size = __anv_cmd_length(cmd) * 4; \
      struct anv_state __state = \
         anv_state_pool_alloc((pool), __size, align); \
      struct cmd __template = { \
         __VA_ARGS__ \
      }; \
      __anv_cmd_pack(cmd)(NULL, __state.map, &__template); \
      VG(VALGRIND_CHECK_MEM_IS_DEFINED(__state.map, __anv_cmd_length(cmd) * 4)); \
      if (!(pool)->block_pool->device->info.has_llc) \
         anv_state_clflush(__state); \
      __state; \
   })
#define GEN7_MOCS (struct GEN7_MEMORY_OBJECT_CONTROL_STATE) { \
   .GraphicsDataTypeGFDT = 0, \
   .LLCCacheabilityControlLLCCC = 0, \
   .L3CacheabilityControlL3CC = 1, \
}

#define GEN75_MOCS (struct GEN75_MEMORY_OBJECT_CONTROL_STATE) { \
   .LLCeLLCCacheabilityControlLLCCC = 0, \
   .L3CacheabilityControlL3CC = 1, \
}

#define GEN8_MOCS (struct GEN8_MEMORY_OBJECT_CONTROL_STATE) { \
   .MemoryTypeLLCeLLCCacheabilityControl = WB, \
   .TargetCache = L3DefertoPATforLLCeLLCselection, \
   .AgeforQUADLRU = 0, \
}

/* Skylake: MOCS is now an index into an array of 62 different caching
 * configurations programmed by the kernel.
 */
#define GEN9_MOCS (struct GEN9_MEMORY_OBJECT_CONTROL_STATE) { \
   /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
   .IndextoMOCSTables = 2 \
}

#define GEN9_MOCS_PTE { \
   /* TC=LLC/eLLC, LeCC=PTE, LRUM=3, L3CC=WB */ \
   .IndextoMOCSTables = 1 \
}
struct anv_device_memory {
   struct anv_bo bo;
   uint32_t type_index;
   VkDeviceSize map_size;
   void *map;
};
/**
 * Header for Vertex URB Entry (VUE)
 */
struct anv_vue_header {
   uint32_t Reserved;
   uint32_t RTAIndex; /* RenderTargetArrayIndex */
   uint32_t ViewportIndex;
   float PointWidth;
};
struct anv_descriptor_set_binding_layout {
   /* The type of the descriptors in this binding */
   VkDescriptorType type;

   /* Number of array elements in this binding */
   uint16_t array_size;

   /* Index into the flattened descriptor set */
   uint16_t descriptor_index;

   /* Index into the dynamic state array for a dynamic buffer */
   int16_t dynamic_offset_index;

   /* Index into the descriptor set buffer views */
   int16_t buffer_index;

   struct {
      /* Index into the binding table for the associated surface */
      int16_t surface_index;

      /* Index into the sampler table for the associated sampler */
      int16_t sampler_index;

      /* Index into the image table for the associated image */
      int16_t image_index;
   } stage[MESA_SHADER_STAGES];

   /* Immutable samplers (or NULL if no immutable samplers) */
   struct anv_sampler **immutable_samplers;
};
struct anv_descriptor_set_layout {
   /* Number of bindings in this descriptor set */
   uint16_t binding_count;

   /* Total size of the descriptor set with room for all array entries */
   uint16_t size;

   /* Shader stages affected by this descriptor set */
   uint16_t shader_stages;

   /* Number of buffers in this descriptor set */
   uint16_t buffer_count;

   /* Number of dynamic offsets used by this descriptor set */
   uint16_t dynamic_offset_count;

   /* Bindings in this descriptor set */
   struct anv_descriptor_set_binding_layout binding[0];
};
struct anv_descriptor {
   VkDescriptorType type;

   union {
      struct {
         struct anv_image_view *image_view;
         struct anv_sampler *sampler;
      };

      struct anv_buffer_view *buffer_view;
   };
};
struct anv_descriptor_set {
   const struct anv_descriptor_set_layout *layout;
   uint32_t size;
   uint32_t buffer_count;
   struct anv_buffer_view *buffer_views;
   struct anv_descriptor descriptors[0];
};
struct anv_descriptor_pool {
   uint32_t size;
   uint32_t next;
   uint32_t free_list;

   struct anv_state_stream surface_state_stream;
   void *surface_state_free_list;

   char data[0];
};
VkResult
anv_descriptor_set_create(struct anv_device *device,
                          struct anv_descriptor_pool *pool,
                          const struct anv_descriptor_set_layout *layout,
                          struct anv_descriptor_set **out_set);

void
anv_descriptor_set_destroy(struct anv_device *device,
                           struct anv_descriptor_pool *pool,
                           struct anv_descriptor_set *set);
#define ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS UINT8_MAX

struct anv_pipeline_binding {
   /* The descriptor set this surface corresponds to. The special value of
    * ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS indicates that the offset refers
    * to a color attachment and not a regular descriptor.
    */
   uint8_t set;

   /* Binding in the descriptor set */
   uint8_t binding;

   /* Index in the binding */
   uint8_t index;
};
struct anv_pipeline_layout {
   struct {
      struct anv_descriptor_set_layout *layout;
      uint32_t dynamic_offset_start;
   } set[MAX_SETS];

   uint32_t num_sets;

   struct {
      bool has_dynamic_offsets;
   } stage[MESA_SHADER_STAGES];

   unsigned char sha1[20];
};
struct anv_buffer {
   struct anv_device *device;
   VkDeviceSize size;

   VkBufferUsageFlags usage;

   /* Set when bound */
   struct anv_bo *bo;
   VkDeviceSize offset;
};
enum anv_cmd_dirty_bits {
   ANV_CMD_DIRTY_DYNAMIC_VIEWPORT             = 1 << 0, /* VK_DYNAMIC_STATE_VIEWPORT */
   ANV_CMD_DIRTY_DYNAMIC_SCISSOR              = 1 << 1, /* VK_DYNAMIC_STATE_SCISSOR */
   ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH           = 1 << 2, /* VK_DYNAMIC_STATE_LINE_WIDTH */
   ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS           = 1 << 3, /* VK_DYNAMIC_STATE_DEPTH_BIAS */
   ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS      = 1 << 4, /* VK_DYNAMIC_STATE_BLEND_CONSTANTS */
   ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS         = 1 << 5, /* VK_DYNAMIC_STATE_DEPTH_BOUNDS */
   ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK = 1 << 6, /* VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK */
   ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK   = 1 << 7, /* VK_DYNAMIC_STATE_STENCIL_WRITE_MASK */
   ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE    = 1 << 8, /* VK_DYNAMIC_STATE_STENCIL_REFERENCE */
   ANV_CMD_DIRTY_DYNAMIC_ALL                  = (1 << 9) - 1,
   ANV_CMD_DIRTY_PIPELINE                     = 1 << 9,
   ANV_CMD_DIRTY_INDEX_BUFFER                 = 1 << 10,
   ANV_CMD_DIRTY_RENDER_TARGETS               = 1 << 11,
};
typedef uint32_t anv_cmd_dirty_mask_t;
enum anv_pipe_bits {
   ANV_PIPE_DEPTH_CACHE_FLUSH_BIT            = (1 << 0),
   ANV_PIPE_STALL_AT_SCOREBOARD_BIT          = (1 << 1),
   ANV_PIPE_STATE_CACHE_INVALIDATE_BIT       = (1 << 2),
   ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT    = (1 << 3),
   ANV_PIPE_VF_CACHE_INVALIDATE_BIT          = (1 << 4),
   ANV_PIPE_DATA_CACHE_FLUSH_BIT             = (1 << 5),
   ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT     = (1 << 10),
   ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT = (1 << 11),
   ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT    = (1 << 12),
   ANV_PIPE_DEPTH_STALL_BIT                  = (1 << 13),
   ANV_PIPE_CS_STALL_BIT                     = (1 << 20),

   /* This bit does not exist directly in PIPE_CONTROL. Instead it means that
    * a flush has happened but not a CS stall. The next time we do any sort
    * of invalidation we need to insert a CS stall at that time. Otherwise,
    * we would have to CS stall on every flush which could be bad.
    */
   ANV_PIPE_NEEDS_CS_STALL_BIT               = (1 << 21),
};
#define ANV_PIPE_FLUSH_BITS ( \
   ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | \
   ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
   ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT)

#define ANV_PIPE_STALL_BITS ( \
   ANV_PIPE_STALL_AT_SCOREBOARD_BIT | \
   ANV_PIPE_DEPTH_STALL_BIT | \
   ANV_PIPE_CS_STALL_BIT)

#define ANV_PIPE_INVALIDATE_BITS ( \
   ANV_PIPE_STATE_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_VF_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
   ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT)
struct anv_vertex_binding {
   struct anv_buffer *buffer;
   VkDeviceSize offset;
};
struct anv_push_constants {
   /* Current allocated size of this push constants data structure.
    * Because a decent chunk of it may not be used (images on SKL, for
    * instance), we won't actually allocate the entire structure up-front.
    */
   uint32_t size;

   /* Push constant data provided by the client through vkPushConstants */
   uint8_t client_data[MAX_PUSH_CONSTANTS_SIZE];

   /* Our hardware only provides zero-based vertex and instance id so, in
    * order to satisfy the Vulkan requirements, we may have to push one or
    * both of these into the shader.
    */
   uint32_t base_vertex;
   uint32_t base_instance;

   /* Offsets and ranges for dynamically bound buffers */
   struct {
      uint32_t offset;
      uint32_t range;
   } dynamic[MAX_DYNAMIC_BUFFERS];

   /* Image data for image_load_store on pre-SKL */
   struct brw_image_param images[MAX_IMAGES];
};
struct anv_dynamic_state {
   struct {
      uint32_t count;
      VkViewport viewports[MAX_VIEWPORTS];
   } viewport;

   struct {
      uint32_t count;
      VkRect2D scissors[MAX_SCISSORS];
   } scissor;

   float line_width;

   struct {
      float bias;
      float clamp;
      float slope;
   } depth_bias;

   float blend_constants[4];

   struct {
      float min;
      float max;
   } depth_bounds;

   struct {
      uint32_t front;
      uint32_t back;
   } stencil_compare_mask;

   struct {
      uint32_t front;
      uint32_t back;
   } stencil_write_mask;

   struct {
      uint32_t front;
      uint32_t back;
   } stencil_reference;
};
extern const struct anv_dynamic_state default_dynamic_state;

void anv_dynamic_state_copy(struct anv_dynamic_state *dest,
                            const struct anv_dynamic_state *src,
                            uint32_t copy_mask);
/**
 * Attachment state when recording a renderpass instance.
 *
 * The clear value is valid only if there exists a pending clear.
 */
struct anv_attachment_state {
   VkImageAspectFlags pending_clear_aspects;
   VkClearValue clear_value;
};
/** State required while building cmd buffer */
struct anv_cmd_state {
   /* PIPELINE_SELECT.PipelineSelection */
   uint32_t current_pipeline;
   const struct gen_l3_config *current_l3_config;
   uint32_t vb_dirty;
   anv_cmd_dirty_mask_t dirty;
   anv_cmd_dirty_mask_t compute_dirty;
   enum anv_pipe_bits pending_pipe_bits;
   uint32_t num_workgroups_offset;
   struct anv_bo *num_workgroups_bo;
   VkShaderStageFlags descriptors_dirty;
   VkShaderStageFlags push_constants_dirty;
   uint32_t scratch_size;
   struct anv_pipeline *pipeline;
   struct anv_pipeline *compute_pipeline;
   struct anv_framebuffer *framebuffer;
   struct anv_render_pass *pass;
   struct anv_subpass *subpass;
   VkRect2D render_area;
   uint32_t restart_index;
   struct anv_vertex_binding vertex_bindings[MAX_VBS];
   struct anv_descriptor_set *descriptors[MAX_SETS];
   VkShaderStageFlags push_constant_stages;
   struct anv_push_constants *push_constants[MESA_SHADER_STAGES];
   struct anv_state binding_tables[MESA_SHADER_STAGES];
   struct anv_state samplers[MESA_SHADER_STAGES];
   struct anv_dynamic_state dynamic;
   bool need_query_wa;

   /**
    * Array length is anv_cmd_state::pass::attachment_count. Array content is
    * valid only when recording a render pass instance.
    */
   struct anv_attachment_state *attachments;

   struct {
      struct anv_buffer *index_buffer;
      uint32_t index_type; /**< 3DSTATE_INDEX_BUFFER.IndexFormat */
      uint32_t index_offset;
   } gen7;
};
struct anv_cmd_pool {
   VkAllocationCallbacks alloc;
   struct list_head cmd_buffers;
};

#define ANV_CMD_BUFFER_BATCH_SIZE 8192
enum anv_cmd_buffer_exec_mode {
   ANV_CMD_BUFFER_EXEC_MODE_PRIMARY,
   ANV_CMD_BUFFER_EXEC_MODE_EMIT,
   ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT,
   ANV_CMD_BUFFER_EXEC_MODE_CHAIN,
   ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN,
};
{
1127 VK_LOADER_DATA _loader_data
;
1129 struct anv_device
* device
;
1131 struct anv_cmd_pool
* pool
;
1132 struct list_head pool_link
;
1134 struct anv_batch batch
;
1136 /* Fields required for the actual chain of anv_batch_bo's.
1138 * These fields are initialized by anv_cmd_buffer_init_batch_bo_chain().
1140 struct list_head batch_bos
;
1141 enum anv_cmd_buffer_exec_mode exec_mode
;
1143 /* A vector of anv_batch_bo pointers for every batch or surface buffer
1144 * referenced by this command buffer
1146 * initialized by anv_cmd_buffer_init_batch_bo_chain()
1148 struct u_vector seen_bbos
;
1150 /* A vector of int32_t's for every block of binding tables.
1152 * initialized by anv_cmd_buffer_init_batch_bo_chain()
1154 struct u_vector bt_blocks
;
1156 struct anv_reloc_list surface_relocs
;
1158 /* Information needed for execbuf
1160 * These fields are generated by anv_cmd_buffer_prepare_execbuf().
1163 struct drm_i915_gem_execbuffer2 execbuf
;
1165 struct drm_i915_gem_exec_object2
* objects
;
1167 struct anv_bo
** bos
;
1169 /* Allocated length of the 'objects' and 'bos' arrays */
1170 uint32_t array_length
;
1175 /* Serial for tracking buffer completion */
1178 /* Stream objects for storing temporary data */
1179 struct anv_state_stream surface_state_stream
;
1180 struct anv_state_stream dynamic_state_stream
;
1182 VkCommandBufferUsageFlags usage_flags
;
1183 VkCommandBufferLevel level
;
1185 struct anv_cmd_state state
;
VkResult anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
                                  struct anv_cmd_buffer *secondary);
void anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer);

VkResult anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer);
VkResult
anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
                                          gl_shader_stage stage, uint32_t size);
#define anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, field) \
   anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, \
      (offsetof(struct anv_push_constants, field) + \
       sizeof(cmd_buffer->state.push_constants[0]->field)))
struct anv_state anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
                                             const void *data, uint32_t size,
                                             uint32_t alignment);
struct anv_state anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
                                              uint32_t *a, uint32_t *b,
                                              uint32_t dwords, uint32_t alignment);

void anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer);

struct anv_state
anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t entries, uint32_t *state_offset);
struct anv_state
anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer);
struct anv_state
anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t size, uint32_t alignment);

VkResult
anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer);
void gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer);
void gen8_cmd_buffer_emit_depth_viewport(struct anv_cmd_buffer *cmd_buffer,
                                         bool depth_clamp_enable);
void gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer);

void anv_cmd_state_setup_attachments(struct anv_cmd_buffer *cmd_buffer,
                                     const VkRenderPassBeginInfo *info);

struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
                              gl_shader_stage stage);
struct anv_state
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer);

void anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer);
const struct anv_image_view *
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer);

void anv_cmd_buffer_dump(struct anv_cmd_buffer *cmd_buffer);
struct anv_fence {
   struct anv_bo bo;
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   bool ready;
};

struct anv_event {
   uint64_t semaphore;
   struct anv_state state;
};
struct anv_shader_module {
   unsigned char sha1[20];
   uint32_t size;
   char data[0];
};
void anv_hash_shader(unsigned char *hash, const void *key, size_t key_size,
                     struct anv_shader_module *module,
                     const char *entrypoint,
                     const struct anv_pipeline_layout *pipeline_layout,
                     const VkSpecializationInfo *spec_info);
static inline gl_shader_stage
vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
{
   assert(__builtin_popcount(vk_stage) == 1);
   return ffs(vk_stage) - 1;
}

static inline VkShaderStageFlagBits
mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
{
   return (1 << mesa_stage);
}

#define ANV_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)
#define anv_foreach_stage(stage, stage_bits) \
   for (gl_shader_stage stage, \
        __tmp = (gl_shader_stage)((stage_bits) & ANV_STAGE_MASK); \
        stage = __builtin_ffs(__tmp) - 1, __tmp; \
        __tmp &= ~(1 << (stage)))
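
/* Usage sketch (illustrative): the two conversions above are inverses for
 * single stage bits, and anv_foreach_stage walks a stage mask:
 *
 *    gl_shader_stage s = vk_to_mesa_shader_stage(VK_SHADER_STAGE_FRAGMENT_BIT);
 *    assert(mesa_to_vk_shader_stage(s) == VK_SHADER_STAGE_FRAGMENT_BIT);
 *
 *    anv_foreach_stage(stage, cmd_buffer->state.descriptors_dirty)
 *       flush_descriptor_set(cmd_buffer, stage);
 *
 * flush_descriptor_set() is a hypothetical callee, shown only for the loop
 * shape.
 */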
struct anv_pipeline_bind_map {
   uint32_t surface_count;
   uint32_t sampler_count;
   uint32_t image_count;

   struct anv_pipeline_binding *surface_to_descriptor;
   struct anv_pipeline_binding *sampler_to_descriptor;
};
struct anv_shader_bin {
   uint32_t ref_cnt;

   struct anv_state kernel;
   uint32_t kernel_size;

   struct anv_pipeline_bind_map bind_map;

   uint32_t prog_data_size;

   /* Prog data follows, then the key, both aligned to 8-bytes */
};
struct anv_shader_bin *
anv_shader_bin_create(struct anv_device *device,
                      const void *key, uint32_t key_size,
                      const void *kernel, uint32_t kernel_size,
                      const void *prog_data, uint32_t prog_data_size,
                      const struct anv_pipeline_bind_map *bind_map);

void
anv_shader_bin_destroy(struct anv_device *device, struct anv_shader_bin *shader);
static inline void
anv_shader_bin_ref(struct anv_shader_bin *shader)
{
   assert(shader->ref_cnt >= 1);
   __sync_fetch_and_add(&shader->ref_cnt, 1);
}

static inline void
anv_shader_bin_unref(struct anv_device *device, struct anv_shader_bin *shader)
{
   assert(shader->ref_cnt >= 1);
   if (__sync_fetch_and_add(&shader->ref_cnt, -1) == 1)
      anv_shader_bin_destroy(device, shader);
}
static inline const struct brw_stage_prog_data *
anv_shader_bin_get_prog_data(const struct anv_shader_bin *shader)
{
   const void *data = shader;
   data += align_u32(sizeof(struct anv_shader_bin), 8);
   return data;
}
struct anv_pipeline {
   struct anv_device *device;
   struct anv_batch batch;
   uint32_t batch_data[512];
   struct anv_reloc_list batch_relocs;
   uint32_t dynamic_state_mask;
   struct anv_dynamic_state dynamic_state;

   struct anv_pipeline_layout *layout;

   bool needs_data_cache;

   struct anv_shader_bin *shaders[MESA_SHADER_STAGES];

   struct {
      const struct gen_l3_config *l3_config;
      uint32_t total_size;
   } urb;

   VkShaderStageFlags active_stages;
   struct anv_state blend_state;
   uint32_t vs_simd8;
   uint32_t vs_vec4;
   uint32_t ps_ksp0;
   uint32_t gs_kernel;
   uint32_t cs_simd;

   uint32_t vb_used;
   uint32_t binding_stride[MAX_VBS];
   bool instancing_enable[MAX_VBS];
   bool primitive_restart;
   uint32_t topology;

   uint32_t cs_right_mask;

   bool depth_clamp_enable;

   struct {
      uint32_t sf[7];
      uint32_t depth_stencil_state[3];
   } gen7;

   struct {
      uint32_t sf[4];
      uint32_t raster[5];
      uint32_t wm_depth_stencil[3];
   } gen8;

   struct {
      uint32_t wm_depth_stencil[4];
   } gen9;
};
static inline bool
anv_pipeline_has_stage(const struct anv_pipeline *pipeline,
                       gl_shader_stage stage)
{
   return (pipeline->active_stages & mesa_to_vk_shader_stage(stage)) != 0;
}
#define ANV_DECL_GET_PROG_DATA_FUNC(prefix, stage) \
static inline const struct brw_##prefix##_prog_data * \
get_##prefix##_prog_data(struct anv_pipeline *pipeline) \
{ \
   if (anv_pipeline_has_stage(pipeline, stage)) { \
      return (const struct brw_##prefix##_prog_data *) \
             anv_shader_bin_get_prog_data(pipeline->shaders[stage]); \
   } else { \
      return NULL; \
   } \
}

ANV_DECL_GET_PROG_DATA_FUNC(vs, MESA_SHADER_VERTEX)
ANV_DECL_GET_PROG_DATA_FUNC(gs, MESA_SHADER_GEOMETRY)
ANV_DECL_GET_PROG_DATA_FUNC(wm, MESA_SHADER_FRAGMENT)
ANV_DECL_GET_PROG_DATA_FUNC(cs, MESA_SHADER_COMPUTE)
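
/* Usage sketch (illustrative): the generated accessors return NULL when the
 * stage is not present, so callers guard on the pointer:
 *
 *    const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
 *    if (wm_prog_data != NULL)
 *       emit_ps_state(pipeline, wm_prog_data);
 *
 * emit_ps_state() is a hypothetical callee.
 */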
VkResult
anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
                  struct anv_pipeline_cache *cache,
                  const VkGraphicsPipelineCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *alloc);

VkResult
anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkComputePipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info);
struct anv_format {
   enum isl_format isl_format:16;
   struct isl_swizzle swizzle;
};

struct anv_format
anv_get_format(const struct gen_device_info *devinfo, VkFormat format,
               VkImageAspectFlags aspect, VkImageTiling tiling);
static inline enum isl_format
anv_get_isl_format(const struct gen_device_info *devinfo, VkFormat vk_format,
                   VkImageAspectFlags aspect, VkImageTiling tiling)
{
   return anv_get_format(devinfo, vk_format, aspect, tiling).isl_format;
}
void
anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm);
/**
 * Subsurface of an anv_image.
 */
struct anv_surface {
   /** Valid only if isl_surf::size > 0. */
   struct isl_surf isl;

   /**
    * Offset from VkImage's base address, as bound by vkBindImageMemory().
    */
   uint32_t offset;
};
struct anv_image {
   /* The original VkFormat provided by the client. This may not match any
    * of the actual surface formats.
    */
   VkFormat vk_format;
   VkImageAspectFlags aspects;
   VkImageType type;
   VkExtent3D extent;
   uint32_t levels;
   uint32_t array_size;
   uint32_t samples; /**< VkImageCreateInfo::samples */
   VkImageUsageFlags usage; /**< Superset of VkImageCreateInfo::usage. */
   VkImageTiling tiling; /**< VkImageCreateInfo::tiling */

   VkDeviceSize size;
   uint32_t alignment;

   /* Set when bound */
   struct anv_bo *bo;
   VkDeviceSize offset;

   /**
    * Image subsurfaces
    *
    * For each foo, anv_image::foo_surface is valid if and only if
    * anv_image::aspects has a foo aspect.
    *
    * The hardware requires that the depth buffer and stencil buffer be
    * separate surfaces. From Vulkan's perspective, though, depth and stencil
    * reside in the same VkImage. To satisfy both the hardware and Vulkan, we
    * allocate the depth and stencil buffers as separate surfaces in the same
    * bo.
    */
   union {
      struct anv_surface color_surface;

      struct {
         struct anv_surface depth_surface;
         struct anv_surface hiz_surface;
         struct anv_surface stencil_surface;
      };
   };
};
static inline uint32_t
anv_get_layerCount(const struct anv_image *image,
                   const VkImageSubresourceRange *range)
{
   return range->layerCount == VK_REMAINING_ARRAY_LAYERS ?
          image->array_size - range->baseArrayLayer : range->layerCount;
}

static inline uint32_t
anv_get_levelCount(const struct anv_image *image,
                   const VkImageSubresourceRange *range)
{
   return range->levelCount == VK_REMAINING_MIP_LEVELS ?
          image->levels - range->baseMipLevel : range->levelCount;
}
struct anv_image_view {
   const struct anv_image *image; /**< VkImageViewCreateInfo::image */
   struct anv_bo *bo;
   uint32_t offset; /**< Offset into bo. */

   struct isl_view isl;

   VkImageAspectFlags aspect_mask;
   VkFormat vk_format;
   VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */

   /** RENDER_SURFACE_STATE when using image as a color render target. */
   struct anv_state color_rt_surface_state;

   /** RENDER_SURFACE_STATE when using image as a sampler surface. */
   struct anv_state sampler_surface_state;

   /** RENDER_SURFACE_STATE when using image as a storage image. */
   struct anv_state storage_surface_state;

   struct brw_image_param storage_image_param;
};
struct anv_image_create_info {
   const VkImageCreateInfo *vk_info;

   /** An opt-in bitmask which filters an ISL-mapping of the Vulkan tiling. */
   isl_tiling_flags_t isl_tiling_flags;

   uint32_t stride;
};
VkResult anv_image_create(VkDevice _device,
                          const struct anv_image_create_info *info,
                          const VkAllocationCallbacks *alloc,
                          VkImage *pImage);

const struct anv_surface *
anv_image_get_surface_for_aspect_mask(const struct anv_image *image,
                                      VkImageAspectFlags aspect_mask);
static inline bool
anv_image_has_hiz(const struct anv_image *image)
{
   /* We must check the aspect because anv_image::hiz_surface belongs to
    * a union.
    */
   return (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
          image->hiz_surface.isl.size > 0;
}
struct anv_buffer_view {
   enum isl_format format; /**< VkBufferViewCreateInfo::format */
   struct anv_bo *bo;
   uint32_t offset; /**< Offset into bo. */
   uint64_t range; /**< VkBufferViewCreateInfo::range */

   struct anv_state surface_state;
   struct anv_state storage_surface_state;

   struct brw_image_param storage_image_param;
};
enum isl_format
anv_isl_format_for_descriptor_type(VkDescriptorType type);
static inline struct VkExtent3D
anv_sanitize_image_extent(const VkImageType imageType,
                          const struct VkExtent3D imageExtent)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkExtent3D) { imageExtent.width, 1, 1 };
   case VK_IMAGE_TYPE_2D:
      return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
   case VK_IMAGE_TYPE_3D:
      return imageExtent;
   default:
      unreachable("invalid image type");
   }
}
static inline struct VkOffset3D
anv_sanitize_image_offset(const VkImageType imageType,
                          const struct VkOffset3D imageOffset)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkOffset3D) { imageOffset.x, 0, 0 };
   case VK_IMAGE_TYPE_2D:
      return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
   case VK_IMAGE_TYPE_3D:
      return imageOffset;
   default:
      unreachable("invalid image type");
   }
}
void anv_fill_buffer_surface_state(struct anv_device *device,
                                   struct anv_state state,
                                   enum isl_format format,
                                   uint32_t offset, uint32_t range,
                                   uint32_t stride);

void anv_image_view_fill_image_param(struct anv_device *device,
                                     struct anv_image_view *view,
                                     struct brw_image_param *param);
void anv_buffer_view_fill_image_param(struct anv_device *device,
                                      struct anv_buffer_view *view,
                                      struct brw_image_param *param);
struct anv_sampler {
   uint32_t state[4];
};

struct anv_framebuffer {
   uint32_t width;
   uint32_t height;
   uint32_t layers;

   uint32_t attachment_count;
   struct anv_image_view *attachments[0];
};
struct anv_subpass {
   uint32_t input_count;
   uint32_t *input_attachments;
   uint32_t color_count;
   uint32_t *color_attachments;
   uint32_t *resolve_attachments;
   uint32_t depth_stencil_attachment;

   /** Subpass has at least one resolve attachment */
   bool has_resolve;
};
{
1672 VkAttachmentLoadOp load_op
;
1673 VkAttachmentStoreOp store_op
;
1674 VkAttachmentLoadOp stencil_load_op
;
struct anv_render_pass {
   uint32_t attachment_count;
   uint32_t subpass_count;
   uint32_t *subpass_attachments;
   struct anv_render_pass_attachment *attachments;
   struct anv_subpass subpasses[0];
};
struct anv_query_pool_slot {
   uint64_t begin;
   uint64_t end;
   uint64_t available;
};

struct anv_query_pool {
   VkQueryType type;
   uint32_t slots;
   struct anv_bo bo;
};
void *anv_lookup_entrypoint(const struct gen_device_info *devinfo,
                            const char *name);

void anv_dump_image_to_ppm(struct anv_device *device,
                           struct anv_image *image, unsigned miplevel,
                           unsigned array_layer, VkImageAspectFlagBits aspect,
                           const char *filename);
enum anv_dump_action {
   ANV_DUMP_FRAMEBUFFERS_BIT = 0x1,
};

void anv_dump_start(struct anv_device *device, enum anv_dump_action actions);
void anv_dump_finish(void);

void anv_dump_add_framebuffer(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_framebuffer *fb);
#define ANV_DEFINE_HANDLE_CASTS(__anv_type, __VkType) \
   \
   static inline struct __anv_type * \
   __anv_type ## _from_handle(__VkType _handle) \
   { \
      return (struct __anv_type *) _handle; \
   } \
   \
   static inline __VkType \
   __anv_type ## _to_handle(struct __anv_type *_obj) \
   { \
      return (__VkType) _obj; \
   }

#define ANV_DEFINE_NONDISP_HANDLE_CASTS(__anv_type, __VkType) \
   \
   static inline struct __anv_type * \
   __anv_type ## _from_handle(__VkType _handle) \
   { \
      return (struct __anv_type *)(uintptr_t) _handle; \
   } \
   \
   static inline __VkType \
   __anv_type ## _to_handle(struct __anv_type *_obj) \
   { \
      return (__VkType)(uintptr_t) _obj; \
   }

#define ANV_FROM_HANDLE(__anv_type, __name, __handle) \
   struct __anv_type *__name = __anv_type ## _from_handle(__handle)
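
/* Usage sketch (illustrative): entrypoints convert the dispatchable or
 * non-dispatchable Vulkan handle back to the driver struct on entry:
 *
 *    void anv_DestroySampler(VkDevice _device, VkSampler _sampler,
 *                            const VkAllocationCallbacks *pAllocator)
 *    {
 *       ANV_FROM_HANDLE(anv_device, device, _device);
 *       ANV_FROM_HANDLE(anv_sampler, sampler, _sampler);
 *       ...
 *    }
 */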
ANV_DEFINE_HANDLE_CASTS(anv_cmd_buffer, VkCommandBuffer)
ANV_DEFINE_HANDLE_CASTS(anv_device, VkDevice)
ANV_DEFINE_HANDLE_CASTS(anv_instance, VkInstance)
ANV_DEFINE_HANDLE_CASTS(anv_physical_device, VkPhysicalDevice)
ANV_DEFINE_HANDLE_CASTS(anv_queue, VkQueue)

ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_cmd_pool, VkCommandPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer, VkBuffer)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer_view, VkBufferView)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_pool, VkDescriptorPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set, VkDescriptorSet)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set_layout, VkDescriptorSetLayout)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_device_memory, VkDeviceMemory)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_fence, VkFence)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_event, VkEvent)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_framebuffer, VkFramebuffer)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image, VkImage)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image_view, VkImageView)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_cache, VkPipelineCache)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline, VkPipeline)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_layout, VkPipelineLayout)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_query_pool, VkQueryPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_render_pass, VkRenderPass)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_sampler, VkSampler)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_shader_module, VkShaderModule)
#define ANV_DEFINE_STRUCT_CASTS(__anv_type, __VkType) \
   \
   static inline const __VkType * \
   __anv_type ## _to_ ## __VkType(const struct __anv_type *__anv_obj) \
   { \
      return (const __VkType *) __anv_obj; \
   }

#define ANV_COMMON_TO_STRUCT(__VkType, __vk_name, __common_name) \
   const __VkType *__vk_name = anv_common_to_ ## __VkType(__common_name)

ANV_DEFINE_STRUCT_CASTS(anv_common, VkMemoryBarrier)
ANV_DEFINE_STRUCT_CASTS(anv_common, VkBufferMemoryBarrier)
ANV_DEFINE_STRUCT_CASTS(anv_common, VkImageMemoryBarrier)
/* Gen-specific function declarations */
#ifdef genX
#  include "anv_genX.h"
#else
#  define genX(x) gen7_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen75_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen8_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen9_##x
#  include "anv_genX.h"
#  undef genX
#endif

#endif /* ANV_PRIVATE_H */