/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
#include "brw_device_info.h"
#include "brw_compiler.h"
#include "util/macros.h"
#include "util/list.h"
/* Pre-declarations needed for WSI entrypoints */
typedef struct xcb_connection_t xcb_connection_t;
typedef uint32_t xcb_visualid_t;
typedef uint32_t xcb_window_t;
#include <vulkan/vulkan.h>
#include <vulkan/vulkan_intel.h>
#include <vulkan/vk_icd.h>

#include "anv_entrypoints.h"
#include "brw_context.h"
#define MAX_VBS 32
#define MAX_SETS 8
#define MAX_RTS 8
#define MAX_VIEWPORTS 16
#define MAX_SCISSORS 16
#define MAX_PUSH_CONSTANTS_SIZE 128
#define MAX_DYNAMIC_BUFFERS 16
#define MAX_IMAGES 8
#define MAX_SAMPLES_LOG2 4 /* SKL supports 16 samples */

#define anv_noreturn __attribute__((__noreturn__))
#define anv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
static inline uint32_t
align_down_npot_u32(uint32_t v, uint32_t a)
{
   return v - (v % a);
}

static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

static inline uint64_t
align_u64(uint64_t v, uint64_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

static inline int32_t
align_i32(int32_t v, int32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

/** Alignment must be a power of 2. */
static inline bool
anv_is_aligned(uintmax_t n, uintmax_t a)
{
   assert(a == (a & -a));
   return (n & (a - 1)) == 0;
}

static inline uint32_t
anv_minify(uint32_t n, uint32_t levels)
{
   if (unlikely(n == 0))
      return 0;
   else
      return MAX(n >> levels, 1);
}
static inline float
anv_clamp_f(float f, float min, float max)
{
   assert(min < max);

   if (f > max)
      return max;
   else if (f < min)
      return min;
   else
      return f;
}
static inline bool
anv_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
{
   if (*inout_mask & clear_mask) {
      *inout_mask &= ~clear_mask;
      return true;
   } else {
      return false;
   }
}
#define for_each_bit(b, dword) \
   for (uint32_t __dword = (dword); \
        (b) = __builtin_ffs(__dword) - 1, __dword; \
        __dword &= ~(1 << (b)))
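/* Illustrative usage sketch (not part of the driver): walk the set bits of a
 * dword, e.g. a dirty-bit mask.  `b` receives the index of each set bit.
 *
 *    uint32_t b;
 *    for_each_bit(b, cmd_buffer->state.dirty) {
 *       process_dirty_bit(b);   // hypothetical per-bit handler
 *    }
 */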
#define typed_memcpy(dest, src, count) ({ \
   static_assert(sizeof(*src) == sizeof(*dest), ""); \
   memcpy((dest), (src), (count) * sizeof(*(src))); \
})

#define zero(x) (memset(&(x), 0, sizeof(x)))
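/* Illustrative usage sketch: typed_memcpy statically rejects mismatched
 * element types, which a bare memcpy would silently accept.
 *
 *    VkViewport dst[MAX_VIEWPORTS], src[MAX_VIEWPORTS];
 *    typed_memcpy(dst, src, MAX_VIEWPORTS);   // OK: same element type
 *    // typed_memcpy(dst, &some_rect2d, 1);   // would fail the static_assert
 */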
/* Define no kernel as 1, since that's an illegal offset for a kernel */
#define NO_KERNEL 1

struct anv_common {
   VkStructureType sType;
   const void *pNext;
};
/* Whenever we generate an error, pass it through this function. Useful for
 * debugging, where we can break on it. Only call at error site, not when
 * propagating errors. Might be useful to plug in a stack trace here.
 */

VkResult __vk_errorf(VkResult error, const char *file, int line, const char *format, ...);

#ifdef DEBUG
#define vk_error(error) __vk_errorf(error, __FILE__, __LINE__, NULL);
#define vk_errorf(error, format, ...) __vk_errorf(error, __FILE__, __LINE__, format, ## __VA_ARGS__);
#else
#define vk_error(error) error
#define vk_errorf(error, format, ...) error
#endif
void __anv_finishme(const char *file, int line, const char *format, ...)
   anv_printflike(3, 4);
void anv_loge(const char *format, ...) anv_printflike(1, 2);
void anv_loge_v(const char *format, va_list va);
/**
 * Print a FINISHME message, including its source location.
 */
#define anv_finishme(format, ...) \
   __anv_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__);
/* A non-fatal assert.  Useful for debugging. */
#ifdef DEBUG
#define anv_assert(x) ({ \
   if (unlikely(!(x))) \
      fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x); \
})
#else
#define anv_assert(x)
#endif
/**
 * If a block of code is annotated with anv_validate, then the block runs only
 * in debug builds.
 */
#ifdef DEBUG
#define anv_validate if (1)
#else
#define anv_validate if (0)
#endif
void anv_abortf(const char *format, ...) anv_noreturn anv_printflike(1, 2);
void anv_abortfv(const char *format, va_list va) anv_noreturn;
#define stub_return(v) \
   do { \
      anv_finishme("stub %s", __func__); \
      return (v); \
   } while (0)

#define stub() \
   do { \
      anv_finishme("stub %s", __func__); \
      return; \
   } while (0)
/**
 * A dynamically growable, circular buffer.  Elements are added at head and
 * removed from tail.  head and tail are free-running uint32_t indices and we
 * only compute the modulo with size when accessing the array.  This way,
 * number of bytes in the queue is always head - tail, even in case of
 * wraparound.
 */

struct anv_vector {
   uint32_t head;
   uint32_t tail;
   uint32_t element_size;
   uint32_t size;
   void *data;
};

int anv_vector_init(struct anv_vector *queue, uint32_t element_size, uint32_t size);
void *anv_vector_add(struct anv_vector *queue);
void *anv_vector_remove(struct anv_vector *queue);

static inline int
anv_vector_length(struct anv_vector *queue)
{
   return (queue->head - queue->tail) / queue->element_size;
}
static inline void *
anv_vector_head(struct anv_vector *vector)
{
   assert(vector->tail < vector->head);
   return (void *)((char *)vector->data +
                   ((vector->head - vector->element_size) &
                    (vector->size - 1)));
}
static inline void *
anv_vector_tail(struct anv_vector *vector)
{
   return (void *)((char *)vector->data + (vector->tail & (vector->size - 1)));
}
static inline void
anv_vector_finish(struct anv_vector *queue)
{
   free(queue->data);
}
#define anv_vector_foreach(elem, queue) \
   static_assert(__builtin_types_compatible_p(__typeof__(queue), struct anv_vector *), ""); \
   for (uint32_t __anv_vector_offset = (queue)->tail; \
        elem = (queue)->data + (__anv_vector_offset & ((queue)->size - 1)), __anv_vector_offset < (queue)->head; \
        __anv_vector_offset += (queue)->element_size)
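/* Illustrative usage sketch (not part of the driver): the vector behaves as a
 * queue of fixed-size elements; size is in bytes and must be a power of two.
 *
 *    struct anv_vector v;
 *    anv_vector_init(&v, sizeof(struct anv_bo *), 8 * sizeof(struct anv_bo *));
 *    struct anv_bo **slot = anv_vector_add(&v);      // enqueue at head
 *    *slot = bo;
 *    struct anv_bo **oldest = anv_vector_remove(&v); // dequeue from tail
 *    anv_vector_finish(&v);
 */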
struct anv_bo {
   uint32_t gem_handle;

   /* Index into the current validation list.  This is used by the
    * validation list building algorithm to track which buffers are already
    * in the validation list so that we can ensure uniqueness.
    */
   uint32_t index;

   /* Last known offset.  This value is provided by the kernel when we
    * execbuf and is used as the presumed offset for the next bunch of
    * relocations.
    */
   uint64_t offset;

   uint64_t size;
   void *map;

   /* We need to set the WRITE flag on winsys bos so GEM will know we're
    * writing to them and synchronize uses on other rings (eg if the display
    * server uses the blitter ring).
    */
   bool is_winsys_bo;
};
/* Represents a lock-free linked list of "free" things.  This is used by
 * both the block pool and the state pools.  Unfortunately, in order to
 * solve the ABA problem, we can't use a single uint32_t head.
 */
union anv_free_list {
   struct {
      /* Free list offset */
      uint32_t offset;

      /* A simple count that is incremented every time the head changes. */
      uint32_t count;
   };
   uint64_t u64;
};

#define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { 1, 0 } })
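/* Illustrative sketch of why { offset, count } is needed (an assumption for
 * exposition, not driver code): a pop is a 64-bit compare-and-swap over both
 * fields, so a concurrent pop-then-push that restores the same offset still
 * bumps the count and the CAS correctly fails (the classic ABA problem).
 *
 *    union anv_free_list current, old, new;
 *    current.u64 = list->u64;
 *    do {
 *       old = current;
 *       new.offset = next_offset_of(old.offset);   // hypothetical helper
 *       new.count = old.count + 1;
 *       current.u64 = __sync_val_compare_and_swap(&list->u64, old.u64, new.u64);
 *    } while (current.u64 != old.u64);
 */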
struct anv_block_state {
   union {
      struct {
         uint32_t next;
         uint32_t end;
      };
      uint64_t u64;
   };
};
struct anv_block_pool {
   struct anv_device *device;

   struct anv_bo bo;

   /* The offset from the start of the bo to the "center" of the block
    * pool.  Pointers to allocated blocks are given by
    * bo.map + center_bo_offset + offsets.
    */
   uint32_t center_bo_offset;

   /* Current memory map of the block pool.  This pointer may or may not
    * point to the actual beginning of the block pool memory.  If
    * anv_block_pool_alloc_back has ever been called, then this pointer
    * will point to the "center" position of the buffer and all offsets
    * (negative or positive) given out by the block pool alloc functions
    * will be valid relative to this pointer.
    *
    * In particular, map == bo.map + center_offset
    */
   void *map;
   int fd;

   /**
    * Array of mmaps and gem handles owned by the block pool, reclaimed when
    * the block pool is destroyed.
    */
   struct anv_vector mmap_cleanups;

   uint32_t block_size;

   union anv_free_list free_list;
   struct anv_block_state state;

   union anv_free_list back_free_list;
   struct anv_block_state back_state;
};
/* Block pools are backed by a fixed-size 2GB memfd */
#define BLOCK_POOL_MEMFD_SIZE (1ull << 31)

/* The center of the block pool is also the middle of the memfd.  This may
 * change in the future if we decide differently for some reason.
 */
#define BLOCK_POOL_MEMFD_CENTER (BLOCK_POOL_MEMFD_SIZE / 2)
static inline uint32_t
anv_block_pool_size(struct anv_block_pool *pool)
{
   return pool->state.end + pool->back_state.end;
}
struct anv_state {
   int32_t offset;
   uint32_t alloc_size;
   void *map;
};

struct anv_fixed_size_state_pool {
   size_t state_size;
   union anv_free_list free_list;
   struct anv_block_state block;
};

#define ANV_MIN_STATE_SIZE_LOG2 6
#define ANV_MAX_STATE_SIZE_LOG2 10

#define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2 + 1)
struct anv_state_pool {
   struct anv_block_pool *block_pool;
   struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];
};
struct anv_state_stream_block;

struct anv_state_stream {
   struct anv_block_pool *block_pool;

   /* The current working block */
   struct anv_state_stream_block *block;

   /* Offset at which the current block starts */
   uint32_t start;

   /* Offset at which to allocate the next state */
   uint32_t next;

   /* Offset at which the current block ends */
   uint32_t end;
};
#define CACHELINE_SIZE 64
#define CACHELINE_MASK 63

static inline void
anv_clflush_range(void *start, size_t size)
{
   void *p = (void *) (((uintptr_t) start) & ~CACHELINE_MASK);
   void *end = start + size;

   __builtin_ia32_mfence();
   while (p < end) {
      __builtin_ia32_clflush(p);
      p += CACHELINE_SIZE;
   }
}
static inline void
anv_state_clflush(struct anv_state state)
{
   anv_clflush_range(state.map, state.alloc_size);
}
void anv_block_pool_init(struct anv_block_pool *pool,
                         struct anv_device *device, uint32_t block_size);
void anv_block_pool_finish(struct anv_block_pool *pool);
int32_t anv_block_pool_alloc(struct anv_block_pool *pool);
int32_t anv_block_pool_alloc_back(struct anv_block_pool *pool);
void anv_block_pool_free(struct anv_block_pool *pool, int32_t offset);
void anv_state_pool_init(struct anv_state_pool *pool,
                         struct anv_block_pool *block_pool);
void anv_state_pool_finish(struct anv_state_pool *pool);
struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,
                                      size_t state_size, size_t alignment);
void anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state);
void anv_state_stream_init(struct anv_state_stream *stream,
                           struct anv_block_pool *block_pool);
void anv_state_stream_finish(struct anv_state_stream *stream);
struct anv_state anv_state_stream_alloc(struct anv_state_stream *stream,
                                        uint32_t size, uint32_t alignment);
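/* Illustrative usage sketch (not part of the driver): suballocate a small
 * piece of GPU state from a state pool and release it when done.  state.map
 * is the CPU-visible mapping; state.offset is relative to the block pool.
 *
 *    struct anv_state state =
 *       anv_state_pool_alloc(&device->dynamic_state_pool, 64, 32);
 *    memcpy(state.map, data, 64);
 *    anv_state_pool_free(&device->dynamic_state_pool, state);
 */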
/**
 * Implements a pool of re-usable BOs.  The interface is identical to that
 * of block_pool except that each block is its own BO.
 */
struct anv_bo_pool {
   struct anv_device *device;

   void *free_list[16];
};
void anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device);
void anv_bo_pool_finish(struct anv_bo_pool *pool);
VkResult anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo,
                           uint32_t size);
void anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo);
struct anv_scratch_pool {
   /* Indexed by Per-Thread Scratch Space number (the hardware value) and stage */
   struct anv_bo bos[16][MESA_SHADER_STAGES];
};
void anv_scratch_pool_init(struct anv_device *device,
                           struct anv_scratch_pool *pool);
void anv_scratch_pool_finish(struct anv_device *device,
                             struct anv_scratch_pool *pool);
struct anv_bo *anv_scratch_pool_alloc(struct anv_device *device,
                                      struct anv_scratch_pool *pool,
                                      gl_shader_stage stage,
                                      unsigned per_thread_scratch);
void *anv_resolve_entrypoint(uint32_t index);

extern struct anv_dispatch_table dtable;
#define ANV_CALL(func) ({ \
   if (dtable.func == NULL) { \
      size_t idx = offsetof(struct anv_dispatch_table, func) / sizeof(void *); \
      dtable.entrypoints[idx] = anv_resolve_entrypoint(idx); \
   } \
   dtable.func; \
})
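/* Illustrative usage sketch: calls route through the dispatch table and the
 * entrypoint is resolved lazily on first use, e.g.:
 *
 *    ANV_CALL(CmdDraw)(commandBuffer, 3, 1, 0, 0);
 */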
static inline void *
anv_alloc(const VkAllocationCallbacks *alloc,
          size_t size, size_t align,
          VkSystemAllocationScope scope)
{
   return alloc->pfnAllocation(alloc->pUserData, size, align, scope);
}
static inline void *
anv_realloc(const VkAllocationCallbacks *alloc,
            void *ptr, size_t size, size_t align,
            VkSystemAllocationScope scope)
{
   return alloc->pfnReallocation(alloc->pUserData, ptr, size, align, scope);
}
static inline void
anv_free(const VkAllocationCallbacks *alloc, void *data)
{
   alloc->pfnFree(alloc->pUserData, data);
}
static inline void *
anv_alloc2(const VkAllocationCallbacks *parent_alloc,
           const VkAllocationCallbacks *alloc,
           size_t size, size_t align,
           VkSystemAllocationScope scope)
{
   if (alloc)
      return anv_alloc(alloc, size, align, scope);
   else
      return anv_alloc(parent_alloc, size, align, scope);
}
static inline void
anv_free2(const VkAllocationCallbacks *parent_alloc,
          const VkAllocationCallbacks *alloc,
          void *data)
{
   if (alloc)
      anv_free(alloc, data);
   else
      anv_free(parent_alloc, data);
}
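/* Illustrative usage sketch: the *2 variants implement the Vulkan rule that
 * an object-level allocator, when provided, overrides the parent's, e.g.:
 *
 *    struct anv_fence *fence =
 *       anv_alloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
 *                  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 *    anv_free2(&device->alloc, pAllocator, fence);
 */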
struct anv_wsi_interface;

#define VK_ICD_WSI_PLATFORM_MAX 5
struct anv_physical_device {
   VK_LOADER_DATA _loader_data;

   struct anv_instance *instance;
   uint32_t chipset_id;
   char path[20];
   const char *name;
   const struct brw_device_info *info;
   uint64_t aperture_size;
   struct brw_compiler *compiler;
   struct isl_device isl_dev;
   int cmd_parser_version;

   struct anv_wsi_interface *wsi[VK_ICD_WSI_PLATFORM_MAX];
};
struct anv_instance {
   VK_LOADER_DATA _loader_data;

   VkAllocationCallbacks alloc;

   uint32_t apiVersion;
   int physicalDeviceCount;
   struct anv_physical_device physicalDevice;
};
VkResult anv_init_wsi(struct anv_physical_device *physical_device);
void anv_finish_wsi(struct anv_physical_device *physical_device);
struct anv_meta_state {
   VkAllocationCallbacks alloc;

   /**
    * Use array element `i` for images with `2^i` samples.
    */
   struct {
      /**
       * Pipeline N is used to clear color attachment N of the current
       * subpass.
       *
       * HACK: We use one pipeline per color attachment to work around the
       * compiler's inability to dynamically set the render target index of
       * the render target write message.
       */
      struct anv_pipeline *color_pipelines[MAX_RTS];

      struct anv_pipeline *depth_only_pipeline;
      struct anv_pipeline *stencil_only_pipeline;
      struct anv_pipeline *depthstencil_pipeline;
   } clear[1 + MAX_SAMPLES_LOG2];

   struct {
      VkRenderPass render_pass;

      /** Pipeline that blits from a 1D image. */
      VkPipeline pipeline_1d_src;

      /** Pipeline that blits from a 2D image. */
      VkPipeline pipeline_2d_src;

      /** Pipeline that blits from a 3D image. */
      VkPipeline pipeline_3d_src;

      VkPipelineLayout pipeline_layout;
      VkDescriptorSetLayout ds_layout;
   } blit;

   struct {
      VkRenderPass render_pass;

      VkPipelineLayout img_p_layout;
      VkDescriptorSetLayout img_ds_layout;
      VkPipelineLayout buf_p_layout;
      VkDescriptorSetLayout buf_ds_layout;

      /* Pipelines indexed by source and destination type.  See the
       * blit2d_src_type and blit2d_dst_type enums in anv_meta_blit2d.c to
       * see what these mean.
       */
      VkPipeline pipelines[2][3];
   } blit2d;

   struct {
      /** Pipeline [i] resolves an image with 2^(i+1) samples. */
      VkPipeline pipelines[MAX_SAMPLES_LOG2];

      VkPipelineLayout pipeline_layout;
      VkDescriptorSetLayout ds_layout;
   } resolve;
};
struct anv_queue {
   VK_LOADER_DATA _loader_data;

   struct anv_device *device;

   struct anv_state_pool *pool;
};
struct anv_pipeline_cache {
   struct anv_device *device;
   struct anv_state_stream program_stream;
   pthread_mutex_t mutex;

   uint32_t total_size;
   uint32_t table_size;
   uint32_t kernel_count;
   uint32_t *hash_table;
};

struct anv_pipeline_bind_map;
void anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
                             struct anv_device *device);
void anv_pipeline_cache_finish(struct anv_pipeline_cache *cache);
uint32_t anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                                   const unsigned char *sha1,
                                   const struct brw_stage_prog_data **prog_data,
                                   struct anv_pipeline_bind_map *map);
uint32_t anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                          const unsigned char *sha1,
                                          const void *kernel,
                                          size_t kernel_size,
                                          const struct brw_stage_prog_data **prog_data,
                                          size_t prog_data_size,
                                          struct anv_pipeline_bind_map *map);
struct anv_device {
   VK_LOADER_DATA _loader_data;

   VkAllocationCallbacks alloc;

   struct anv_instance *instance;
   uint32_t chipset_id;
   struct brw_device_info info;
   struct isl_device isl_dev;
   int context_id;
   int fd;
   bool can_chain_batches;
   bool robust_buffer_access;

   struct anv_bo_pool batch_bo_pool;

   struct anv_block_pool dynamic_state_block_pool;
   struct anv_state_pool dynamic_state_pool;

   struct anv_block_pool instruction_block_pool;
   struct anv_state_pool instruction_state_pool;
   struct anv_pipeline_cache default_pipeline_cache;

   struct anv_block_pool surface_state_block_pool;
   struct anv_state_pool surface_state_pool;

   struct anv_bo workaround_bo;

   struct anv_meta_state meta_state;

   struct anv_state border_colors;

   struct anv_queue queue;

   struct anv_scratch_pool scratch_pool;

   uint32_t default_mocs;

   pthread_mutex_t mutex;
};
void anv_device_get_cache_uuid(void *uuid);
void *anv_gem_mmap(struct anv_device *device,
                   uint32_t gem_handle, uint64_t offset, uint64_t size, uint32_t flags);
void anv_gem_munmap(void *p, uint64_t size);
uint32_t anv_gem_create(struct anv_device *device, size_t size);
void anv_gem_close(struct anv_device *device, uint32_t gem_handle);
uint32_t anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
int anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns);
int anv_gem_execbuffer(struct anv_device *device,
                       struct drm_i915_gem_execbuffer2 *execbuf);
int anv_gem_set_tiling(struct anv_device *device, uint32_t gem_handle,
                       uint32_t stride, uint32_t tiling);
int anv_gem_create_context(struct anv_device *device);
int anv_gem_destroy_context(struct anv_device *device, int context);
int anv_gem_get_param(int fd, uint32_t param);
bool anv_gem_get_bit6_swizzle(int fd, uint32_t tiling);
int anv_gem_get_aperture(int fd, uint64_t *size);
int anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle);
uint32_t anv_gem_fd_to_handle(struct anv_device *device, int fd);
int anv_gem_set_caching(struct anv_device *device, uint32_t gem_handle, uint32_t caching);
int anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
                       uint32_t read_domains, uint32_t write_domain);
VkResult anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size);
struct anv_reloc_list {
   size_t num_relocs;
   size_t array_length;
   struct drm_i915_gem_relocation_entry *relocs;
   struct anv_bo **reloc_bos;
};
VkResult anv_reloc_list_init(struct anv_reloc_list *list,
                             const VkAllocationCallbacks *alloc);
void anv_reloc_list_finish(struct anv_reloc_list *list,
                           const VkAllocationCallbacks *alloc);

uint64_t anv_reloc_list_add(struct anv_reloc_list *list,
                            const VkAllocationCallbacks *alloc,
                            uint32_t offset, struct anv_bo *target_bo,
                            uint32_t delta);
struct anv_batch_bo {
   /* Link in the anv_cmd_buffer.owned_batch_bos list */
   struct list_head link;

   struct anv_bo bo;

   /* Bytes actually consumed in this batch BO */
   size_t length;

   /* Last seen surface state block pool bo offset */
   uint32_t last_ss_pool_bo_offset;

   struct anv_reloc_list relocs;
};
struct anv_batch {
   const VkAllocationCallbacks *alloc;

   void *start;
   void *end;
   void *next;

   struct anv_reloc_list *relocs;

   /* This callback is called (with the associated user data) in the event
    * that the batch runs out of space.
    */
   VkResult (*extend_cb)(struct anv_batch *, void *);
   void *user_data;
};
void *anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords);
void anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other);
uint64_t anv_batch_emit_reloc(struct anv_batch *batch,
                              void *location, struct anv_bo *bo, uint32_t offset);
VkResult anv_device_submit_simple_batch(struct anv_device *device,
                                        struct anv_batch *batch);
struct anv_address {
   struct anv_bo *bo;
   uint32_t offset;
};

#define __gen_address_type struct anv_address
#define __gen_user_data struct anv_batch
static inline uint64_t
__gen_combine_address(struct anv_batch *batch, void *location,
                      const struct anv_address address, uint32_t delta)
{
   if (address.bo == NULL) {
      return address.offset + delta;
   } else {
      assert(batch->start <= location && location < batch->end);

      return anv_batch_emit_reloc(batch, location, address.bo, address.offset + delta);
   }
}
/* Wrapper macros needed to work around preprocessor argument issues.  In
 * particular, arguments don't get pre-evaluated if they are concatenated.
 * This means that, if you pass GENX(3DSTATE_PS) into the emit macro, the
 * GENX macro won't get evaluated if the emit macro contains "cmd ## foo".
 * We can work around this easily enough with these helpers.
 */
#define __anv_cmd_length(cmd) cmd ## _length
#define __anv_cmd_length_bias(cmd) cmd ## _length_bias
#define __anv_cmd_header(cmd) cmd ## _header
#define __anv_cmd_pack(cmd) cmd ## _pack
#define __anv_reg_num(reg) reg ## _num
#define anv_pack_struct(dst, struc, ...) do { \
      struct struc __template = { \
         __VA_ARGS__ \
      }; \
      __anv_cmd_pack(struc)(NULL, dst, &__template); \
      VG(VALGRIND_CHECK_MEM_IS_DEFINED(dst, __anv_cmd_length(struc) * 4)); \
   } while (0)
#define anv_batch_emitn(batch, n, cmd, ...) ({ \
      void *__dst = anv_batch_emit_dwords(batch, n); \
      struct cmd __template = { \
         __anv_cmd_header(cmd), \
         .DWordLength = n - __anv_cmd_length_bias(cmd), \
         __VA_ARGS__ \
      }; \
      __anv_cmd_pack(cmd)(batch, __dst, &__template); \
      __dst; \
   })
#define anv_batch_emit_merge(batch, dwords0, dwords1) \
   do { \
      uint32_t *dw; \
      \
      static_assert(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1), "mismatch merge"); \
      dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0)); \
      for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++) \
         dw[i] = (dwords0)[i] | (dwords1)[i]; \
      VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, ARRAY_SIZE(dwords0) * 4)); \
   } while (0)
#define anv_batch_emit(batch, cmd, name) \
   for (struct cmd name = { __anv_cmd_header(cmd) }, \
        *_dst = anv_batch_emit_dwords(batch, __anv_cmd_length(cmd)); \
        __builtin_expect(_dst != NULL, 1); \
        ({ __anv_cmd_pack(cmd)(batch, _dst, &name); \
           VG(VALGRIND_CHECK_MEM_IS_DEFINED(_dst, __anv_cmd_length(cmd) * 4)); \
           _dst = NULL; \
         }))
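/* Illustrative usage sketch (assumes the genxml-generated command structs):
 * the macro's for-loop opens a scope in which `name` is the command template;
 * when the scope closes, the template is packed into the batch.
 *
 *    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 *       pc.CommandStreamerStallEnable = true;
 *       pc.DCFlushEnable = true;
 *    }
 */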
#define anv_state_pool_emit(pool, cmd, align, ...) ({ \
      const uint32_t __size = __anv_cmd_length(cmd) * 4; \
      struct anv_state __state = \
         anv_state_pool_alloc((pool), __size, align); \
      struct cmd __template = { \
         __VA_ARGS__ \
      }; \
      __anv_cmd_pack(cmd)(NULL, __state.map, &__template); \
      VG(VALGRIND_CHECK_MEM_IS_DEFINED(__state.map, __anv_cmd_length(cmd) * 4)); \
      if (!(pool)->block_pool->device->info.has_llc) \
         anv_state_clflush(__state); \
      __state; \
   })
#define GEN7_MOCS (struct GEN7_MEMORY_OBJECT_CONTROL_STATE) { \
   .GraphicsDataTypeGFDT = 0, \
   .LLCCacheabilityControlLLCCC = 0, \
   .L3CacheabilityControlL3CC = 1, \
}

#define GEN75_MOCS (struct GEN75_MEMORY_OBJECT_CONTROL_STATE) { \
   .LLCeLLCCacheabilityControlLLCCC = 0, \
   .L3CacheabilityControlL3CC = 1, \
}

#define GEN8_MOCS (struct GEN8_MEMORY_OBJECT_CONTROL_STATE) { \
   .MemoryTypeLLCeLLCCacheabilityControl = WB, \
   .TargetCache = L3DefertoPATforLLCeLLCselection, \
   .AgeforQUADLRU = 0 \
}

/* Skylake: MOCS is now an index into an array of 62 different caching
 * configurations programmed by the kernel.
 */
#define GEN9_MOCS (struct GEN9_MEMORY_OBJECT_CONTROL_STATE) { \
   /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
   .IndextoMOCSTables = 2 \
}

#define GEN9_MOCS_PTE { \
   /* TC=LLC/eLLC, LeCC=PTE, LRUM=3, L3CC=WB */ \
   .IndextoMOCSTables = 1 \
}
struct anv_device_memory {
   struct anv_bo bo;
   uint32_t type_index;
   VkDeviceSize map_size;
   void *map;
};
/**
 * Header for Vertex URB Entry (VUE)
 */
struct anv_vue_header {
   uint32_t Reserved;
   uint32_t RTAIndex; /* RenderTargetArrayIndex */
   uint32_t ViewportIndex;
   float PointWidth;
};
struct anv_descriptor_set_binding_layout {
   /* The type of the descriptors in this binding */
   VkDescriptorType type;

   /* Number of array elements in this binding */
   uint16_t array_size;

   /* Index into the flattened descriptor set */
   uint16_t descriptor_index;

   /* Index into the dynamic state array for a dynamic buffer */
   int16_t dynamic_offset_index;

   /* Index into the descriptor set buffer views */
   int16_t buffer_index;

   struct {
      /* Index into the binding table for the associated surface */
      int16_t surface_index;

      /* Index into the sampler table for the associated sampler */
      int16_t sampler_index;

      /* Index into the image table for the associated image */
      int16_t image_index;
   } stage[MESA_SHADER_STAGES];

   /* Immutable samplers (or NULL if no immutable samplers) */
   struct anv_sampler **immutable_samplers;
};
struct anv_descriptor_set_layout {
   /* Number of bindings in this descriptor set */
   uint16_t binding_count;

   /* Total size of the descriptor set with room for all array entries */
   uint16_t size;

   /* Shader stages affected by this descriptor set */
   uint16_t shader_stages;

   /* Number of buffers in this descriptor set */
   uint16_t buffer_count;

   /* Number of dynamic offsets used by this descriptor set */
   uint16_t dynamic_offset_count;

   /* Bindings in this descriptor set */
   struct anv_descriptor_set_binding_layout binding[0];
};
struct anv_descriptor {
   VkDescriptorType type;

   union {
      struct {
         struct anv_image_view *image_view;
         struct anv_sampler *sampler;
      };

      struct anv_buffer_view *buffer_view;
   };
};
struct anv_descriptor_set {
   const struct anv_descriptor_set_layout *layout;
   uint32_t size;
   uint32_t buffer_count;
   struct anv_buffer_view *buffer_views;
   struct anv_descriptor descriptors[0];
};
struct anv_descriptor_pool {
   uint32_t size;
   uint32_t next;
   uint32_t free_list;

   struct anv_state_stream surface_state_stream;
   void *surface_state_free_list;

   char data[0];
};
VkResult
anv_descriptor_set_create(struct anv_device *device,
                          struct anv_descriptor_pool *pool,
                          const struct anv_descriptor_set_layout *layout,
                          struct anv_descriptor_set **out_set);

void
anv_descriptor_set_destroy(struct anv_device *device,
                           struct anv_descriptor_pool *pool,
                           struct anv_descriptor_set *set);
#define ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS UINT8_MAX

struct anv_pipeline_binding {
   /* The descriptor set this surface corresponds to.  The special value of
    * ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS indicates that the offset refers
    * to a color attachment and not a regular descriptor.
    */
   uint8_t set;

   /* Binding in the descriptor set */
   uint8_t binding;

   /* Index in the binding */
   uint8_t index;
};
struct anv_pipeline_layout {
   struct {
      struct anv_descriptor_set_layout *layout;
      uint32_t dynamic_offset_start;
   } set[MAX_SETS];

   uint32_t num_sets;

   struct {
      bool has_dynamic_offsets;
   } stage[MESA_SHADER_STAGES];

   unsigned char sha1[20];
};
struct anv_buffer {
   struct anv_device *device;
   VkDeviceSize size;

   VkBufferUsageFlags usage;

   /* Set when bound */
   struct anv_bo *bo;
   VkDeviceSize offset;
};
enum anv_cmd_dirty_bits {
   ANV_CMD_DIRTY_DYNAMIC_VIEWPORT = 1 << 0, /* VK_DYNAMIC_STATE_VIEWPORT */
   ANV_CMD_DIRTY_DYNAMIC_SCISSOR = 1 << 1, /* VK_DYNAMIC_STATE_SCISSOR */
   ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH = 1 << 2, /* VK_DYNAMIC_STATE_LINE_WIDTH */
   ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS = 1 << 3, /* VK_DYNAMIC_STATE_DEPTH_BIAS */
   ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS = 1 << 4, /* VK_DYNAMIC_STATE_BLEND_CONSTANTS */
   ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS = 1 << 5, /* VK_DYNAMIC_STATE_DEPTH_BOUNDS */
   ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK = 1 << 6, /* VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK */
   ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK = 1 << 7, /* VK_DYNAMIC_STATE_STENCIL_WRITE_MASK */
   ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE = 1 << 8, /* VK_DYNAMIC_STATE_STENCIL_REFERENCE */
   ANV_CMD_DIRTY_DYNAMIC_ALL = (1 << 9) - 1,
   ANV_CMD_DIRTY_PIPELINE = 1 << 9,
   ANV_CMD_DIRTY_INDEX_BUFFER = 1 << 10,
   ANV_CMD_DIRTY_RENDER_TARGETS = 1 << 11,
};
typedef uint32_t anv_cmd_dirty_mask_t;
enum anv_pipe_bits {
   ANV_PIPE_DEPTH_CACHE_FLUSH_BIT = (1 << 0),
   ANV_PIPE_STALL_AT_SCOREBOARD_BIT = (1 << 1),
   ANV_PIPE_STATE_CACHE_INVALIDATE_BIT = (1 << 2),
   ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT = (1 << 3),
   ANV_PIPE_VF_CACHE_INVALIDATE_BIT = (1 << 4),
   ANV_PIPE_DATA_CACHE_FLUSH_BIT = (1 << 5),
   ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT = (1 << 10),
   ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT = (1 << 11),
   ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT = (1 << 12),
   ANV_PIPE_DEPTH_STALL_BIT = (1 << 13),
   ANV_PIPE_CS_STALL_BIT = (1 << 20),

   /* This bit does not exist directly in PIPE_CONTROL.  Instead it means that
    * a flush has happened but not a CS stall.  The next time we do any sort
    * of invalidation we need to insert a CS stall at that time.  Otherwise,
    * we would have to CS stall on every flush which could be bad.
    */
   ANV_PIPE_NEEDS_CS_STALL_BIT = (1 << 21),
};
#define ANV_PIPE_FLUSH_BITS ( \
   ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | \
   ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
   ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT)

#define ANV_PIPE_STALL_BITS ( \
   ANV_PIPE_STALL_AT_SCOREBOARD_BIT | \
   ANV_PIPE_DEPTH_STALL_BIT | \
   ANV_PIPE_CS_STALL_BIT)

#define ANV_PIPE_INVALIDATE_BITS ( \
   ANV_PIPE_STATE_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_VF_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
   ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT)
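/* Illustrative sketch (an assumption for exposition, not driver code): state
 * changes accumulate bits in anv_cmd_state::pending_pipe_bits and a single
 * PIPE_CONTROL is emitted later, so back-to-back flushes coalesce:
 *
 *    cmd_buffer->state.pending_pipe_bits |=
 *       ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_NEEDS_CS_STALL_BIT;
 */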
struct anv_vertex_binding {
   struct anv_buffer *buffer;
   VkDeviceSize offset;
};
struct anv_push_constants {
   /* Current allocated size of this push constants data structure.
    * Because a decent chunk of it may not be used (images on SKL, for
    * instance), we won't actually allocate the entire structure up-front.
    */
   uint32_t size;

   /* Push constant data provided by the client through vkPushConstants */
   uint8_t client_data[MAX_PUSH_CONSTANTS_SIZE];

   /* Our hardware only provides zero-based vertex and instance id so, in
    * order to satisfy the vulkan requirements, we may have to push one or
    * both of these into the shader.
    */
   uint32_t base_vertex;
   uint32_t base_instance;

   /* Offsets and ranges for dynamically bound buffers */
   struct {
      uint32_t offset;
      uint32_t range;
   } dynamic[MAX_DYNAMIC_BUFFERS];

   /* Image data for image_load_store on pre-SKL */
   struct brw_image_param images[MAX_IMAGES];
};
struct anv_dynamic_state {
   struct {
      uint32_t count;
      VkViewport viewports[MAX_VIEWPORTS];
   } viewport;

   struct {
      uint32_t count;
      VkRect2D scissors[MAX_SCISSORS];
   } scissor;

   float line_width;

   struct {
      float bias;
      float clamp;
      float slope;
   } depth_bias;

   float blend_constants[4];

   struct {
      float min;
      float max;
   } depth_bounds;

   struct {
      uint32_t front;
      uint32_t back;
   } stencil_compare_mask;

   struct {
      uint32_t front;
      uint32_t back;
   } stencil_write_mask;

   struct {
      uint32_t front;
      uint32_t back;
   } stencil_reference;
};
extern const struct anv_dynamic_state default_dynamic_state;

void anv_dynamic_state_copy(struct anv_dynamic_state *dest,
                            const struct anv_dynamic_state *src,
                            uint32_t copy_mask);
/**
 * Attachment state when recording a renderpass instance.
 *
 * The clear value is valid only if there exists a pending clear.
 */
struct anv_attachment_state {
   VkImageAspectFlags pending_clear_aspects;
   VkClearValue clear_value;
};
/** State required while building cmd buffer */
struct anv_cmd_state {
   /* PIPELINE_SELECT.PipelineSelection */
   uint32_t current_pipeline;
   const struct anv_l3_config *current_l3_config;
   uint32_t vb_dirty;
   anv_cmd_dirty_mask_t dirty;
   anv_cmd_dirty_mask_t compute_dirty;
   enum anv_pipe_bits pending_pipe_bits;
   uint32_t num_workgroups_offset;
   struct anv_bo *num_workgroups_bo;
   VkShaderStageFlags descriptors_dirty;
   VkShaderStageFlags push_constants_dirty;
   uint32_t scratch_size;
   struct anv_pipeline *pipeline;
   struct anv_pipeline *compute_pipeline;
   struct anv_framebuffer *framebuffer;
   struct anv_render_pass *pass;
   struct anv_subpass *subpass;
   VkRect2D render_area;
   uint32_t restart_index;
   struct anv_vertex_binding vertex_bindings[MAX_VBS];
   struct anv_descriptor_set *descriptors[MAX_SETS];
   VkShaderStageFlags push_constant_stages;
   struct anv_push_constants *push_constants[MESA_SHADER_STAGES];
   struct anv_state binding_tables[MESA_SHADER_STAGES];
   struct anv_state samplers[MESA_SHADER_STAGES];
   struct anv_dynamic_state dynamic;
   bool need_query_wa;

   /**
    * Array length is anv_cmd_state::pass::attachment_count. Array content is
    * valid only when recording a render pass instance.
    */
   struct anv_attachment_state *attachments;

   struct {
      struct anv_buffer *index_buffer;
      uint32_t index_type; /**< 3DSTATE_INDEX_BUFFER.IndexFormat */
      uint32_t index_offset;
   } gen7;
};
struct anv_cmd_pool {
   VkAllocationCallbacks alloc;
   struct list_head cmd_buffers;
};

#define ANV_CMD_BUFFER_BATCH_SIZE 8192
enum anv_cmd_buffer_exec_mode {
   ANV_CMD_BUFFER_EXEC_MODE_PRIMARY,
   ANV_CMD_BUFFER_EXEC_MODE_EMIT,
   ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT,
   ANV_CMD_BUFFER_EXEC_MODE_CHAIN,
   ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN,
};
struct anv_cmd_buffer {
   VK_LOADER_DATA _loader_data;

   struct anv_device *device;

   struct anv_cmd_pool *pool;
   struct list_head pool_link;

   struct anv_batch batch;

   /* Fields required for the actual chain of anv_batch_bo's.
    *
    * These fields are initialized by anv_cmd_buffer_init_batch_bo_chain().
    */
   struct list_head batch_bos;
   enum anv_cmd_buffer_exec_mode exec_mode;

   /* A vector of anv_batch_bo pointers for every batch or surface buffer
    * referenced by this command buffer
    *
    * initialized by anv_cmd_buffer_init_batch_bo_chain()
    */
   struct anv_vector seen_bbos;

   /* A vector of int32_t's for every block of binding tables.
    *
    * initialized by anv_cmd_buffer_init_batch_bo_chain()
    */
   struct anv_vector bt_blocks;
   uint32_t bt_next;
   struct anv_reloc_list surface_relocs;

   /* Information needed for execbuf
    *
    * These fields are generated by anv_cmd_buffer_prepare_execbuf().
    */
   struct {
      struct drm_i915_gem_execbuffer2 execbuf;

      struct drm_i915_gem_exec_object2 *objects;
      uint32_t bo_count;
      struct anv_bo **bos;

      /* Allocated length of the 'objects' and 'bos' arrays */
      uint32_t array_length;

      bool need_reloc;
   } execbuf2;

   /* Serial for tracking buffer completion */
   uint32_t serial;

   /* Stream objects for storing temporary data */
   struct anv_state_stream surface_state_stream;
   struct anv_state_stream dynamic_state_stream;

   VkCommandBufferUsageFlags usage_flags;
   VkCommandBufferLevel level;

   struct anv_cmd_state state;
};
VkResult anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
                                  struct anv_cmd_buffer *secondary);
void anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer);
VkResult anv_cmd_buffer_emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                           unsigned stage, struct anv_state *bt_state);
VkResult anv_cmd_buffer_emit_samplers(struct anv_cmd_buffer *cmd_buffer,
                                      unsigned stage, struct anv_state *state);
uint32_t anv_cmd_buffer_flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer);

struct anv_state anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
                                             const void *data, uint32_t size, uint32_t alignment);
struct anv_state anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
                                              uint32_t *a, uint32_t *b,
                                              uint32_t dwords, uint32_t alignment);
struct anv_address
anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer);
struct anv_state
anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t entries, uint32_t *state_offset);
struct anv_state
anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer);
struct anv_state
anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t size, uint32_t alignment);

VkResult
anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer);
void gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer);
void gen8_cmd_buffer_emit_depth_viewport(struct anv_cmd_buffer *cmd_buffer,
                                         bool depth_clamp_enable);
void gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer);

void anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_state_setup_attachments(struct anv_cmd_buffer *cmd_buffer,
                                     const VkRenderPassBeginInfo *info);

void anv_cmd_buffer_set_subpass(struct anv_cmd_buffer *cmd_buffer,
                                struct anv_subpass *subpass);

struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
                              gl_shader_stage stage);
struct anv_state
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer);

void anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer);
const struct anv_image_view *
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer);

void anv_cmd_buffer_dump(struct anv_cmd_buffer *cmd_buffer);
struct anv_fence {
   struct anv_bo bo;
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   bool ready;
};

struct anv_event {
   uint64_t semaphore;
   struct anv_state state;
};
struct anv_shader_module {
   struct nir_shader *nir;

   unsigned char sha1[20];
   uint32_t size;
   char data[0];
};
void anv_hash_shader(unsigned char *hash, const void *key, size_t key_size,
                     struct anv_shader_module *module,
                     const char *entrypoint,
                     const struct anv_pipeline_layout *pipeline_layout,
                     const VkSpecializationInfo *spec_info);
static inline gl_shader_stage
vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
{
   assert(__builtin_popcount(vk_stage) == 1);
   return ffs(vk_stage) - 1;
}

static inline VkShaderStageFlagBits
mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
{
   return (1 << mesa_stage);
}
#define ANV_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)

#define anv_foreach_stage(stage, stage_bits) \
   for (gl_shader_stage stage, \
        __tmp = (gl_shader_stage)((stage_bits) & ANV_STAGE_MASK); \
        stage = __builtin_ffs(__tmp) - 1, __tmp; \
        __tmp &= ~(1 << (stage)))
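/* Illustrative usage sketch: iterate only the stages present in a
 * VkShaderStageFlags mask; `s` takes each gl_shader_stage in turn.
 *
 *    anv_foreach_stage(s, cmd_buffer->state.descriptors_dirty) {
 *       flush_descriptor_set(cmd_buffer, s);   // hypothetical per-stage step
 *    }
 */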
struct anv_pipeline_bind_map {
   uint32_t surface_count;
   uint32_t sampler_count;
   uint32_t image_count;

   struct anv_pipeline_binding *surface_to_descriptor;
   struct anv_pipeline_binding *sampler_to_descriptor;
};
struct anv_shader_bin {
   uint32_t ref_cnt;

   struct anv_state kernel;
   uint32_t kernel_size;

   struct anv_pipeline_bind_map bind_map;

   uint32_t prog_data_size;

   /* Prog data follows, then the key, both aligned to 8-bytes */
};
struct anv_shader_bin *
anv_shader_bin_create(struct anv_device *device,
                      const void *key, uint32_t key_size,
                      const void *kernel, uint32_t kernel_size,
                      const void *prog_data, uint32_t prog_data_size,
                      const struct anv_pipeline_bind_map *bind_map);

void
anv_shader_bin_destroy(struct anv_device *device, struct anv_shader_bin *shader);
static inline void
anv_shader_bin_ref(struct anv_shader_bin *shader)
{
   assert(shader->ref_cnt >= 1);
   __sync_fetch_and_add(&shader->ref_cnt, 1);
}

static inline void
anv_shader_bin_unref(struct anv_device *device, struct anv_shader_bin *shader)
{
   assert(shader->ref_cnt >= 1);
   if (__sync_fetch_and_add(&shader->ref_cnt, -1) == 1)
      anv_shader_bin_destroy(device, shader);
}
static inline const struct brw_stage_prog_data *
anv_shader_bin_get_prog_data(const struct anv_shader_bin *shader)
{
   const void *data = shader;
   data += align_u32(sizeof(struct anv_shader_bin), 8);

   return data;
}
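/* Illustrative layout sketch (an assumption based on the comment above): the
 * shader binary is one allocation with the prog data and key trailing the
 * struct at 8-byte alignment,
 *
 *    [struct anv_shader_bin][prog data (prog_data_size)][key]
 *
 * which is why anv_shader_bin_get_prog_data() simply offsets past the struct.
 */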
struct anv_pipeline {
   struct anv_device *device;
   struct anv_batch batch;
   uint32_t batch_data[512];
   struct anv_reloc_list batch_relocs;
   uint32_t dynamic_state_mask;
   struct anv_dynamic_state dynamic_state;

   struct anv_pipeline_layout *layout;
   struct anv_pipeline_bind_map bindings[MESA_SHADER_STAGES];

   bool needs_data_cache;

   const struct brw_stage_prog_data *prog_data[MESA_SHADER_STAGES];

   struct {
      uint32_t start[MESA_SHADER_GEOMETRY + 1];
      uint32_t size[MESA_SHADER_GEOMETRY + 1];
      uint32_t entries[MESA_SHADER_GEOMETRY + 1];
      const struct anv_l3_config *l3_config;
      uint32_t total_size;
   } urb;

   VkShaderStageFlags active_stages;
   struct anv_state blend_state;

   uint32_t binding_stride[MAX_VBS];
   bool instancing_enable[MAX_VBS];
   bool primitive_restart;

   uint32_t cs_right_mask;

   bool depth_clamp_enable;

   struct {
      uint32_t depth_stencil_state[3];
   } gen7;

   struct {
      uint32_t wm_depth_stencil[3];
   } gen8;

   struct {
      uint32_t wm_depth_stencil[4];
   } gen9;
};
static inline bool
anv_pipeline_has_stage(const struct anv_pipeline *pipeline,
                       gl_shader_stage stage)
{
   return (pipeline->active_stages & mesa_to_vk_shader_stage(stage)) != 0;
}
static inline const struct brw_vs_prog_data *
get_vs_prog_data(struct anv_pipeline *pipeline)
{
   return (const struct brw_vs_prog_data *) pipeline->prog_data[MESA_SHADER_VERTEX];
}

static inline const struct brw_gs_prog_data *
get_gs_prog_data(struct anv_pipeline *pipeline)
{
   return (const struct brw_gs_prog_data *) pipeline->prog_data[MESA_SHADER_GEOMETRY];
}

static inline const struct brw_wm_prog_data *
get_wm_prog_data(struct anv_pipeline *pipeline)
{
   return (const struct brw_wm_prog_data *) pipeline->prog_data[MESA_SHADER_FRAGMENT];
}

static inline const struct brw_cs_prog_data *
get_cs_prog_data(struct anv_pipeline *pipeline)
{
   return (const struct brw_cs_prog_data *) pipeline->prog_data[MESA_SHADER_COMPUTE];
}
struct anv_graphics_pipeline_create_info {
   /**
    * If non-negative, overrides the color attachment count of the pipeline's
    * subpass.
    */
   int8_t color_attachment_count;

   bool use_repclear;
   bool disable_vs;
   bool use_rectlist;
};
VkResult
anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
                  struct anv_pipeline_cache *cache,
                  const VkGraphicsPipelineCreateInfo *pCreateInfo,
                  const struct anv_graphics_pipeline_create_info *extra,
                  const VkAllocationCallbacks *alloc);

VkResult
anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkComputePipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info);

VkResult
anv_graphics_pipeline_create(VkDevice device,
                             VkPipelineCache cache,
                             const VkGraphicsPipelineCreateInfo *pCreateInfo,
                             const struct anv_graphics_pipeline_create_info *extra,
                             const VkAllocationCallbacks *alloc,
                             VkPipeline *pPipeline);
struct anv_format_swizzle {
   enum isl_channel_select r:4;
   enum isl_channel_select g:4;
   enum isl_channel_select b:4;
   enum isl_channel_select a:4;
};

struct anv_format {
   enum isl_format isl_format:16;
   struct anv_format_swizzle swizzle;
};
struct anv_format
anv_get_format(const struct brw_device_info *devinfo, VkFormat format,
               VkImageAspectFlags aspect, VkImageTiling tiling);

static inline enum isl_format
anv_get_isl_format(const struct brw_device_info *devinfo, VkFormat vk_format,
                   VkImageAspectFlags aspect, VkImageTiling tiling)
{
   return anv_get_format(devinfo, vk_format, aspect, tiling).isl_format;
}
void
anv_compute_urb_partition(struct anv_pipeline *pipeline);

void
anv_setup_pipeline_l3_config(struct anv_pipeline *pipeline);
/**
 * Subsurface of an anv_image.
 */
struct anv_surface {
   struct isl_surf isl;

   /**
    * Offset from VkImage's base address, as bound by vkBindImageMemory().
    */
   uint32_t offset;
};

struct anv_image {
   VkImageType type;
   /* The original VkFormat provided by the client.  This may not match any
    * of the actual surface formats.
    */
   VkFormat vk_format;
   VkImageAspectFlags aspects;
   VkExtent3D extent;
   uint32_t levels;
   uint32_t array_size;
   uint32_t samples; /**< VkImageCreateInfo::samples */
   VkImageUsageFlags usage; /**< Superset of VkImageCreateInfo::usage. */
   VkImageTiling tiling; /**< VkImageCreateInfo::tiling */

   VkDeviceSize size;
   uint32_t alignment;

   /* Set when bound */
   struct anv_bo *bo;
   VkDeviceSize offset;
   /**
    * For each foo, anv_image::foo_surface is valid if and only if
    * anv_image::aspects has a foo aspect.
    *
    * The hardware requires that the depth buffer and stencil buffer be
    * separate surfaces.  From Vulkan's perspective, though, depth and stencil
    * reside in the same VkImage.  To satisfy both the hardware and Vulkan, we
    * allocate the depth and stencil buffers as separate surfaces in the same
    * bo.
    */
   union {
      struct anv_surface color_surface;

      struct {
         struct anv_surface depth_surface;
         struct anv_surface stencil_surface;
      };
   };
};
static inline uint32_t
anv_get_layerCount(const struct anv_image *image,
                   const VkImageSubresourceRange *range)
{
   return range->layerCount == VK_REMAINING_ARRAY_LAYERS ?
          image->array_size - range->baseArrayLayer : range->layerCount;
}
static inline uint32_t
anv_get_levelCount(const struct anv_image *image,
                   const VkImageSubresourceRange *range)
{
   return range->levelCount == VK_REMAINING_MIP_LEVELS ?
          image->levels - range->baseMipLevel : range->levelCount;
}
struct anv_image_view {
   const struct anv_image *image; /**< VkImageViewCreateInfo::image */
   struct anv_bo *bo;
   uint32_t offset; /**< Offset into bo. */

   VkImageAspectFlags aspect_mask;
   VkFormat vk_format;
   uint32_t base_layer;
   uint32_t base_mip;
   VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */

   /** RENDER_SURFACE_STATE when using image as a color render target. */
   struct anv_state color_rt_surface_state;

   /** RENDER_SURFACE_STATE when using image as a sampler surface. */
   struct anv_state sampler_surface_state;

   /** RENDER_SURFACE_STATE when using image as a storage image. */
   struct anv_state storage_surface_state;

   struct brw_image_param storage_image_param;
};
struct anv_image_create_info {
   const VkImageCreateInfo *vk_info;

   /** An opt-in bitmask which filters an ISL-mapping of the Vulkan tiling. */
   isl_tiling_flags_t isl_tiling_flags;

   uint32_t stride;
};
VkResult anv_image_create(VkDevice _device,
                          const struct anv_image_create_info *info,
                          const VkAllocationCallbacks *alloc,
                          VkImage *pImage);

struct anv_surface *
anv_image_get_surface_for_aspect_mask(struct anv_image *image,
                                      VkImageAspectFlags aspect_mask);
void anv_image_view_init(struct anv_image_view *view,
                         struct anv_device *device,
                         const VkImageViewCreateInfo *pCreateInfo,
                         struct anv_cmd_buffer *cmd_buffer,
                         VkImageUsageFlags usage_mask);
struct anv_buffer_view {
   enum isl_format format; /**< VkBufferViewCreateInfo::format */
   struct anv_bo *bo;
   uint32_t offset; /**< Offset into bo. */
   uint64_t range; /**< VkBufferViewCreateInfo::range */

   struct anv_state surface_state;
   struct anv_state storage_surface_state;

   struct brw_image_param storage_image_param;
};
void anv_buffer_view_init(struct anv_buffer_view *view,
                          struct anv_device *device,
                          const VkBufferViewCreateInfo *pCreateInfo,
                          struct anv_cmd_buffer *cmd_buffer);

enum isl_format
anv_isl_format_for_descriptor_type(VkDescriptorType type);
static inline struct VkExtent3D
anv_sanitize_image_extent(const VkImageType imageType,
                          const struct VkExtent3D imageExtent)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkExtent3D) { imageExtent.width, 1, 1 };
   case VK_IMAGE_TYPE_2D:
      return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
   case VK_IMAGE_TYPE_3D:
      return imageExtent;
   default:
      unreachable("invalid image type");
   }
}
static inline struct VkOffset3D
anv_sanitize_image_offset(const VkImageType imageType,
                          const struct VkOffset3D imageOffset)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkOffset3D) { imageOffset.x, 0, 0 };
   case VK_IMAGE_TYPE_2D:
      return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
   case VK_IMAGE_TYPE_3D:
      return imageOffset;
   default:
      unreachable("invalid image type");
   }
}
void anv_fill_buffer_surface_state(struct anv_device *device,
                                   struct anv_state state,
                                   enum isl_format format,
                                   uint32_t offset, uint32_t range,
                                   uint32_t stride);
void anv_image_view_fill_image_param(struct anv_device *device,
                                     struct anv_image_view *view,
                                     struct brw_image_param *param);
void anv_buffer_view_fill_image_param(struct anv_device *device,
                                      struct anv_buffer_view *view,
                                      struct brw_image_param *param);
struct anv_sampler {
   uint32_t state[4];
};

struct anv_framebuffer {
   uint32_t width;
   uint32_t height;
   uint32_t layers;

   uint32_t attachment_count;
   struct anv_image_view *attachments[0];
};
struct anv_subpass {
   uint32_t input_count;
   uint32_t *input_attachments;
   uint32_t color_count;
   uint32_t *color_attachments;
   uint32_t *resolve_attachments;
   uint32_t depth_stencil_attachment;

   /** Subpass has at least one resolve attachment */
   bool has_resolve;
};
{
1878 VkAttachmentLoadOp load_op
;
1879 VkAttachmentStoreOp store_op
;
1880 VkAttachmentLoadOp stencil_load_op
;
1883 struct anv_render_pass
{
1884 uint32_t attachment_count
;
1885 uint32_t subpass_count
;
1886 uint32_t * subpass_attachments
;
1887 struct anv_render_pass_attachment
* attachments
;
1888 struct anv_subpass subpasses
[0];
1891 extern struct anv_render_pass anv_meta_dummy_renderpass
;
struct anv_query_pool_slot {
   uint64_t begin;
   uint64_t end;
   uint64_t available;
};

struct anv_query_pool {
   VkQueryType type;
   uint32_t slots;
   struct anv_bo bo;
};
anv_device_init_meta(struct anv_device
*device
);
1906 void anv_device_finish_meta(struct anv_device
*device
);
1908 void *anv_lookup_entrypoint(const char *name
);
void anv_dump_image_to_ppm(struct anv_device *device,
                           struct anv_image *image, unsigned miplevel,
                           unsigned array_layer, VkImageAspectFlagBits aspect,
                           const char *filename);
enum anv_dump_action {
   ANV_DUMP_FRAMEBUFFERS_BIT = 0x1,
};

void anv_dump_start(struct anv_device *device, enum anv_dump_action actions);
void anv_dump_finish(void);
void anv_dump_add_framebuffer(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_framebuffer *fb);
#define ANV_DEFINE_HANDLE_CASTS(__anv_type, __VkType) \
   \
   static inline struct __anv_type * \
   __anv_type ## _from_handle(__VkType _handle) \
   { \
      return (struct __anv_type *) _handle; \
   } \
   \
   static inline __VkType \
   __anv_type ## _to_handle(struct __anv_type *_obj) \
   { \
      return (__VkType) _obj; \
   }

#define ANV_DEFINE_NONDISP_HANDLE_CASTS(__anv_type, __VkType) \
   \
   static inline struct __anv_type * \
   __anv_type ## _from_handle(__VkType _handle) \
   { \
      return (struct __anv_type *)(uintptr_t) _handle; \
   } \
   \
   static inline __VkType \
   __anv_type ## _to_handle(struct __anv_type *_obj) \
   { \
      return (__VkType)(uintptr_t) _obj; \
   }

#define ANV_FROM_HANDLE(__anv_type, __name, __handle) \
   struct __anv_type *__name = __anv_type ## _from_handle(__handle)
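/* Illustrative usage sketch: entrypoints unwrap Vulkan handles into driver
 * structs with these casts, e.g.:
 *
 *    VkResult anv_DeviceWaitIdle(VkDevice _device)
 *    {
 *       ANV_FROM_HANDLE(anv_device, device, _device);
 *       return wait_for_idle(device);   // hypothetical body
 *    }
 */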
ANV_DEFINE_HANDLE_CASTS(anv_cmd_buffer, VkCommandBuffer)
ANV_DEFINE_HANDLE_CASTS(anv_device, VkDevice)
ANV_DEFINE_HANDLE_CASTS(anv_instance, VkInstance)
ANV_DEFINE_HANDLE_CASTS(anv_physical_device, VkPhysicalDevice)
ANV_DEFINE_HANDLE_CASTS(anv_queue, VkQueue)

ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_cmd_pool, VkCommandPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer, VkBuffer)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer_view, VkBufferView)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_pool, VkDescriptorPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set, VkDescriptorSet)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set_layout, VkDescriptorSetLayout)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_device_memory, VkDeviceMemory)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_fence, VkFence)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_event, VkEvent)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_framebuffer, VkFramebuffer)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image, VkImage)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image_view, VkImageView)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_cache, VkPipelineCache)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline, VkPipeline)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_layout, VkPipelineLayout)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_query_pool, VkQueryPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_render_pass, VkRenderPass)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_sampler, VkSampler)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_shader_module, VkShaderModule)
#define ANV_DEFINE_STRUCT_CASTS(__anv_type, __VkType) \
   \
   static inline const __VkType * \
   __anv_type ## _to_ ## __VkType(const struct __anv_type *__anv_obj) \
   { \
      return (const __VkType *) __anv_obj; \
   }

#define ANV_COMMON_TO_STRUCT(__VkType, __vk_name, __common_name) \
   const __VkType *__vk_name = anv_common_to_ ## __VkType(__common_name)

ANV_DEFINE_STRUCT_CASTS(anv_common, VkMemoryBarrier)
ANV_DEFINE_STRUCT_CASTS(anv_common, VkBufferMemoryBarrier)
ANV_DEFINE_STRUCT_CASTS(anv_common, VkImageMemoryBarrier)
/* Gen-specific function declarations */
#ifdef genX
#  include "anv_genX.h"
#else
#  define genX(x) gen7_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen75_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen8_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen9_##x
#  include "anv_genX.h"
#  undef genX
#endif