/*
 * Copyright © 2015 Intel Corporation
 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
37 #define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
42 #include "brw_device_info.h"
43 #include "util/macros.h"
46 #include <vulkan/vulkan.h>
47 #include <vulkan/vulkan_intel.h>
48 #include <vulkan/vk_wsi_lunarg.h>
50 #include "entrypoints.h"
52 #include "brw_context.h"
58 #define anv_noreturn __attribute__((__noreturn__))
59 #define anv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
/* NOTE: both macros evaluate their arguments more than once -- do not pass
 * expressions with side effects (e.g. MIN(i++, n)).  Kept as plain
 * conditional expressions (rather than statement-expression temporaries)
 * so they remain usable in constant contexts.
 */
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
/* Round v up to the next multiple of a.  a must be a nonzero power of two
 * (the mask trick below relies on that).  Note v + a - 1 wraps if v is
 * within a-1 of UINT32_MAX.
 */
static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   uint32_t mask = a - 1;
   return (v + mask) & ~mask;
}
/* Round v up to the next multiple of a.  a must be a positive power of two.
 * NOTE(review): the return-type line was garbled in the source; int32_t is
 * inferred from the parameter types -- confirm against upstream.
 */
static inline int32_t
align_i32(int32_t v, int32_t a)
{
   int32_t mask = a - 1;
   return (v + mask) & ~mask;
}
/** Alignment must be a power of 2.
 *
 * Returns true when n is a multiple of a.  The assert enforces the
 * power-of-two precondition: a & -a isolates the lowest set bit, so the
 * equality holds only when a has at most one bit set.
 * NOTE(review): return-type line was garbled in the source; bool is
 * inferred -- confirm.  a == 0 passes the assert but then masks n with
 * all-ones; callers are expected to pass a >= 1.
 */
static inline bool
anv_is_aligned(uintmax_t n, uintmax_t a)
{
   assert(a == (a & -a));
   return (n & (a - 1)) == 0;
}
/* Size of mip level `levels` below a base dimension of n, clamped to 1.
 * levels must be < 32 (shift width of uint32_t).
 * NOTE(review): interior lines were elided in the garbled source -- an
 * n == 0 early-return may be missing; as written, anv_minify(0, l)
 * yields 1.  TODO confirm against upstream.
 */
static inline uint32_t
anv_minify(uint32_t n, uint32_t levels)
{
   uint32_t shifted = n >> levels;
   return shifted > 1 ? shifted : 1;
}
/* Clear the bits of clear_mask out of *inout_mask.  Returns true if any of
 * those bits were actually set (i.e. the mask changed), false otherwise.
 * NOTE(review): the tail of this function was elided in the garbled source;
 * the bool return reconstruction below matches the visible if-structure --
 * confirm against upstream.
 */
static inline bool
anv_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
{
   if (*inout_mask & clear_mask) {
      *inout_mask &= ~clear_mask;
      return true;
   }

   return false;
}
/* Iterate b over the indices of the set bits of dword, lowest first.
 * b must be an lvalue; the loop body must not modify it.
 * Fix: the clear-step now shifts 1u (unsigned) -- the old `1 << (b)`
 * left-shifted a signed int into the sign bit when bit 31 was set,
 * which is undefined behavior.
 */
#define for_each_bit(b, dword)                                  \
   for (uint32_t __dword = (dword);                             \
        (b) = __builtin_ffs(__dword) - 1, __dword;              \
        __dword &= ~(1u << (b)))
109 /* Define no kernel as 1, since that's an illegal offset for a kernel */
113 VkStructureType sType
;
117 /* Whenever we generate an error, pass it through this function. Useful for
118 * debugging, where we can break on it. Only call at error site, not when
119 * propagating errors. Might be useful to plug in a stack trace here.
122 static inline VkResult
123 vk_error(VkResult error
)
126 fprintf(stderr
, "vk_error: %x\n", error
);
132 void __anv_finishme(const char *file
, int line
, const char *format
, ...)
133 anv_printflike(3, 4);
134 void anv_loge(const char *format
, ...) anv_printflike(1, 2);
135 void anv_loge_v(const char *format
, va_list va
);
/**
 * Print a FINISHME message, including its source location.
 *
 * Fix: dropped the trailing semicolon from the expansion.  The old form
 * expanded to `__anv_finishme(...);`, so `anv_finishme("x");` at a call
 * site became two statements -- which breaks an unbraced if/else body.
 */
#define anv_finishme(format, ...) \
   __anv_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__)
143 /* A non-fatal assert. Useful for debugging. */
145 #define anv_assert(x) ({ \
146 if (unlikely(!(x))) \
147 fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x); \
150 #define anv_assert(x)
153 void anv_abortf(const char *format
, ...) anv_noreturn
anv_printflike(1, 2);
154 void anv_abortfv(const char *format
, va_list va
) anv_noreturn
;
156 #define stub_return(v) \
158 anv_finishme("stub %s", __func__); \
164 anv_finishme("stub %s", __func__); \
169 * A dynamically growable, circular buffer. Elements are added at head and
170 * removed from tail. head and tail are free-running uint32_t indices and we
171 * only compute the modulo with size when accessing the array. This way,
172 * number of bytes in the queue is always head - tail, even in case of
179 uint32_t element_size
;
184 int anv_vector_init(struct anv_vector
*queue
, uint32_t element_size
, uint32_t size
);
185 void *anv_vector_add(struct anv_vector
*queue
);
186 void *anv_vector_remove(struct anv_vector
*queue
);
189 anv_vector_length(struct anv_vector
*queue
)
191 return (queue
->head
- queue
->tail
) / queue
->element_size
;
195 anv_vector_finish(struct anv_vector
*queue
)
/* Iterate `elem` (a pointer into the ring buffer) over each element from
 * tail to head.  `queue` must have static type `struct anv_vector *`
 * (enforced by the static_assert).  Offsets are free-running; the modulo
 * with the power-of-two `size` happens at access time via the mask.
 * Note the comma expression in the for-condition: elem is recomputed each
 * iteration before the offset < head test.
 */
#define anv_vector_foreach(elem, queue) \
   static_assert(__builtin_types_compatible_p(__typeof__(queue), struct anv_vector *), ""); \
   for (uint32_t __anv_vector_offset = (queue)->tail; \
        elem = (queue)->data + (__anv_vector_offset & ((queue)->size - 1)), __anv_vector_offset < (queue)->head; \
        __anv_vector_offset += (queue)->element_size)
212 /* This field is here for the benefit of the aub dumper. It can (and for
213 * userptr bos it must) be set to the cpu map of the buffer. Destroying
214 * the bo won't clean up the mmap, it's still the responsibility of the bo
215 * user to do that. */
219 /* Represents a lock-free linked list of "free" things. This is used by
220 * both the block pool and the state pools. Unfortunately, in order to
221 * solve the ABA problem, we can't use a single uint32_t head.
223 union anv_free_list
{
227 /* A simple count that is incremented every time the head changes. */
233 #define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { 1, 0 } })
235 struct anv_block_pool
{
236 struct anv_device
*device
;
244 * Array of mmaps and gem handles owned by the block pool, reclaimed when
245 * the block pool is destroyed.
247 struct anv_vector mmap_cleanups
;
252 union anv_free_list free_list
;
255 struct anv_block_state
{
271 struct anv_fixed_size_state_pool
{
273 union anv_free_list free_list
;
274 struct anv_block_state block
;
277 #define ANV_MIN_STATE_SIZE_LOG2 6
278 #define ANV_MAX_STATE_SIZE_LOG2 10
280 #define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2)
282 struct anv_state_pool
{
283 struct anv_block_pool
*block_pool
;
284 struct anv_fixed_size_state_pool buckets
[ANV_STATE_BUCKETS
];
287 struct anv_state_stream
{
288 struct anv_block_pool
*block_pool
;
290 uint32_t current_block
;
294 void anv_block_pool_init(struct anv_block_pool
*pool
,
295 struct anv_device
*device
, uint32_t block_size
);
296 void anv_block_pool_finish(struct anv_block_pool
*pool
);
297 uint32_t anv_block_pool_alloc(struct anv_block_pool
*pool
);
298 void anv_block_pool_free(struct anv_block_pool
*pool
, uint32_t offset
);
299 void anv_state_pool_init(struct anv_state_pool
*pool
,
300 struct anv_block_pool
*block_pool
);
301 struct anv_state
anv_state_pool_alloc(struct anv_state_pool
*pool
,
302 size_t state_size
, size_t alignment
);
303 void anv_state_pool_free(struct anv_state_pool
*pool
, struct anv_state state
);
304 void anv_state_stream_init(struct anv_state_stream
*stream
,
305 struct anv_block_pool
*block_pool
);
306 void anv_state_stream_finish(struct anv_state_stream
*stream
);
307 struct anv_state
anv_state_stream_alloc(struct anv_state_stream
*stream
,
308 uint32_t size
, uint32_t alignment
);
311 * Implements a pool of re-usable BOs. The interface is identical to that
312 * of block_pool except that each block is its own BO.
315 struct anv_device
*device
;
322 void anv_bo_pool_init(struct anv_bo_pool
*pool
,
323 struct anv_device
*device
, uint32_t block_size
);
324 void anv_bo_pool_finish(struct anv_bo_pool
*pool
);
325 VkResult
anv_bo_pool_alloc(struct anv_bo_pool
*pool
, struct anv_bo
*bo
);
326 void anv_bo_pool_free(struct anv_bo_pool
*pool
, const struct anv_bo
*bo
);
331 typedef void (*anv_object_destructor_cb
)(struct anv_device
*,
336 anv_object_destructor_cb destructor
;
339 struct anv_physical_device
{
340 struct anv_instance
* instance
;
345 const struct brw_device_info
* info
;
349 struct anv_instance
{
350 void * pAllocUserData
;
351 PFN_vkAllocFunction pfnAlloc
;
352 PFN_vkFreeFunction pfnFree
;
354 uint32_t physicalDeviceCount
;
355 struct anv_physical_device physicalDevice
;
358 struct anv_meta_state
{
365 VkPipelineLayout pipeline_layout
;
366 VkDescriptorSetLayout ds_layout
;
370 VkDynamicRasterState rs_state
;
371 VkDynamicColorBlendState cb_state
;
372 VkDynamicDepthStencilState ds_state
;
377 struct anv_device
* device
;
379 struct anv_state_pool
* pool
;
382 * Serial number of the most recently completed batch executed on the
385 struct anv_state completed_serial
;
388 * The next batch submitted to the engine will be assigned this serial
391 uint32_t next_serial
;
393 uint32_t last_collected_serial
;
397 struct anv_instance
* instance
;
399 struct brw_device_info info
;
405 struct anv_bo_pool batch_bo_pool
;
407 struct anv_block_pool dynamic_state_block_pool
;
408 struct anv_state_pool dynamic_state_pool
;
410 struct anv_block_pool instruction_block_pool
;
411 struct anv_block_pool surface_state_block_pool
;
412 struct anv_state_pool surface_state_pool
;
414 struct anv_meta_state meta_state
;
416 struct anv_state border_colors
;
418 struct anv_queue queue
;
420 struct anv_block_pool scratch_block_pool
;
422 struct anv_compiler
* compiler
;
423 struct anv_aub_writer
* aub_writer
;
424 pthread_mutex_t mutex
;
428 anv_device_alloc(struct anv_device
* device
,
431 VkSystemAllocType allocType
);
434 anv_device_free(struct anv_device
* device
,
437 void* anv_gem_mmap(struct anv_device
*device
,
438 uint32_t gem_handle
, uint64_t offset
, uint64_t size
);
439 void anv_gem_munmap(void *p
, uint64_t size
);
440 uint32_t anv_gem_create(struct anv_device
*device
, size_t size
);
441 void anv_gem_close(struct anv_device
*device
, int gem_handle
);
442 int anv_gem_userptr(struct anv_device
*device
, void *mem
, size_t size
);
443 int anv_gem_wait(struct anv_device
*device
, int gem_handle
, int64_t *timeout_ns
);
444 int anv_gem_execbuffer(struct anv_device
*device
,
445 struct drm_i915_gem_execbuffer2
*execbuf
);
446 int anv_gem_set_tiling(struct anv_device
*device
, int gem_handle
,
447 uint32_t stride
, uint32_t tiling
);
448 int anv_gem_create_context(struct anv_device
*device
);
449 int anv_gem_destroy_context(struct anv_device
*device
, int context
);
450 int anv_gem_get_param(int fd
, uint32_t param
);
451 int anv_gem_get_aperture(struct anv_physical_device
*physical_dev
, uint64_t *size
);
452 int anv_gem_handle_to_fd(struct anv_device
*device
, int gem_handle
);
453 int anv_gem_fd_to_handle(struct anv_device
*device
, int fd
);
454 int anv_gem_userptr(struct anv_device
*device
, void *mem
, size_t size
);
456 VkResult
anv_bo_init_new(struct anv_bo
*bo
, struct anv_device
*device
, uint64_t size
);
458 struct anv_reloc_list
{
461 struct drm_i915_gem_relocation_entry
* relocs
;
462 struct anv_bo
** reloc_bos
;
465 VkResult
anv_reloc_list_init(struct anv_reloc_list
*list
,
466 struct anv_device
*device
);
467 void anv_reloc_list_finish(struct anv_reloc_list
*list
,
468 struct anv_device
*device
);
470 struct anv_batch_bo
{
473 /* Bytes actually consumed in this batch BO */
476 /* These offsets reference the per-batch reloc list */
480 struct anv_batch_bo
* prev_batch_bo
;
484 struct anv_device
* device
;
490 struct anv_reloc_list relocs
;
492 /* This callback is called (with the associated user data) in the event
493 * that the batch runs out of space.
495 VkResult (*extend_cb
)(struct anv_batch
*, void *);
499 void *anv_batch_emit_dwords(struct anv_batch
*batch
, int num_dwords
);
500 void anv_batch_emit_batch(struct anv_batch
*batch
, struct anv_batch
*other
);
501 uint64_t anv_batch_emit_reloc(struct anv_batch
*batch
,
502 void *location
, struct anv_bo
*bo
, uint32_t offset
);
509 #define __gen_address_type struct anv_address
510 #define __gen_user_data struct anv_batch
512 static inline uint64_t
513 __gen_combine_address(struct anv_batch
*batch
, void *location
,
514 const struct anv_address address
, uint32_t delta
)
516 if (address
.bo
== NULL
) {
519 assert(batch
->start
<= location
&& location
< batch
->end
);
521 return anv_batch_emit_reloc(batch
, location
, address
.bo
, address
.offset
+ delta
);
525 #include "gen7_pack.h"
526 #include "gen75_pack.h"
527 #undef GEN8_3DSTATE_MULTISAMPLE
528 #include "gen8_pack.h"
530 #define anv_batch_emit(batch, cmd, ...) do { \
531 struct cmd __template = { \
535 void *__dst = anv_batch_emit_dwords(batch, cmd ## _length); \
536 cmd ## _pack(batch, __dst, &__template); \
539 #define anv_batch_emitn(batch, n, cmd, ...) ({ \
540 struct cmd __template = { \
542 .DwordLength = n - cmd ## _length_bias, \
545 void *__dst = anv_batch_emit_dwords(batch, n); \
546 cmd ## _pack(batch, __dst, &__template); \
550 #define anv_batch_emit_merge(batch, dwords0, dwords1) \
554 assert(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1)); \
555 dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0)); \
556 for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++) \
557 dw[i] = (dwords0)[i] | (dwords1)[i]; \
558 VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, ARRAY_SIZE(dwords0) * 4));\
561 #define GEN8_MOCS { \
562 .MemoryTypeLLCeLLCCacheabilityControl = WB, \
563 .TargetCache = L3DefertoPATforLLCeLLCselection, \
567 struct anv_device_memory
{
569 VkDeviceSize map_size
;
573 struct anv_dynamic_vp_state
{
574 struct anv_object base
;
575 struct anv_state sf_clip_vp
;
576 struct anv_state cc_vp
;
577 struct anv_state scissor
;
580 struct anv_dynamic_rs_state
{
581 uint32_t state_sf
[GEN8_3DSTATE_SF_length
];
582 uint32_t state_raster
[GEN8_3DSTATE_RASTER_length
];
585 struct anv_dynamic_ds_state
{
586 uint32_t state_wm_depth_stencil
[GEN8_3DSTATE_WM_DEPTH_STENCIL_length
];
587 uint32_t state_color_calc
[GEN8_COLOR_CALC_STATE_length
];
590 struct anv_dynamic_cb_state
{
591 uint32_t state_color_calc
[GEN8_COLOR_CALC_STATE_length
];
595 struct anv_descriptor_slot
{
600 struct anv_descriptor_set_layout
{
602 uint32_t surface_count
;
603 struct anv_descriptor_slot
*surface_start
;
604 uint32_t sampler_count
;
605 struct anv_descriptor_slot
*sampler_start
;
606 } stage
[VK_SHADER_STAGE_NUM
];
609 uint32_t num_dynamic_buffers
;
610 uint32_t shader_stages
;
611 struct anv_descriptor_slot entries
[0];
614 struct anv_descriptor
{
615 struct anv_sampler
*sampler
;
616 struct anv_surface_view
*view
;
619 struct anv_descriptor_set
{
620 struct anv_descriptor descriptors
[0];
627 struct anv_pipeline_layout
{
629 struct anv_descriptor_set_layout
*layout
;
630 uint32_t surface_start
[VK_SHADER_STAGE_NUM
];
631 uint32_t sampler_start
[VK_SHADER_STAGE_NUM
];
637 uint32_t surface_count
;
638 uint32_t sampler_count
;
639 } stage
[VK_SHADER_STAGE_NUM
];
643 struct anv_device
* device
;
651 #define ANV_CMD_BUFFER_PIPELINE_DIRTY (1 << 0)
652 #define ANV_CMD_BUFFER_RS_DIRTY (1 << 2)
653 #define ANV_CMD_BUFFER_DS_DIRTY (1 << 3)
654 #define ANV_CMD_BUFFER_CB_DIRTY (1 << 4)
655 #define ANV_CMD_BUFFER_VP_DIRTY (1 << 5)
656 #define ANV_CMD_BUFFER_INDEX_BUFFER_DIRTY (1 << 6)
658 struct anv_vertex_binding
{
659 struct anv_buffer
* buffer
;
663 struct anv_descriptor_set_binding
{
664 struct anv_descriptor_set
* set
;
665 uint32_t dynamic_offsets
[128];
668 struct anv_cmd_buffer
{
669 struct anv_object base
;
670 struct anv_device
* device
;
672 struct drm_i915_gem_execbuffer2 execbuf
;
673 struct drm_i915_gem_exec_object2
* exec2_objects
;
674 struct anv_bo
** exec2_bos
;
675 uint32_t exec2_array_length
;
680 struct anv_batch batch
;
681 struct anv_batch_bo
* last_batch_bo
;
682 struct anv_batch_bo
* surface_batch_bo
;
683 uint32_t surface_next
;
684 struct anv_reloc_list surface_relocs
;
685 struct anv_state_stream surface_state_stream
;
686 struct anv_state_stream dynamic_state_stream
;
688 /* State required while building cmd buffer */
689 uint32_t current_pipeline
;
692 uint32_t compute_dirty
;
693 uint32_t descriptors_dirty
;
694 uint32_t scratch_size
;
695 struct anv_pipeline
* pipeline
;
696 struct anv_pipeline
* compute_pipeline
;
697 struct anv_framebuffer
* framebuffer
;
698 struct anv_render_pass
* pass
;
699 struct anv_subpass
* subpass
;
700 struct anv_dynamic_rs_state
* rs_state
;
701 struct anv_dynamic_ds_state
* ds_state
;
702 struct anv_dynamic_vp_state
* vp_state
;
703 struct anv_dynamic_cb_state
* cb_state
;
704 uint32_t state_vf
[GEN8_3DSTATE_VF_length
];
705 struct anv_vertex_binding vertex_bindings
[MAX_VBS
];
706 struct anv_descriptor_set_binding descriptors
[MAX_SETS
];
709 void anv_cmd_buffer_dump(struct anv_cmd_buffer
*cmd_buffer
);
710 void anv_aub_writer_destroy(struct anv_aub_writer
*writer
);
713 struct anv_object base
;
715 struct drm_i915_gem_execbuffer2 execbuf
;
716 struct drm_i915_gem_exec_object2 exec2_objects
[1];
720 struct anv_shader_module
{
726 struct anv_shader_module
* module
;
730 struct anv_pipeline
{
731 struct anv_object base
;
732 struct anv_device
* device
;
733 struct anv_batch batch
;
734 uint32_t batch_data
[256];
735 struct anv_shader
* shaders
[VK_SHADER_STAGE_NUM
];
736 struct anv_pipeline_layout
* layout
;
739 struct brw_vs_prog_data vs_prog_data
;
740 struct brw_wm_prog_data wm_prog_data
;
741 struct brw_gs_prog_data gs_prog_data
;
742 struct brw_cs_prog_data cs_prog_data
;
743 bool writes_point_size
;
744 struct brw_stage_prog_data
* prog_data
[VK_SHADER_STAGE_NUM
];
745 uint32_t scratch_start
[VK_SHADER_STAGE_NUM
];
746 uint32_t total_scratch
;
750 uint32_t nr_vs_entries
;
753 uint32_t nr_gs_entries
;
756 uint32_t active_stages
;
757 struct anv_state_stream program_stream
;
758 struct anv_state blend_state
;
763 uint32_t gs_vertex_count
;
767 uint32_t binding_stride
[MAX_VBS
];
769 uint32_t state_sf
[GEN8_3DSTATE_SF_length
];
770 uint32_t state_vf
[GEN8_3DSTATE_VF_length
];
771 uint32_t state_raster
[GEN8_3DSTATE_RASTER_length
];
772 uint32_t state_wm_depth_stencil
[GEN8_3DSTATE_WM_DEPTH_STENCIL_length
];
774 uint32_t cs_thread_width_max
;
775 uint32_t cs_right_mask
;
778 struct anv_pipeline_create_info
{
780 bool disable_viewport
;
781 bool disable_scissor
;
787 anv_pipeline_create(VkDevice device
,
788 const VkGraphicsPipelineCreateInfo
*pCreateInfo
,
789 const struct anv_pipeline_create_info
*extra
,
790 VkPipeline
*pPipeline
);
792 struct anv_compiler
*anv_compiler_create(struct anv_device
*device
);
793 void anv_compiler_destroy(struct anv_compiler
*compiler
);
794 int anv_compiler_run(struct anv_compiler
*compiler
, struct anv_pipeline
*pipeline
);
795 void anv_compiler_free(struct anv_pipeline
*pipeline
);
799 uint16_t surface_format
; /**< RENDER_SURFACE_STATE.SurfaceFormat */
800 uint8_t cpp
; /**< Bytes-per-pixel of anv_format::surface_format. */
801 uint8_t num_channels
;
802 uint16_t depth_format
; /**< 3DSTATE_DEPTH_BUFFER.SurfaceFormat */
806 const struct anv_format
*
807 anv_format_for_vk_format(VkFormat format
);
808 bool anv_is_vk_format_depth_or_stencil(VkFormat format
);
811 * A proxy for the color surfaces, depth surfaces, and stencil surfaces.
815 * Offset from VkImage's base address, as bound by vkBindImageMemory().
819 uint32_t stride
; /**< RENDER_SURFACE_STATE.SurfacePitch */
820 uint16_t qpitch
; /**< RENDER_SURFACE_STATE.QPitch */
823 * \name Alignment of miptree images, in units of pixels.
825 * These fields contain the real alignment values, not the values to be
826 * given to the GPU. For example, if h_align is 4, then program the GPU
830 uint8_t h_align
; /**< RENDER_SURFACE_STATE.SurfaceHorizontalAlignment */
831 uint8_t v_align
; /**< RENDER_SURFACE_STATE.SurfaceVerticalAlignment */
834 uint8_t tile_mode
; /**< RENDER_SURFACE_STATE.TileMode */
851 struct anv_swap_chain
*swap_chain
;
853 /** RENDER_SURFACE_STATE.SurfaceType */
856 /** Primary surface is either color or depth. */
857 struct anv_surface primary_surface
;
859 /** Stencil surface is optional. */
860 struct anv_surface stencil_surface
;
863 struct anv_surface_view
{
864 struct anv_state surface_state
;
872 enum anv_attachment_view_type
{
873 ANV_ATTACHMENT_VIEW_TYPE_COLOR
,
874 ANV_ATTACHMENT_VIEW_TYPE_DEPTH_STENCIL
,
877 struct anv_attachment_view
{
878 enum anv_attachment_view_type attachment_type
;
881 struct anv_color_attachment_view
{
882 struct anv_attachment_view base
;
884 struct anv_surface_view view
;
887 struct anv_depth_stencil_view
{
888 struct anv_attachment_view base
;
892 uint32_t depth_offset
; /**< Offset into bo. */
893 uint32_t depth_stride
; /**< 3DSTATE_DEPTH_BUFFER.SurfacePitch */
894 uint32_t depth_format
; /**< 3DSTATE_DEPTH_BUFFER.SurfaceFormat */
895 uint16_t depth_qpitch
; /**< 3DSTATE_DEPTH_BUFFER.SurfaceQPitch */
897 uint32_t stencil_offset
; /**< Offset into bo. */
898 uint32_t stencil_stride
; /**< 3DSTATE_STENCIL_BUFFER.SurfacePitch */
899 uint16_t stencil_qpitch
; /**< 3DSTATE_STENCIL_BUFFER.SurfaceQPitch */
902 struct anv_image_create_info
{
903 const VkImageCreateInfo
*vk_info
;
904 bool force_tile_mode
;
908 VkResult
anv_image_create(VkDevice _device
,
909 const struct anv_image_create_info
*info
,
912 void anv_image_view_init(struct anv_surface_view
*view
,
913 struct anv_device
*device
,
914 const VkImageViewCreateInfo
* pCreateInfo
,
915 struct anv_cmd_buffer
*cmd_buffer
);
917 void anv_color_attachment_view_init(struct anv_color_attachment_view
*view
,
918 struct anv_device
*device
,
919 const VkAttachmentViewCreateInfo
* pCreateInfo
,
920 struct anv_cmd_buffer
*cmd_buffer
);
922 void anv_surface_view_fini(struct anv_device
*device
,
923 struct anv_surface_view
*view
);
929 struct anv_framebuffer
{
930 struct anv_object base
;
936 /* Viewport for clears */
937 VkDynamicViewportState vp_state
;
939 uint32_t attachment_count
;
940 const struct anv_attachment_view
* attachments
[0];
944 uint32_t input_count
;
945 uint32_t * input_attachments
;
946 uint32_t color_count
;
947 uint32_t * color_attachments
;
948 uint32_t * resolve_attachments
;
949 uint32_t depth_stencil_attachment
;
952 struct anv_render_pass_attachment
{
955 VkAttachmentLoadOp load_op
;
956 VkAttachmentLoadOp stencil_load_op
;
959 struct anv_render_pass
{
960 uint32_t attachment_count
;
961 struct anv_render_pass_attachment
* attachments
;
963 struct anv_subpass subpasses
[0];
966 void anv_device_init_meta(struct anv_device
*device
);
967 void anv_device_finish_meta(struct anv_device
*device
);
970 anv_cmd_buffer_begin_subpass(struct anv_cmd_buffer
*cmd_buffer
,
971 struct anv_subpass
*subpass
);
974 anv_cmd_buffer_clear_attachments(struct anv_cmd_buffer
*cmd_buffer
,
975 struct anv_render_pass
*pass
,
976 const VkClearValue
*clear_values
);
979 anv_lookup_entrypoint(const char *name
);
/* For a driver type / Vulkan handle pair, define the two inline
 * converters  <type>_from_handle(handle)  and  <type>_to_handle(ptr).
 * Handles are just the struct pointer cast to the opaque handle type.
 * NOTE(review): brace lines were elided in the garbled source; the bodies
 * below are the obvious single-cast reconstruction.
 */
#define ANV_DEFINE_CASTS(__anv_type, __VkType)                  \
   static inline struct __anv_type *                            \
   __anv_type ## _from_handle(__VkType _handle)                 \
   {                                                            \
      return (struct __anv_type *) _handle;                     \
   }                                                            \
                                                                \
   static inline __VkType                                       \
   __anv_type ## _to_handle(struct __anv_type *_obj)            \
   {                                                            \
      return (__VkType) _obj;                                   \
   }
994 ANV_DEFINE_CASTS(anv_physical_device
, VkPhysicalDevice
)
995 ANV_DEFINE_CASTS(anv_instance
, VkInstance
)
996 ANV_DEFINE_CASTS(anv_queue
, VkQueue
)
997 ANV_DEFINE_CASTS(anv_device
, VkDevice
)
998 ANV_DEFINE_CASTS(anv_device_memory
, VkDeviceMemory
)
999 ANV_DEFINE_CASTS(anv_dynamic_vp_state
, VkDynamicViewportState
)
1000 ANV_DEFINE_CASTS(anv_dynamic_rs_state
, VkDynamicRasterState
)
1001 ANV_DEFINE_CASTS(anv_dynamic_ds_state
, VkDynamicDepthStencilState
)
1002 ANV_DEFINE_CASTS(anv_dynamic_cb_state
, VkDynamicColorBlendState
)
1003 ANV_DEFINE_CASTS(anv_descriptor_set_layout
, VkDescriptorSetLayout
)
1004 ANV_DEFINE_CASTS(anv_descriptor_set
, VkDescriptorSet
)
1005 ANV_DEFINE_CASTS(anv_pipeline_layout
, VkPipelineLayout
)
1006 ANV_DEFINE_CASTS(anv_buffer
, VkBuffer
)
1007 ANV_DEFINE_CASTS(anv_cmd_buffer
, VkCmdBuffer
)
1008 ANV_DEFINE_CASTS(anv_fence
, VkFence
)
1009 ANV_DEFINE_CASTS(anv_shader_module
, VkShaderModule
)
1010 ANV_DEFINE_CASTS(anv_shader
, VkShader
)
1011 ANV_DEFINE_CASTS(anv_pipeline
, VkPipeline
)
1012 ANV_DEFINE_CASTS(anv_image
, VkImage
)
1013 ANV_DEFINE_CASTS(anv_sampler
, VkSampler
)
1014 ANV_DEFINE_CASTS(anv_attachment_view
, VkAttachmentView
)
1015 ANV_DEFINE_CASTS(anv_framebuffer
, VkFramebuffer
)
1016 ANV_DEFINE_CASTS(anv_render_pass
, VkRenderPass
)
1017 ANV_DEFINE_CASTS(anv_query_pool
, VkQueryPool
)
/* Declare and initialize a local driver-struct pointer `__name` from the
 * Vulkan handle `__handle`, via the matching <type>_from_handle()
 * converter defined by ANV_DEFINE_CASTS.
 */
#define ANV_FROM_HANDLE(__anv_type, __name, __handle) \
   struct __anv_type *__name = __anv_type ## _from_handle(__handle)