2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
28 #ifndef RADV_PRIVATE_H
29 #define RADV_PRIVATE_H
43 #define VG(x) ((void)0)
46 #include "c11/threads.h"
48 #include "compiler/shader_enums.h"
49 #include "util/macros.h"
50 #include "util/list.h"
51 #include "util/xmlconfig.h"
53 #include "vk_debug_report.h"
54 #include "vk_object.h"
56 #include "radv_radeon_winsys.h"
57 #include "ac_binary.h"
58 #include "ac_nir_to_llvm.h"
59 #include "ac_gpu_info.h"
60 #include "ac_surface.h"
61 #include "ac_llvm_build.h"
62 #include "ac_llvm_util.h"
63 #include "radv_constants.h"
64 #include "radv_descriptor_set.h"
65 #include "radv_extensions.h"
68 /* Pre-declarations needed for WSI entrypoints */
71 typedef struct xcb_connection_t xcb_connection_t
;
72 typedef uint32_t xcb_visualid_t
;
73 typedef uint32_t xcb_window_t
;
75 #include <vulkan/vulkan.h>
76 #include <vulkan/vulkan_intel.h>
77 #include <vulkan/vulkan_android.h>
78 #include <vulkan/vk_icd.h>
79 #include <vulkan/vk_android_native_buffer.h>
81 #include "radv_entrypoints.h"
83 #include "wsi_common.h"
84 #include "wsi_common_display.h"
/* Helper to determine if we should compile
 * any of the Android AHB support.
 *
 * To actually enable the ext we also need
 * the necessary kernel support.
 */
#if defined(ANDROID) && ANDROID_API_LEVEL >= 26
#define RADV_SUPPORT_ANDROID_HARDWARE_BUFFER 1
#else
#define RADV_SUPPORT_ANDROID_HARDWARE_BUFFER 0
#endif
98 #define radv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
/* Align v up to the next multiple of a. a must be a nonzero power of two. */
static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
	/* a == (a & -a) holds exactly when a has a single bit set. */
	assert(a != 0 && a == (a & -a));
	return (v + a - 1) & ~(a - 1);
}
/* Align v up to the next multiple of a, where a need not be a power of two
 * (npot = non-power-of-two). a must be nonzero.
 */
static inline uint32_t
align_u32_npot(uint32_t v, uint32_t a)
{
	return (v + a - 1) / a * a;
}
/* 64-bit variant of align_u32: align v up to the next multiple of a.
 * a must be a nonzero power of two.
 */
static inline uint64_t
align_u64(uint64_t v, uint64_t a)
{
	assert(a != 0 && a == (a & -a));
	return (v + a - 1) & ~(a - 1);
}
/* Signed variant of align_u32: align v up (towards +infinity) to the next
 * multiple of a. a must be a positive power of two.
 */
static inline int32_t
align_i32(int32_t v, int32_t a)
{
	assert(a != 0 && a == (a & -a));
	return (v + a - 1) & ~(a - 1);
}
/** Returns whether n is a multiple of a. Alignment must be a power of 2. */
static inline bool
radv_is_aligned(uintmax_t n, uintmax_t a)
{
	assert(a == (a & -a));
	return (n & (a - 1)) == 0;
}
/* Integer ceiling division: number of a-sized chunks needed to hold v.
 * a must be nonzero.
 */
static inline uint32_t
round_up_u32(uint32_t v, uint32_t a)
{
	return (v + a - 1) / a;
}
/* 64-bit variant of round_up_u32: integer ceiling division of v by a.
 * a must be nonzero.
 */
static inline uint64_t
round_up_u64(uint64_t v, uint64_t a)
{
	return (v + a - 1) / a;
}
/* Compute the size of mipmap level `levels` from base dimension n:
 * shifts n right by `levels`, clamped to a minimum of 1 for a nonzero base.
 * A zero base stays zero (NOTE(review): the dropped guard body is assumed to
 * be `return 0;` — the only value consistent with the MAX2 clamp below).
 */
static inline uint32_t
radv_minify(uint32_t n, uint32_t levels)
{
	if (unlikely(n == 0))
		return 0;
	else
		return MAX2(n >> levels, 1);
}
156 radv_clamp_f(float f
, float min
, float max
)
169 radv_clear_mask(uint32_t *inout_mask
, uint32_t clear_mask
)
171 if (*inout_mask
& clear_mask
) {
172 *inout_mask
&= ~clear_mask
;
179 #define for_each_bit(b, dword) \
180 for (uint32_t __dword = (dword); \
181 (b) = __builtin_ffs(__dword) - 1, __dword; \
182 __dword &= ~(1 << (b)))
184 #define typed_memcpy(dest, src, count) ({ \
185 STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
186 memcpy((dest), (src), (count) * sizeof(*(src))); \
189 /* Whenever we generate an error, pass it through this function. Useful for
190 * debugging, where we can break on it. Only call at error site, not when
191 * propagating errors. Might be useful to plug in a stack trace here.
194 struct radv_image_view
;
195 struct radv_instance
;
197 VkResult
__vk_errorv(struct radv_instance
*instance
, const void *object
,
198 VkDebugReportObjectTypeEXT type
, VkResult error
,
199 const char *file
, int line
, const char *format
,
202 VkResult
__vk_errorf(struct radv_instance
*instance
, const void *object
,
203 VkDebugReportObjectTypeEXT type
, VkResult error
,
204 const char *file
, int line
, const char *format
, ...)
205 radv_printflike(7, 8);
207 #define vk_error(instance, error) \
208 __vk_errorf(instance, NULL, \
209 VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, \
210 error, __FILE__, __LINE__, NULL);
211 #define vk_errorf(instance, error, format, ...) \
212 __vk_errorf(instance, NULL, \
213 VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, \
214 error, __FILE__, __LINE__, format, ## __VA_ARGS__);
216 void __radv_finishme(const char *file
, int line
, const char *format
, ...)
217 radv_printflike(3, 4);
218 void radv_loge(const char *format
, ...) radv_printflike(1, 2);
219 void radv_loge_v(const char *format
, va_list va
);
220 void radv_logi(const char *format
, ...) radv_printflike(1, 2);
221 void radv_logi_v(const char *format
, va_list va
);
224 * Print a FINISHME message, including its source location.
226 #define radv_finishme(format, ...) \
228 static bool reported = false; \
230 __radv_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__); \
235 /* A non-fatal assert. Useful for debugging. */
237 #define radv_assert(x) ({ \
238 if (unlikely(!(x))) \
239 fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x); \
242 #define radv_assert(x) do {} while(0)
245 #define stub_return(v) \
247 radv_finishme("stub %s", __func__); \
253 radv_finishme("stub %s", __func__); \
257 int radv_get_instance_entrypoint_index(const char *name
);
258 int radv_get_device_entrypoint_index(const char *name
);
259 int radv_get_physical_device_entrypoint_index(const char *name
);
261 const char *radv_get_instance_entry_name(int index
);
262 const char *radv_get_physical_device_entry_name(int index
);
263 const char *radv_get_device_entry_name(int index
);
265 bool radv_instance_entrypoint_is_enabled(int index
, uint32_t core_version
,
266 const struct radv_instance_extension_table
*instance
);
267 bool radv_physical_device_entrypoint_is_enabled(int index
, uint32_t core_version
,
268 const struct radv_instance_extension_table
*instance
);
269 bool radv_device_entrypoint_is_enabled(int index
, uint32_t core_version
,
270 const struct radv_instance_extension_table
*instance
,
271 const struct radv_device_extension_table
*device
);
273 void *radv_lookup_entrypoint(const char *name
);
275 struct radv_physical_device
{
276 VK_LOADER_DATA _loader_data
;
278 /* Link in radv_instance::physical_devices */
279 struct list_head link
;
281 struct radv_instance
* instance
;
283 struct radeon_winsys
*ws
;
284 struct radeon_info rad_info
;
285 char name
[VK_MAX_PHYSICAL_DEVICE_NAME_SIZE
];
286 uint8_t driver_uuid
[VK_UUID_SIZE
];
287 uint8_t device_uuid
[VK_UUID_SIZE
];
288 uint8_t cache_uuid
[VK_UUID_SIZE
];
292 struct wsi_device wsi_device
;
294 bool out_of_order_rast_allowed
;
296 /* Whether DCC should be enabled for MSAA textures. */
297 bool dcc_msaa_allowed
;
299 /* Whether to enable NGG. */
302 /* Whether to enable NGG GS. */
305 /* Whether to enable NGG streamout. */
306 bool use_ngg_streamout
;
308 /* Number of threads per wave. */
309 uint8_t ps_wave_size
;
310 uint8_t cs_wave_size
;
311 uint8_t ge_wave_size
;
313 /* Whether to use the LLVM compiler backend */
316 /* This is the drivers on-disk cache used as a fallback as opposed to
317 * the pipeline cache defined by apps.
319 struct disk_cache
* disk_cache
;
321 VkPhysicalDeviceMemoryProperties memory_properties
;
322 enum radeon_bo_domain memory_domains
[VK_MAX_MEMORY_TYPES
];
323 enum radeon_bo_flag memory_flags
[VK_MAX_MEMORY_TYPES
];
325 drmPciBusInfo bus_info
;
327 struct radv_device_extension_table supported_extensions
;
330 struct radv_instance
{
331 struct vk_object_base base
;
333 VkAllocationCallbacks alloc
;
337 char * applicationName
;
338 uint32_t applicationVersion
;
340 uint32_t engineVersion
;
342 uint64_t debug_flags
;
343 uint64_t perftest_flags
;
345 struct vk_debug_report_instance debug_report_callbacks
;
347 struct radv_instance_extension_table enabled_extensions
;
348 struct radv_instance_dispatch_table dispatch
;
349 struct radv_physical_device_dispatch_table physical_device_dispatch
;
350 struct radv_device_dispatch_table device_dispatch
;
352 bool physical_devices_enumerated
;
353 struct list_head physical_devices
;
355 struct driOptionCache dri_options
;
356 struct driOptionCache available_dri_options
;
359 * Workarounds for game bugs.
361 bool enable_mrt_output_nan_fixup
;
364 VkResult
radv_init_wsi(struct radv_physical_device
*physical_device
);
365 void radv_finish_wsi(struct radv_physical_device
*physical_device
);
367 bool radv_instance_extension_supported(const char *name
);
368 uint32_t radv_physical_device_api_version(struct radv_physical_device
*dev
);
369 bool radv_physical_device_extension_supported(struct radv_physical_device
*dev
,
374 struct radv_pipeline_cache
{
375 struct vk_object_base base
;
376 struct radv_device
* device
;
377 pthread_mutex_t mutex
;
378 VkPipelineCacheCreateFlags flags
;
382 uint32_t kernel_count
;
383 struct cache_entry
** hash_table
;
386 VkAllocationCallbacks alloc
;
389 struct radv_pipeline_key
{
390 uint32_t instance_rate_inputs
;
391 uint32_t instance_rate_divisors
[MAX_VERTEX_ATTRIBS
];
392 uint8_t vertex_attribute_formats
[MAX_VERTEX_ATTRIBS
];
393 uint32_t vertex_attribute_bindings
[MAX_VERTEX_ATTRIBS
];
394 uint32_t vertex_attribute_offsets
[MAX_VERTEX_ATTRIBS
];
395 uint32_t vertex_attribute_strides
[MAX_VERTEX_ATTRIBS
];
396 uint64_t vertex_alpha_adjust
;
397 uint32_t vertex_post_shuffle
;
398 unsigned tess_input_vertices
;
402 uint8_t log2_ps_iter_samples
;
405 uint32_t has_multiview_view_index
: 1;
406 uint32_t optimisations_disabled
: 1;
409 /* Non-zero if a required subgroup size is specified via
410 * VK_EXT_subgroup_size_control.
412 uint8_t compute_subgroup_size
;
415 struct radv_shader_binary
;
416 struct radv_shader_variant
;
419 radv_pipeline_cache_init(struct radv_pipeline_cache
*cache
,
420 struct radv_device
*device
);
422 radv_pipeline_cache_finish(struct radv_pipeline_cache
*cache
);
424 radv_pipeline_cache_load(struct radv_pipeline_cache
*cache
,
425 const void *data
, size_t size
);
428 radv_create_shader_variants_from_pipeline_cache(struct radv_device
*device
,
429 struct radv_pipeline_cache
*cache
,
430 const unsigned char *sha1
,
431 struct radv_shader_variant
**variants
,
432 bool *found_in_application_cache
);
435 radv_pipeline_cache_insert_shaders(struct radv_device
*device
,
436 struct radv_pipeline_cache
*cache
,
437 const unsigned char *sha1
,
438 struct radv_shader_variant
**variants
,
439 struct radv_shader_binary
*const *binaries
);
/* Index for per-layout depth/stencil blit pipeline variants: HTILE
 * compression (tiling) is usable only outside VK_IMAGE_LAYOUT_GENERAL.
 */
enum radv_blit_ds_layout {
	RADV_BLIT_DS_LAYOUT_TILE_ENABLE,
	RADV_BLIT_DS_LAYOUT_TILE_DISABLE,
	RADV_BLIT_DS_LAYOUT_COUNT,
};
447 static inline enum radv_blit_ds_layout
radv_meta_blit_ds_to_type(VkImageLayout layout
)
449 return (layout
== VK_IMAGE_LAYOUT_GENERAL
) ? RADV_BLIT_DS_LAYOUT_TILE_DISABLE
: RADV_BLIT_DS_LAYOUT_TILE_ENABLE
;
452 static inline VkImageLayout
radv_meta_blit_ds_to_layout(enum radv_blit_ds_layout ds_layout
)
454 return ds_layout
== RADV_BLIT_DS_LAYOUT_TILE_ENABLE
? VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
: VK_IMAGE_LAYOUT_GENERAL
;
/* Index for per-destination-layout meta pipeline variants
 * (GENERAL vs. an optimal layout).
 */
enum radv_meta_dst_layout {
	RADV_META_DST_LAYOUT_GENERAL,
	RADV_META_DST_LAYOUT_OPTIMAL,
	RADV_META_DST_LAYOUT_COUNT,
};
463 static inline enum radv_meta_dst_layout
radv_meta_dst_layout_from_layout(VkImageLayout layout
)
465 return (layout
== VK_IMAGE_LAYOUT_GENERAL
) ? RADV_META_DST_LAYOUT_GENERAL
: RADV_META_DST_LAYOUT_OPTIMAL
;
468 static inline VkImageLayout
radv_meta_dst_layout_to_layout(enum radv_meta_dst_layout layout
)
470 return layout
== RADV_META_DST_LAYOUT_OPTIMAL
? VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
: VK_IMAGE_LAYOUT_GENERAL
;
473 struct radv_meta_state
{
474 VkAllocationCallbacks alloc
;
476 struct radv_pipeline_cache cache
;
479 * For on-demand pipeline creation, makes sure that
480 * only one thread tries to build a pipeline at the same time.
485 * Use array element `i` for images with `2^i` samples.
488 VkRenderPass render_pass
[NUM_META_FS_KEYS
];
489 VkPipeline color_pipelines
[NUM_META_FS_KEYS
];
491 VkRenderPass depthstencil_rp
;
492 VkPipeline depth_only_pipeline
[NUM_DEPTH_CLEAR_PIPELINES
];
493 VkPipeline stencil_only_pipeline
[NUM_DEPTH_CLEAR_PIPELINES
];
494 VkPipeline depthstencil_pipeline
[NUM_DEPTH_CLEAR_PIPELINES
];
496 VkPipeline depth_only_unrestricted_pipeline
[NUM_DEPTH_CLEAR_PIPELINES
];
497 VkPipeline stencil_only_unrestricted_pipeline
[NUM_DEPTH_CLEAR_PIPELINES
];
498 VkPipeline depthstencil_unrestricted_pipeline
[NUM_DEPTH_CLEAR_PIPELINES
];
499 } clear
[MAX_SAMPLES_LOG2
];
501 VkPipelineLayout clear_color_p_layout
;
502 VkPipelineLayout clear_depth_p_layout
;
503 VkPipelineLayout clear_depth_unrestricted_p_layout
;
505 /* Optimized compute fast HTILE clear for stencil or depth only. */
506 VkPipeline clear_htile_mask_pipeline
;
507 VkPipelineLayout clear_htile_mask_p_layout
;
508 VkDescriptorSetLayout clear_htile_mask_ds_layout
;
511 VkRenderPass render_pass
[NUM_META_FS_KEYS
][RADV_META_DST_LAYOUT_COUNT
];
513 /** Pipeline that blits from a 1D image. */
514 VkPipeline pipeline_1d_src
[NUM_META_FS_KEYS
];
516 /** Pipeline that blits from a 2D image. */
517 VkPipeline pipeline_2d_src
[NUM_META_FS_KEYS
];
519 /** Pipeline that blits from a 3D image. */
520 VkPipeline pipeline_3d_src
[NUM_META_FS_KEYS
];
522 VkRenderPass depth_only_rp
[RADV_BLIT_DS_LAYOUT_COUNT
];
523 VkPipeline depth_only_1d_pipeline
;
524 VkPipeline depth_only_2d_pipeline
;
525 VkPipeline depth_only_3d_pipeline
;
527 VkRenderPass stencil_only_rp
[RADV_BLIT_DS_LAYOUT_COUNT
];
528 VkPipeline stencil_only_1d_pipeline
;
529 VkPipeline stencil_only_2d_pipeline
;
530 VkPipeline stencil_only_3d_pipeline
;
531 VkPipelineLayout pipeline_layout
;
532 VkDescriptorSetLayout ds_layout
;
536 VkPipelineLayout p_layouts
[5];
537 VkDescriptorSetLayout ds_layouts
[5];
538 VkPipeline pipelines
[5][NUM_META_FS_KEYS
];
540 VkPipeline depth_only_pipeline
[5];
542 VkPipeline stencil_only_pipeline
[5];
543 } blit2d
[MAX_SAMPLES_LOG2
];
545 VkRenderPass blit2d_render_passes
[NUM_META_FS_KEYS
][RADV_META_DST_LAYOUT_COUNT
];
546 VkRenderPass blit2d_depth_only_rp
[RADV_BLIT_DS_LAYOUT_COUNT
];
547 VkRenderPass blit2d_stencil_only_rp
[RADV_BLIT_DS_LAYOUT_COUNT
];
550 VkPipelineLayout img_p_layout
;
551 VkDescriptorSetLayout img_ds_layout
;
553 VkPipeline pipeline_3d
;
556 VkPipelineLayout img_p_layout
;
557 VkDescriptorSetLayout img_ds_layout
;
559 VkPipeline pipeline_3d
;
562 VkPipelineLayout img_p_layout
;
563 VkDescriptorSetLayout img_ds_layout
;
567 VkPipelineLayout img_p_layout
;
568 VkDescriptorSetLayout img_ds_layout
;
570 VkPipeline pipeline_3d
;
573 VkPipelineLayout img_p_layout
;
574 VkDescriptorSetLayout img_ds_layout
;
578 VkPipelineLayout img_p_layout
;
579 VkDescriptorSetLayout img_ds_layout
;
581 VkPipeline pipeline_3d
;
584 VkPipelineLayout img_p_layout
;
585 VkDescriptorSetLayout img_ds_layout
;
590 VkPipelineLayout p_layout
;
591 VkPipeline pipeline
[NUM_META_FS_KEYS
];
592 VkRenderPass pass
[NUM_META_FS_KEYS
];
596 VkDescriptorSetLayout ds_layout
;
597 VkPipelineLayout p_layout
;
600 VkPipeline i_pipeline
;
601 VkPipeline srgb_pipeline
;
602 } rc
[MAX_SAMPLES_LOG2
];
604 VkPipeline depth_zero_pipeline
;
606 VkPipeline average_pipeline
;
607 VkPipeline max_pipeline
;
608 VkPipeline min_pipeline
;
609 } depth
[MAX_SAMPLES_LOG2
];
611 VkPipeline stencil_zero_pipeline
;
613 VkPipeline max_pipeline
;
614 VkPipeline min_pipeline
;
615 } stencil
[MAX_SAMPLES_LOG2
];
619 VkDescriptorSetLayout ds_layout
;
620 VkPipelineLayout p_layout
;
623 VkRenderPass render_pass
[NUM_META_FS_KEYS
][RADV_META_DST_LAYOUT_COUNT
];
624 VkPipeline pipeline
[NUM_META_FS_KEYS
];
625 } rc
[MAX_SAMPLES_LOG2
];
627 VkRenderPass depth_render_pass
;
628 VkPipeline depth_zero_pipeline
;
630 VkPipeline average_pipeline
;
631 VkPipeline max_pipeline
;
632 VkPipeline min_pipeline
;
633 } depth
[MAX_SAMPLES_LOG2
];
635 VkRenderPass stencil_render_pass
;
636 VkPipeline stencil_zero_pipeline
;
638 VkPipeline max_pipeline
;
639 VkPipeline min_pipeline
;
640 } stencil
[MAX_SAMPLES_LOG2
];
644 VkPipelineLayout p_layout
;
645 VkPipeline decompress_pipeline
[NUM_DEPTH_DECOMPRESS_PIPELINES
];
646 VkPipeline resummarize_pipeline
;
648 } depth_decomp
[MAX_SAMPLES_LOG2
];
651 VkPipelineLayout p_layout
;
652 VkPipeline cmask_eliminate_pipeline
;
653 VkPipeline fmask_decompress_pipeline
;
654 VkPipeline dcc_decompress_pipeline
;
657 VkDescriptorSetLayout dcc_decompress_compute_ds_layout
;
658 VkPipelineLayout dcc_decompress_compute_p_layout
;
659 VkPipeline dcc_decompress_compute_pipeline
;
663 VkPipelineLayout fill_p_layout
;
664 VkPipelineLayout copy_p_layout
;
665 VkDescriptorSetLayout fill_ds_layout
;
666 VkDescriptorSetLayout copy_ds_layout
;
667 VkPipeline fill_pipeline
;
668 VkPipeline copy_pipeline
;
672 VkDescriptorSetLayout ds_layout
;
673 VkPipelineLayout p_layout
;
674 VkPipeline occlusion_query_pipeline
;
675 VkPipeline pipeline_statistics_query_pipeline
;
676 VkPipeline tfb_query_pipeline
;
677 VkPipeline timestamp_query_pipeline
;
681 VkDescriptorSetLayout ds_layout
;
682 VkPipelineLayout p_layout
;
683 VkPipeline pipeline
[MAX_SAMPLES_LOG2
];
688 #define RADV_QUEUE_GENERAL 0
689 #define RADV_QUEUE_COMPUTE 1
690 #define RADV_QUEUE_TRANSFER 2
692 #define RADV_MAX_QUEUE_FAMILIES 3
694 struct radv_deferred_queue_submission
;
696 enum ring_type
radv_queue_family_to_ring(int f
);
699 VK_LOADER_DATA _loader_data
;
700 struct radv_device
* device
;
701 struct radeon_winsys_ctx
*hw_ctx
;
702 enum radeon_ctx_priority priority
;
703 uint32_t queue_family_index
;
705 VkDeviceQueueCreateFlags flags
;
707 uint32_t scratch_size_per_wave
;
708 uint32_t scratch_waves
;
709 uint32_t compute_scratch_size_per_wave
;
710 uint32_t compute_scratch_waves
;
711 uint32_t esgs_ring_size
;
712 uint32_t gsvs_ring_size
;
716 bool has_sample_positions
;
718 struct radeon_winsys_bo
*scratch_bo
;
719 struct radeon_winsys_bo
*descriptor_bo
;
720 struct radeon_winsys_bo
*compute_scratch_bo
;
721 struct radeon_winsys_bo
*esgs_ring_bo
;
722 struct radeon_winsys_bo
*gsvs_ring_bo
;
723 struct radeon_winsys_bo
*tess_rings_bo
;
724 struct radeon_winsys_bo
*gds_bo
;
725 struct radeon_winsys_bo
*gds_oa_bo
;
726 struct radeon_cmdbuf
*initial_preamble_cs
;
727 struct radeon_cmdbuf
*initial_full_flush_preamble_cs
;
728 struct radeon_cmdbuf
*continue_preamble_cs
;
730 struct list_head pending_submissions
;
731 pthread_mutex_t pending_mutex
;
733 pthread_mutex_t thread_mutex
;
734 pthread_cond_t thread_cond
;
735 struct radv_deferred_queue_submission
*thread_submission
;
736 pthread_t submission_thread
;
741 struct radv_bo_list
{
742 struct radv_winsys_bo_list list
;
744 pthread_mutex_t mutex
;
747 VkResult
radv_bo_list_add(struct radv_device
*device
,
748 struct radeon_winsys_bo
*bo
);
749 void radv_bo_list_remove(struct radv_device
*device
,
750 struct radeon_winsys_bo
*bo
);
752 #define RADV_BORDER_COLOR_COUNT 4096
753 #define RADV_BORDER_COLOR_BUFFER_SIZE (sizeof(VkClearColorValue) * RADV_BORDER_COLOR_COUNT)
755 struct radv_device_border_color_data
{
756 bool used
[RADV_BORDER_COLOR_COUNT
];
758 struct radeon_winsys_bo
*bo
;
759 VkClearColorValue
*colors_gpu_ptr
;
761 /* Mutex is required to guarantee vkCreateSampler thread safety
762 * given that we are writing to a buffer and checking color occupation */
763 pthread_mutex_t mutex
;
769 struct radv_instance
* instance
;
770 struct radeon_winsys
*ws
;
772 struct radv_meta_state meta_state
;
774 struct radv_queue
*queues
[RADV_MAX_QUEUE_FAMILIES
];
775 int queue_count
[RADV_MAX_QUEUE_FAMILIES
];
776 struct radeon_cmdbuf
*empty_cs
[RADV_MAX_QUEUE_FAMILIES
];
778 bool always_use_syncobj
;
781 uint32_t tess_offchip_block_dw_size
;
782 uint32_t scratch_waves
;
783 uint32_t dispatch_initiator
;
785 uint32_t gs_table_depth
;
787 /* MSAA sample locations.
788 * The first index is the sample index.
789 * The second index is the coordinate: X, Y. */
790 float sample_locations_1x
[1][2];
791 float sample_locations_2x
[2][2];
792 float sample_locations_4x
[4][2];
793 float sample_locations_8x
[8][2];
796 uint32_t gfx_init_size_dw
;
797 struct radeon_winsys_bo
*gfx_init
;
799 struct radeon_winsys_bo
*trace_bo
;
800 uint32_t *trace_id_ptr
;
802 /* Whether to keep shader debug info, for tracing or VK_AMD_shader_info */
803 bool keep_shader_info
;
805 struct radv_physical_device
*physical_device
;
807 /* Backup in-memory cache to be used if the app doesn't provide one */
808 struct radv_pipeline_cache
* mem_cache
;
811 * use different counters so MSAA MRTs get consecutive surface indices,
812 * even if MASK is allocated in between.
814 uint32_t image_mrt_offset_counter
;
815 uint32_t fmask_mrt_offset_counter
;
816 struct list_head shader_slabs
;
817 mtx_t shader_slab_mutex
;
819 /* For detecting VM faults reported by dmesg. */
820 uint64_t dmesg_timestamp
;
822 struct radv_device_extension_table enabled_extensions
;
823 struct radv_device_dispatch_table dispatch
;
825 /* Whether the app has enabled the robustBufferAccess feature. */
826 bool robust_buffer_access
;
828 /* Whether the driver uses a global BO list. */
829 bool use_global_bo_list
;
831 struct radv_bo_list bo_list
;
833 /* Whether anisotropy is forced with RADV_TEX_ANISO (-1 is disabled). */
836 struct radv_device_border_color_data border_color_data
;
838 /* Condition variable for legacy timelines, to notify waiters when a
839 * new point gets submitted. */
840 pthread_cond_t timeline_cond
;
843 struct radeon_cmdbuf
*thread_trace_start_cs
[2];
844 struct radeon_cmdbuf
*thread_trace_stop_cs
[2];
845 struct radeon_winsys_bo
*thread_trace_bo
;
846 void *thread_trace_ptr
;
847 uint32_t thread_trace_buffer_size
;
848 int thread_trace_start_frame
;
851 struct radv_shader_variant
*trap_handler_shader
;
852 struct radeon_winsys_bo
*tma_bo
; /* Trap Memory Address */
855 /* Overallocation. */
856 bool overallocation_disallowed
;
857 uint64_t allocated_memory_size
[VK_MAX_MEMORY_HEAPS
];
858 mtx_t overallocation_mutex
;
860 /* Track the number of device loss occurs. */
864 VkResult
_radv_device_set_lost(struct radv_device
*device
,
865 const char *file
, int line
,
866 const char *msg
, ...)
867 radv_printflike(4, 5);
869 #define radv_device_set_lost(dev, ...) \
870 _radv_device_set_lost(dev, __FILE__, __LINE__, __VA_ARGS__)
873 radv_device_is_lost(const struct radv_device
*device
)
875 return unlikely(p_atomic_read(&device
->lost
));
878 struct radv_device_memory
{
879 struct vk_object_base base
;
880 struct radeon_winsys_bo
*bo
;
881 /* for dedicated allocations */
882 struct radv_image
*image
;
883 struct radv_buffer
*buffer
;
889 #if RADV_SUPPORT_ANDROID_HARDWARE_BUFFER
890 struct AHardwareBuffer
* android_hardware_buffer
;
895 struct radv_descriptor_range
{
900 struct radv_descriptor_set
{
901 struct vk_object_base base
;
902 const struct radv_descriptor_set_layout
*layout
;
904 uint32_t buffer_count
;
906 struct radeon_winsys_bo
*bo
;
908 uint32_t *mapped_ptr
;
909 struct radv_descriptor_range
*dynamic_descriptors
;
911 struct radeon_winsys_bo
*descriptors
[0];
914 struct radv_push_descriptor_set
916 struct radv_descriptor_set set
;
920 struct radv_descriptor_pool_entry
{
923 struct radv_descriptor_set
*set
;
926 struct radv_descriptor_pool
{
927 struct vk_object_base base
;
928 struct radeon_winsys_bo
*bo
;
930 uint64_t current_offset
;
933 uint8_t *host_memory_base
;
934 uint8_t *host_memory_ptr
;
935 uint8_t *host_memory_end
;
937 uint32_t entry_count
;
938 uint32_t max_entry_count
;
939 struct radv_descriptor_pool_entry entries
[0];
942 struct radv_descriptor_update_template_entry
{
943 VkDescriptorType descriptor_type
;
945 /* The number of descriptors to update */
946 uint32_t descriptor_count
;
948 /* Into mapped_ptr or dynamic_descriptors, in units of the respective array */
951 /* In dwords. Not valid/used for dynamic descriptors */
954 uint32_t buffer_offset
;
956 /* Only valid for combined image samplers and samplers */
958 uint8_t sampler_offset
;
964 /* For push descriptors */
965 const uint32_t *immutable_samplers
;
968 struct radv_descriptor_update_template
{
969 struct vk_object_base base
;
970 uint32_t entry_count
;
971 VkPipelineBindPoint bind_point
;
972 struct radv_descriptor_update_template_entry entry
[0];
976 struct vk_object_base base
;
979 VkBufferUsageFlags usage
;
980 VkBufferCreateFlags flags
;
983 struct radeon_winsys_bo
* bo
;
989 enum radv_dynamic_state_bits
{
990 RADV_DYNAMIC_VIEWPORT
= 1 << 0,
991 RADV_DYNAMIC_SCISSOR
= 1 << 1,
992 RADV_DYNAMIC_LINE_WIDTH
= 1 << 2,
993 RADV_DYNAMIC_DEPTH_BIAS
= 1 << 3,
994 RADV_DYNAMIC_BLEND_CONSTANTS
= 1 << 4,
995 RADV_DYNAMIC_DEPTH_BOUNDS
= 1 << 5,
996 RADV_DYNAMIC_STENCIL_COMPARE_MASK
= 1 << 6,
997 RADV_DYNAMIC_STENCIL_WRITE_MASK
= 1 << 7,
998 RADV_DYNAMIC_STENCIL_REFERENCE
= 1 << 8,
999 RADV_DYNAMIC_DISCARD_RECTANGLE
= 1 << 9,
1000 RADV_DYNAMIC_SAMPLE_LOCATIONS
= 1 << 10,
1001 RADV_DYNAMIC_LINE_STIPPLE
= 1 << 11,
1002 RADV_DYNAMIC_CULL_MODE
= 1 << 12,
1003 RADV_DYNAMIC_FRONT_FACE
= 1 << 13,
1004 RADV_DYNAMIC_PRIMITIVE_TOPOLOGY
= 1 << 14,
1005 RADV_DYNAMIC_DEPTH_TEST_ENABLE
= 1 << 15,
1006 RADV_DYNAMIC_DEPTH_WRITE_ENABLE
= 1 << 16,
1007 RADV_DYNAMIC_DEPTH_COMPARE_OP
= 1 << 17,
1008 RADV_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE
= 1 << 18,
1009 RADV_DYNAMIC_STENCIL_TEST_ENABLE
= 1 << 19,
1010 RADV_DYNAMIC_STENCIL_OP
= 1 << 20,
1011 RADV_DYNAMIC_VERTEX_INPUT_BINDING_STRIDE
= 1 << 21,
1012 RADV_DYNAMIC_ALL
= (1 << 22) - 1,
1015 enum radv_cmd_dirty_bits
{
1016 /* Keep the dynamic state dirty bits in sync with
1017 * enum radv_dynamic_state_bits */
1018 RADV_CMD_DIRTY_DYNAMIC_VIEWPORT
= 1 << 0,
1019 RADV_CMD_DIRTY_DYNAMIC_SCISSOR
= 1 << 1,
1020 RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH
= 1 << 2,
1021 RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS
= 1 << 3,
1022 RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS
= 1 << 4,
1023 RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS
= 1 << 5,
1024 RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK
= 1 << 6,
1025 RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK
= 1 << 7,
1026 RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE
= 1 << 8,
1027 RADV_CMD_DIRTY_DYNAMIC_DISCARD_RECTANGLE
= 1 << 9,
1028 RADV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS
= 1 << 10,
1029 RADV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE
= 1 << 11,
1030 RADV_CMD_DIRTY_DYNAMIC_CULL_MODE
= 1 << 12,
1031 RADV_CMD_DIRTY_DYNAMIC_FRONT_FACE
= 1 << 13,
1032 RADV_CMD_DIRTY_DYNAMIC_PRIMITIVE_TOPOLOGY
= 1 << 14,
1033 RADV_CMD_DIRTY_DYNAMIC_DEPTH_TEST_ENABLE
= 1 << 15,
1034 RADV_CMD_DIRTY_DYNAMIC_DEPTH_WRITE_ENABLE
= 1 << 16,
1035 RADV_CMD_DIRTY_DYNAMIC_DEPTH_COMPARE_OP
= 1 << 17,
1036 RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE
= 1 << 18,
1037 RADV_CMD_DIRTY_DYNAMIC_STENCIL_TEST_ENABLE
= 1 << 19,
1038 RADV_CMD_DIRTY_DYNAMIC_STENCIL_OP
= 1 << 20,
1039 RADV_CMD_DIRTY_DYNAMIC_VERTEX_INPUT_BINDING_STRIDE
= 1 << 21,
1040 RADV_CMD_DIRTY_DYNAMIC_ALL
= (1 << 22) - 1,
1041 RADV_CMD_DIRTY_PIPELINE
= 1 << 22,
1042 RADV_CMD_DIRTY_INDEX_BUFFER
= 1 << 23,
1043 RADV_CMD_DIRTY_FRAMEBUFFER
= 1 << 24,
1044 RADV_CMD_DIRTY_VERTEX_BUFFER
= 1 << 25,
1045 RADV_CMD_DIRTY_STREAMOUT_BUFFER
= 1 << 26,
1048 enum radv_cmd_flush_bits
{
1049 /* Instruction cache. */
1050 RADV_CMD_FLAG_INV_ICACHE
= 1 << 0,
1051 /* Scalar L1 cache. */
1052 RADV_CMD_FLAG_INV_SCACHE
= 1 << 1,
1053 /* Vector L1 cache. */
1054 RADV_CMD_FLAG_INV_VCACHE
= 1 << 2,
1055 /* L2 cache + L2 metadata cache writeback & invalidate.
1056 * GFX6-8: Used by shaders only. GFX9-10: Used by everything. */
1057 RADV_CMD_FLAG_INV_L2
= 1 << 3,
1058 /* L2 writeback (write dirty L2 lines to memory for non-L2 clients).
1059 * Only used for coherency with non-L2 clients like CB, DB, CP on GFX6-8.
1060 * GFX6-7 will do complete invalidation, because the writeback is unsupported. */
1061 RADV_CMD_FLAG_WB_L2
= 1 << 4,
1062 /* Framebuffer caches */
1063 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META
= 1 << 5,
1064 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META
= 1 << 6,
1065 RADV_CMD_FLAG_FLUSH_AND_INV_DB
= 1 << 7,
1066 RADV_CMD_FLAG_FLUSH_AND_INV_CB
= 1 << 8,
1067 /* Engine synchronization. */
1068 RADV_CMD_FLAG_VS_PARTIAL_FLUSH
= 1 << 9,
1069 RADV_CMD_FLAG_PS_PARTIAL_FLUSH
= 1 << 10,
1070 RADV_CMD_FLAG_CS_PARTIAL_FLUSH
= 1 << 11,
1071 RADV_CMD_FLAG_VGT_FLUSH
= 1 << 12,
1072 /* Pipeline query controls. */
1073 RADV_CMD_FLAG_START_PIPELINE_STATS
= 1 << 13,
1074 RADV_CMD_FLAG_STOP_PIPELINE_STATS
= 1 << 14,
1075 RADV_CMD_FLAG_VGT_STREAMOUT_SYNC
= 1 << 15,
1077 RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER
= (RADV_CMD_FLAG_FLUSH_AND_INV_CB
|
1078 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META
|
1079 RADV_CMD_FLAG_FLUSH_AND_INV_DB
|
1080 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META
)
1083 struct radv_vertex_binding
{
1084 struct radv_buffer
* buffer
;
1085 VkDeviceSize offset
;
1087 VkDeviceSize stride
;
1090 struct radv_streamout_binding
{
1091 struct radv_buffer
*buffer
;
1092 VkDeviceSize offset
;
1096 struct radv_streamout_state
{
1097 /* Mask of bound streamout buffers. */
1098 uint8_t enabled_mask
;
1100 /* External state that comes from the last vertex stage, it must be
1101 * set explicitely when binding a new graphics pipeline.
1103 uint16_t stride_in_dw
[MAX_SO_BUFFERS
];
1104 uint32_t enabled_stream_buffers_mask
; /* stream0 buffers0-3 in 4 LSB */
1106 /* State of VGT_STRMOUT_BUFFER_(CONFIG|END) */
1107 uint32_t hw_enabled_mask
;
1109 /* State of VGT_STRMOUT_(CONFIG|EN) */
1110 bool streamout_enabled
;
1113 struct radv_viewport_state
{
1115 VkViewport viewports
[MAX_VIEWPORTS
];
1118 struct radv_scissor_state
{
1120 VkRect2D scissors
[MAX_SCISSORS
];
1123 struct radv_discard_rectangle_state
{
1125 VkRect2D rectangles
[MAX_DISCARD_RECTANGLES
];
1128 struct radv_sample_locations_state
{
1129 VkSampleCountFlagBits per_pixel
;
1130 VkExtent2D grid_size
;
1132 VkSampleLocationEXT locations
[MAX_SAMPLE_LOCATIONS
];
1135 struct radv_dynamic_state
{
1137 * Bitmask of (1 << VK_DYNAMIC_STATE_*).
1138 * Defines the set of saved dynamic state.
1142 struct radv_viewport_state viewport
;
1144 struct radv_scissor_state scissor
;
1154 float blend_constants
[4];
1164 } stencil_compare_mask
;
1169 } stencil_write_mask
;
1173 VkStencilOp fail_op
;
1174 VkStencilOp pass_op
;
1175 VkStencilOp depth_fail_op
;
1176 VkCompareOp compare_op
;
1180 VkStencilOp fail_op
;
1181 VkStencilOp pass_op
;
1182 VkStencilOp depth_fail_op
;
1183 VkCompareOp compare_op
;
1190 } stencil_reference
;
1192 struct radv_discard_rectangle_state discard_rectangle
;
1194 struct radv_sample_locations_state sample_location
;
1201 VkCullModeFlags cull_mode
;
1202 VkFrontFace front_face
;
1203 unsigned primitive_topology
;
1205 bool depth_test_enable
;
1206 bool depth_write_enable
;
1207 VkCompareOp depth_compare_op
;
1208 bool depth_bounds_test_enable
;
1209 bool stencil_test_enable
;
1212 extern const struct radv_dynamic_state default_dynamic_state
;
1215 radv_get_debug_option_name(int id
);
1218 radv_get_perftest_option_name(int id
);
1220 struct radv_color_buffer_info
{
1221 uint64_t cb_color_base
;
1222 uint64_t cb_color_cmask
;
1223 uint64_t cb_color_fmask
;
1224 uint64_t cb_dcc_base
;
1225 uint32_t cb_color_slice
;
1226 uint32_t cb_color_view
;
1227 uint32_t cb_color_info
;
1228 uint32_t cb_color_attrib
;
1229 uint32_t cb_color_attrib2
; /* GFX9 and later */
1230 uint32_t cb_color_attrib3
; /* GFX10 and later */
1231 uint32_t cb_dcc_control
;
1232 uint32_t cb_color_cmask_slice
;
1233 uint32_t cb_color_fmask_slice
;
1235 uint32_t cb_color_pitch
; // GFX6-GFX8
1236 uint32_t cb_mrt_epitch
; // GFX9+
1240 struct radv_ds_buffer_info
{
1241 uint64_t db_z_read_base
;
1242 uint64_t db_stencil_read_base
;
1243 uint64_t db_z_write_base
;
1244 uint64_t db_stencil_write_base
;
1245 uint64_t db_htile_data_base
;
1246 uint32_t db_depth_info
;
1248 uint32_t db_stencil_info
;
1249 uint32_t db_depth_view
;
1250 uint32_t db_depth_size
;
1251 uint32_t db_depth_slice
;
1252 uint32_t db_htile_surface
;
1253 uint32_t pa_su_poly_offset_db_fmt_cntl
;
1254 uint32_t db_z_info2
; /* GFX9 only */
1255 uint32_t db_stencil_info2
; /* GFX9 only */
1260 radv_initialise_color_surface(struct radv_device
*device
,
1261 struct radv_color_buffer_info
*cb
,
1262 struct radv_image_view
*iview
);
1264 radv_initialise_ds_surface(struct radv_device
*device
,
1265 struct radv_ds_buffer_info
*ds
,
1266 struct radv_image_view
*iview
);
1269 * Attachment state when recording a renderpass instance.
1271 * The clear value is valid only if there exists a pending clear.
1273 struct radv_attachment_state
{
1274 VkImageAspectFlags pending_clear_aspects
;
1275 uint32_t cleared_views
;
1276 VkClearValue clear_value
;
1277 VkImageLayout current_layout
;
1278 VkImageLayout current_stencil_layout
;
1279 bool current_in_render_loop
;
1280 struct radv_sample_locations_state sample_location
;
1283 struct radv_color_buffer_info cb
;
1284 struct radv_ds_buffer_info ds
;
1286 struct radv_image_view
*iview
;
1289 struct radv_descriptor_state
{
1290 struct radv_descriptor_set
*sets
[MAX_SETS
];
1293 struct radv_push_descriptor_set push_set
;
1295 uint32_t dynamic_buffers
[4 * MAX_DYNAMIC_BUFFERS
];
1298 struct radv_subpass_sample_locs_state
{
1299 uint32_t subpass_idx
;
1300 struct radv_sample_locations_state sample_location
;
1303 struct radv_cmd_state
{
1304 /* Vertex descriptors */
1311 uint32_t prefetch_L2_mask
;
1313 struct radv_pipeline
* pipeline
;
1314 struct radv_pipeline
* emitted_pipeline
;
1315 struct radv_pipeline
* compute_pipeline
;
1316 struct radv_pipeline
* emitted_compute_pipeline
;
1317 struct radv_framebuffer
* framebuffer
;
1318 struct radv_render_pass
* pass
;
1319 const struct radv_subpass
* subpass
;
1320 struct radv_dynamic_state dynamic
;
1321 struct radv_attachment_state
* attachments
;
1322 struct radv_streamout_state streamout
;
1323 VkRect2D render_area
;
1325 uint32_t num_subpass_sample_locs
;
1326 struct radv_subpass_sample_locs_state
* subpass_sample_locs
;
1329 struct radv_buffer
*index_buffer
;
1330 uint64_t index_offset
;
1331 uint32_t index_type
;
1332 uint32_t max_index_count
;
1334 int32_t last_index_type
;
1336 int32_t last_primitive_reset_en
;
1337 uint32_t last_primitive_reset_index
;
1338 enum radv_cmd_flush_bits flush_bits
;
1339 unsigned active_occlusion_queries
;
1340 bool perfect_occlusion_queries_enabled
;
1341 unsigned active_pipeline_queries
;
1342 unsigned active_pipeline_gds_queries
;
1345 uint32_t last_ia_multi_vgt_param
;
1347 uint32_t last_num_instances
;
1348 uint32_t last_first_instance
;
1349 uint32_t last_vertex_offset
;
1351 uint32_t last_sx_ps_downconvert
;
1352 uint32_t last_sx_blend_opt_epsilon
;
1353 uint32_t last_sx_blend_opt_control
;
1355 /* Whether CP DMA is busy/idle. */
1358 /* Conditional rendering info. */
1359 int predication_type
; /* -1: disabled, 0: normal, 1: inverted */
1360 uint64_t predication_va
;
1362 /* Inheritance info. */
1363 VkQueryPipelineStatisticFlags inherited_pipeline_statistics
;
1365 bool context_roll_without_scissor_emitted
;
1367 /* SQTT related state. */
1368 uint32_t current_event_type
;
1369 uint32_t num_events
;
1370 uint32_t num_layout_transitions
;
1373 struct radv_cmd_pool
{
1374 struct vk_object_base base
;
1375 VkAllocationCallbacks alloc
;
1376 struct list_head cmd_buffers
;
1377 struct list_head free_cmd_buffers
;
1378 uint32_t queue_family_index
;
1381 struct radv_cmd_buffer_upload
{
1385 struct radeon_winsys_bo
*upload_bo
;
1386 struct list_head list
;
1389 enum radv_cmd_buffer_status
{
1390 RADV_CMD_BUFFER_STATUS_INVALID
,
1391 RADV_CMD_BUFFER_STATUS_INITIAL
,
1392 RADV_CMD_BUFFER_STATUS_RECORDING
,
1393 RADV_CMD_BUFFER_STATUS_EXECUTABLE
,
1394 RADV_CMD_BUFFER_STATUS_PENDING
,
1397 struct radv_cmd_buffer
{
1398 struct vk_object_base base
;
1400 struct radv_device
* device
;
1402 struct radv_cmd_pool
* pool
;
1403 struct list_head pool_link
;
1405 VkCommandBufferUsageFlags usage_flags
;
1406 VkCommandBufferLevel level
;
1407 enum radv_cmd_buffer_status status
;
1408 struct radeon_cmdbuf
*cs
;
1409 struct radv_cmd_state state
;
1410 struct radv_vertex_binding vertex_bindings
[MAX_VBS
];
1411 struct radv_streamout_binding streamout_bindings
[MAX_SO_BUFFERS
];
1412 uint32_t queue_family_index
;
1414 uint8_t push_constants
[MAX_PUSH_CONSTANTS_SIZE
];
1415 VkShaderStageFlags push_constant_stages
;
1416 struct radv_descriptor_set meta_push_descriptors
;
1418 struct radv_descriptor_state descriptors
[MAX_BIND_POINTS
];
1420 struct radv_cmd_buffer_upload upload
;
1422 uint32_t scratch_size_per_wave_needed
;
1423 uint32_t scratch_waves_wanted
;
1424 uint32_t compute_scratch_size_per_wave_needed
;
1425 uint32_t compute_scratch_waves_wanted
;
1426 uint32_t esgs_ring_size_needed
;
1427 uint32_t gsvs_ring_size_needed
;
1428 bool tess_rings_needed
;
1429 bool gds_needed
; /* for GFX10 streamout and NGG GS queries */
1430 bool gds_oa_needed
; /* for GFX10 streamout */
1431 bool sample_positions_needed
;
1433 VkResult record_result
;
1435 uint64_t gfx9_fence_va
;
1436 uint32_t gfx9_fence_idx
;
1437 uint64_t gfx9_eop_bug_va
;
1440 * Whether a query pool has been resetted and we have to flush caches.
1442 bool pending_reset_query
;
1445 * Bitmask of pending active query flushes.
1447 enum radv_cmd_flush_bits active_query_flush_bits
;
1451 struct radv_image_view
;
1453 bool radv_cmd_buffer_uses_mec(struct radv_cmd_buffer
*cmd_buffer
);
1455 void si_emit_graphics(struct radv_device
*device
,
1456 struct radeon_cmdbuf
*cs
);
1457 void si_emit_compute(struct radv_device
*device
,
1458 struct radeon_cmdbuf
*cs
);
1460 void cik_create_gfx_config(struct radv_device
*device
);
1462 void si_write_viewport(struct radeon_cmdbuf
*cs
, int first_vp
,
1463 int count
, const VkViewport
*viewports
);
1464 void si_write_scissors(struct radeon_cmdbuf
*cs
, int first
,
1465 int count
, const VkRect2D
*scissors
,
1466 const VkViewport
*viewports
, bool can_use_guardband
);
1467 uint32_t si_get_ia_multi_vgt_param(struct radv_cmd_buffer
*cmd_buffer
,
1468 bool instanced_draw
, bool indirect_draw
,
1469 bool count_from_stream_output
,
1470 uint32_t draw_vertex_count
,
1472 void si_cs_emit_write_event_eop(struct radeon_cmdbuf
*cs
,
1473 enum chip_class chip_class
,
1475 unsigned event
, unsigned event_flags
,
1476 unsigned dst_sel
, unsigned data_sel
,
1479 uint64_t gfx9_eop_bug_va
);
1481 void radv_cp_wait_mem(struct radeon_cmdbuf
*cs
, uint32_t op
, uint64_t va
,
1482 uint32_t ref
, uint32_t mask
);
1483 void si_cs_emit_cache_flush(struct radeon_cmdbuf
*cs
,
1484 enum chip_class chip_class
,
1485 uint32_t *fence_ptr
, uint64_t va
,
1487 enum radv_cmd_flush_bits flush_bits
,
1488 uint64_t gfx9_eop_bug_va
);
1489 void si_emit_cache_flush(struct radv_cmd_buffer
*cmd_buffer
);
1490 void si_emit_set_predication_state(struct radv_cmd_buffer
*cmd_buffer
,
1491 bool inverted
, uint64_t va
);
1492 void si_cp_dma_buffer_copy(struct radv_cmd_buffer
*cmd_buffer
,
1493 uint64_t src_va
, uint64_t dest_va
,
1495 void si_cp_dma_prefetch(struct radv_cmd_buffer
*cmd_buffer
, uint64_t va
,
1497 void si_cp_dma_clear_buffer(struct radv_cmd_buffer
*cmd_buffer
, uint64_t va
,
1498 uint64_t size
, unsigned value
);
1499 void si_cp_dma_wait_for_idle(struct radv_cmd_buffer
*cmd_buffer
);
1501 void radv_set_db_count_control(struct radv_cmd_buffer
*cmd_buffer
);
1503 radv_cmd_buffer_upload_alloc(struct radv_cmd_buffer
*cmd_buffer
,
1506 unsigned *out_offset
,
1509 radv_cmd_buffer_set_subpass(struct radv_cmd_buffer
*cmd_buffer
,
1510 const struct radv_subpass
*subpass
);
1512 radv_cmd_buffer_upload_data(struct radv_cmd_buffer
*cmd_buffer
,
1513 unsigned size
, unsigned alignmnet
,
1514 const void *data
, unsigned *out_offset
);
1516 void radv_cmd_buffer_clear_subpass(struct radv_cmd_buffer
*cmd_buffer
);
1517 void radv_cmd_buffer_resolve_subpass(struct radv_cmd_buffer
*cmd_buffer
);
1518 void radv_cmd_buffer_resolve_subpass_cs(struct radv_cmd_buffer
*cmd_buffer
);
1519 void radv_depth_stencil_resolve_subpass_cs(struct radv_cmd_buffer
*cmd_buffer
,
1520 VkImageAspectFlags aspects
,
1521 VkResolveModeFlagBits resolve_mode
);
1522 void radv_cmd_buffer_resolve_subpass_fs(struct radv_cmd_buffer
*cmd_buffer
);
1523 void radv_depth_stencil_resolve_subpass_fs(struct radv_cmd_buffer
*cmd_buffer
,
1524 VkImageAspectFlags aspects
,
1525 VkResolveModeFlagBits resolve_mode
);
1526 void radv_emit_default_sample_locations(struct radeon_cmdbuf
*cs
, int nr_samples
);
1527 unsigned radv_get_default_max_sample_dist(int log_samples
);
1528 void radv_device_init_msaa(struct radv_device
*device
);
1530 void radv_update_ds_clear_metadata(struct radv_cmd_buffer
*cmd_buffer
,
1531 const struct radv_image_view
*iview
,
1532 VkClearDepthStencilValue ds_clear_value
,
1533 VkImageAspectFlags aspects
);
1535 void radv_update_color_clear_metadata(struct radv_cmd_buffer
*cmd_buffer
,
1536 const struct radv_image_view
*iview
,
1538 uint32_t color_values
[2]);
1540 void radv_update_fce_metadata(struct radv_cmd_buffer
*cmd_buffer
,
1541 struct radv_image
*image
,
1542 const VkImageSubresourceRange
*range
, bool value
);
1544 void radv_update_dcc_metadata(struct radv_cmd_buffer
*cmd_buffer
,
1545 struct radv_image
*image
,
1546 const VkImageSubresourceRange
*range
, bool value
);
1548 uint32_t radv_fill_buffer(struct radv_cmd_buffer
*cmd_buffer
,
1549 struct radeon_winsys_bo
*bo
,
1550 uint64_t offset
, uint64_t size
, uint32_t value
);
1551 void radv_cmd_buffer_trace_emit(struct radv_cmd_buffer
*cmd_buffer
);
1552 bool radv_get_memory_fd(struct radv_device
*device
,
1553 struct radv_device_memory
*memory
,
1555 void radv_free_memory(struct radv_device
*device
,
1556 const VkAllocationCallbacks
* pAllocator
,
1557 struct radv_device_memory
*mem
);
1560 radv_emit_shader_pointer_head(struct radeon_cmdbuf
*cs
,
1561 unsigned sh_offset
, unsigned pointer_count
,
1562 bool use_32bit_pointers
)
1564 radeon_emit(cs
, PKT3(PKT3_SET_SH_REG
, pointer_count
* (use_32bit_pointers
? 1 : 2), 0));
1565 radeon_emit(cs
, (sh_offset
- SI_SH_REG_OFFSET
) >> 2);
1569 radv_emit_shader_pointer_body(struct radv_device
*device
,
1570 struct radeon_cmdbuf
*cs
,
1571 uint64_t va
, bool use_32bit_pointers
)
1573 radeon_emit(cs
, va
);
1575 if (use_32bit_pointers
) {
1577 (va
>> 32) == device
->physical_device
->rad_info
.address32_hi
);
1579 radeon_emit(cs
, va
>> 32);
/* Emit a single shader pointer at sh_offset. Pointers flagged "global" are
 * always written as full 64-bit addresses; others use the 32-bit form.
 * NOTE(review): return type reconstructed as void (the function only emits
 * packet dwords) — confirm against the original header. */
static inline void
radv_emit_shader_pointer(struct radv_device *device,
			 struct radeon_cmdbuf *cs,
			 uint32_t sh_offset, uint64_t va, bool global)
{
	const bool use_32bit_pointers = !global;

	radv_emit_shader_pointer_head(cs, sh_offset, 1, use_32bit_pointers);
	radv_emit_shader_pointer_body(device, cs, va, use_32bit_pointers);
}
1594 static inline struct radv_descriptor_state
*
1595 radv_get_descriptors_state(struct radv_cmd_buffer
*cmd_buffer
,
1596 VkPipelineBindPoint bind_point
)
1598 assert(bind_point
== VK_PIPELINE_BIND_POINT_GRAPHICS
||
1599 bind_point
== VK_PIPELINE_BIND_POINT_COMPUTE
);
1600 return &cmd_buffer
->descriptors
[bind_point
];
1604 * Takes x,y,z as exact numbers of invocations, instead of blocks.
1606 * Limitations: Can't call normal dispatch functions without binding or rebinding
1607 * the compute pipeline.
1609 void radv_unaligned_dispatch(
1610 struct radv_cmd_buffer
*cmd_buffer
,
1616 struct vk_object_base base
;
1617 struct radeon_winsys_bo
*bo
;
1621 struct radv_shader_module
;
1623 #define RADV_HASH_SHADER_NO_NGG (1 << 0)
1624 #define RADV_HASH_SHADER_CS_WAVE32 (1 << 1)
1625 #define RADV_HASH_SHADER_PS_WAVE32 (1 << 2)
1626 #define RADV_HASH_SHADER_GE_WAVE32 (1 << 3)
1627 #define RADV_HASH_SHADER_LLVM (1 << 4)
1630 radv_hash_shaders(unsigned char *hash
,
1631 const VkPipelineShaderStageCreateInfo
**stages
,
1632 const struct radv_pipeline_layout
*layout
,
1633 const struct radv_pipeline_key
*key
,
1636 static inline gl_shader_stage
1637 vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage
)
1639 assert(__builtin_popcount(vk_stage
) == 1);
1640 return ffs(vk_stage
) - 1;
1643 static inline VkShaderStageFlagBits
1644 mesa_to_vk_shader_stage(gl_shader_stage mesa_stage
)
1646 return (1 << mesa_stage
);
1649 #define RADV_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)
1651 #define radv_foreach_stage(stage, stage_bits) \
1652 for (gl_shader_stage stage, \
1653 __tmp = (gl_shader_stage)((stage_bits) & RADV_STAGE_MASK); \
1654 stage = __builtin_ffs(__tmp) - 1, __tmp; \
1655 __tmp &= ~(1 << (stage)))
1657 extern const VkFormat radv_fs_key_format_exemplars
[NUM_META_FS_KEYS
];
1658 unsigned radv_format_meta_fs_key(VkFormat format
);
1660 struct radv_multisample_state
{
1662 uint32_t pa_sc_mode_cntl_0
;
1663 uint32_t pa_sc_mode_cntl_1
;
1664 uint32_t pa_sc_aa_config
;
1665 uint32_t pa_sc_aa_mask
[2];
1666 unsigned num_samples
;
1669 struct radv_prim_vertex_count
{
1674 struct radv_ia_multi_vgt_param_helpers
{
1676 bool partial_es_wave
;
1677 uint8_t primgroup_size
;
1678 bool ia_switch_on_eoi
;
1679 bool partial_vs_wave
;
1682 struct radv_binning_state
{
1683 uint32_t pa_sc_binner_cntl_0
;
1684 uint32_t db_dfsm_control
;
1687 #define SI_GS_PER_ES 128
1689 struct radv_pipeline
{
1690 struct vk_object_base base
;
1691 struct radv_device
* device
;
1692 struct radv_dynamic_state dynamic_state
;
1694 struct radv_pipeline_layout
* layout
;
1696 bool need_indirect_descriptor_sets
;
1697 struct radv_shader_variant
* shaders
[MESA_SHADER_STAGES
];
1698 struct radv_shader_variant
*gs_copy_shader
;
1699 VkShaderStageFlags active_stages
;
1701 struct radeon_cmdbuf cs
;
1702 uint32_t ctx_cs_hash
;
1703 struct radeon_cmdbuf ctx_cs
;
1705 uint32_t binding_stride
[MAX_VBS
];
1706 uint8_t num_vertex_bindings
;
1708 uint32_t user_data_0
[MESA_SHADER_STAGES
];
1711 struct radv_multisample_state ms
;
1712 struct radv_binning_state binning
;
1713 uint32_t spi_baryc_cntl
;
1714 bool prim_restart_enable
;
1715 unsigned esgs_ring_size
;
1716 unsigned gsvs_ring_size
;
1717 uint32_t vtx_base_sgpr
;
1718 struct radv_ia_multi_vgt_param_helpers ia_multi_vgt_param
;
1719 uint8_t vtx_emit_num
;
1720 bool can_use_guardband
;
1721 uint32_t needed_dynamic_state
;
1722 bool disable_out_of_order_rast_for_occlusion
;
1723 unsigned tess_patch_control_points
;
1724 unsigned pa_su_sc_mode_cntl
;
1725 unsigned db_depth_control
;
1726 bool uses_dynamic_stride
;
1728 /* Used for rbplus */
1729 uint32_t col_format
;
1730 uint32_t cb_target_mask
;
1735 unsigned scratch_bytes_per_wave
;
1737 /* Not NULL if graphics pipeline uses streamout. */
1738 struct radv_shader_variant
*streamout_shader
;
1741 static inline bool radv_pipeline_has_gs(const struct radv_pipeline
*pipeline
)
1743 return pipeline
->shaders
[MESA_SHADER_GEOMETRY
] ? true : false;
1746 static inline bool radv_pipeline_has_tess(const struct radv_pipeline
*pipeline
)
1748 return pipeline
->shaders
[MESA_SHADER_TESS_CTRL
] ? true : false;
1751 bool radv_pipeline_has_ngg(const struct radv_pipeline
*pipeline
);
1753 bool radv_pipeline_has_ngg_passthrough(const struct radv_pipeline
*pipeline
);
1755 bool radv_pipeline_has_gs_copy_shader(const struct radv_pipeline
*pipeline
);
1757 struct radv_userdata_info
*radv_lookup_user_sgpr(struct radv_pipeline
*pipeline
,
1758 gl_shader_stage stage
,
1761 struct radv_shader_variant
*radv_get_shader(const struct radv_pipeline
*pipeline
,
1762 gl_shader_stage stage
);
1764 struct radv_graphics_pipeline_create_info
{
1766 bool db_depth_clear
;
1767 bool db_stencil_clear
;
1768 bool db_depth_disable_expclear
;
1769 bool db_stencil_disable_expclear
;
1770 bool depth_compress_disable
;
1771 bool stencil_compress_disable
;
1772 bool resummarize_enable
;
1773 uint32_t custom_blend_mode
;
1777 radv_graphics_pipeline_create(VkDevice device
,
1778 VkPipelineCache cache
,
1779 const VkGraphicsPipelineCreateInfo
*pCreateInfo
,
1780 const struct radv_graphics_pipeline_create_info
*extra
,
1781 const VkAllocationCallbacks
*alloc
,
1782 VkPipeline
*pPipeline
);
1784 struct radv_binning_settings
{
1785 unsigned context_states_per_bin
; /* allowed range: [1, 6] */
1786 unsigned persistent_states_per_bin
; /* allowed range: [1, 32] */
1787 unsigned fpovs_per_batch
; /* allowed range: [0, 255], 0 = unlimited */
1790 struct radv_binning_settings
1791 radv_get_binning_settings(const struct radv_physical_device
*pdev
);
1793 struct vk_format_description
;
1794 uint32_t radv_translate_buffer_dataformat(const struct vk_format_description
*desc
,
1795 int first_non_void
);
1796 uint32_t radv_translate_buffer_numformat(const struct vk_format_description
*desc
,
1797 int first_non_void
);
1798 bool radv_is_buffer_format_supported(VkFormat format
, bool *scaled
);
1799 uint32_t radv_translate_colorformat(VkFormat format
);
1800 uint32_t radv_translate_color_numformat(VkFormat format
,
1801 const struct vk_format_description
*desc
,
1802 int first_non_void
);
1803 uint32_t radv_colorformat_endian_swap(uint32_t colorformat
);
1804 unsigned radv_translate_colorswap(VkFormat format
, bool do_endian_swap
);
1805 uint32_t radv_translate_dbformat(VkFormat format
);
1806 uint32_t radv_translate_tex_dataformat(VkFormat format
,
1807 const struct vk_format_description
*desc
,
1808 int first_non_void
);
1809 uint32_t radv_translate_tex_numformat(VkFormat format
,
1810 const struct vk_format_description
*desc
,
1811 int first_non_void
);
1812 bool radv_format_pack_clear_color(VkFormat format
,
1813 uint32_t clear_vals
[2],
1814 VkClearColorValue
*value
);
1815 bool radv_is_colorbuffer_format_supported(VkFormat format
, bool *blendable
);
1816 bool radv_dcc_formats_compatible(VkFormat format1
,
1818 bool radv_device_supports_etc(struct radv_physical_device
*physical_device
);
1820 struct radv_image_plane
{
1822 struct radeon_surf surface
;
1827 struct vk_object_base base
;
1829 /* The original VkFormat provided by the client. This may not match any
1830 * of the actual surface formats.
1833 VkImageAspectFlags aspects
;
1834 VkImageUsageFlags usage
; /**< Superset of VkImageCreateInfo::usage. */
1835 struct ac_surf_info info
;
1836 VkImageTiling tiling
; /** VkImageCreateInfo::tiling */
1837 VkImageCreateFlags flags
; /** VkImageCreateInfo::flags */
1842 unsigned queue_family_mask
;
1846 /* Set when bound */
1847 struct radeon_winsys_bo
*bo
;
1848 VkDeviceSize offset
;
1849 bool tc_compatible_htile
;
1850 bool tc_compatible_cmask
;
1852 uint64_t clear_value_offset
;
1853 uint64_t fce_pred_offset
;
1854 uint64_t dcc_pred_offset
;
1857 * Metadata for the TC-compat zrange workaround. If the 32-bit value
1858 * stored at this offset is UINT_MAX, the driver will emit
1859 * DB_Z_INFO.ZRANGE_PRECISION=0, otherwise it will skip the
1860 * SET_CONTEXT_REG packet.
1862 uint64_t tc_compat_zrange_offset
;
1864 /* For VK_ANDROID_native_buffer, the WSI image owns the memory, */
1865 VkDeviceMemory owned_memory
;
1867 unsigned plane_count
;
1868 struct radv_image_plane planes
[0];
1871 /* Whether the image has a htile that is known consistent with the contents of
1872 * the image and is allowed to be in compressed form.
1874 * If this is false reads that don't use the htile should be able to return
1877 bool radv_layout_is_htile_compressed(const struct radv_image
*image
,
1878 VkImageLayout layout
,
1879 bool in_render_loop
,
1880 unsigned queue_mask
);
1882 bool radv_layout_can_fast_clear(const struct radv_image
*image
,
1883 VkImageLayout layout
,
1884 bool in_render_loop
,
1885 unsigned queue_mask
);
1887 bool radv_layout_dcc_compressed(const struct radv_device
*device
,
1888 const struct radv_image
*image
,
1889 VkImageLayout layout
,
1890 bool in_render_loop
,
1891 unsigned queue_mask
);
1894 * Return whether the image has CMASK metadata for color surfaces.
1897 radv_image_has_cmask(const struct radv_image
*image
)
1899 return image
->planes
[0].surface
.cmask_offset
;
1903 * Return whether the image has FMASK metadata for color surfaces.
1906 radv_image_has_fmask(const struct radv_image
*image
)
1908 return image
->planes
[0].surface
.fmask_offset
;
1912 * Return whether the image has DCC metadata for color surfaces.
1915 radv_image_has_dcc(const struct radv_image
*image
)
1917 return image
->planes
[0].surface
.dcc_size
;
1921 * Return whether the image is TC-compatible CMASK.
1924 radv_image_is_tc_compat_cmask(const struct radv_image
*image
)
1926 return radv_image_has_fmask(image
) && image
->tc_compatible_cmask
;
1930 * Return whether DCC metadata is enabled for a level.
1933 radv_dcc_enabled(const struct radv_image
*image
, unsigned level
)
1935 return radv_image_has_dcc(image
) &&
1936 level
< image
->planes
[0].surface
.num_dcc_levels
;
/**
 * Return whether the image has any color-buffer metadata
 * (CMASK, FMASK or DCC).
 *
 * NOTE(review): return type reconstructed as bool from the "return whether"
 * contract — confirm against the original header.
 */
static inline bool
radv_image_has_CB_metadata(const struct radv_image *image)
{
	if (radv_image_has_cmask(image))
		return true;
	if (radv_image_has_fmask(image))
		return true;
	return radv_image_has_dcc(image);
}
1951 * Return whether the image has HTILE metadata for depth surfaces.
1954 radv_image_has_htile(const struct radv_image
*image
)
1956 return image
->planes
[0].surface
.htile_size
;
/**
 * Return whether HTILE metadata is enabled for a given mip level
 * (only the base level is covered by HTILE).
 *
 * NOTE(review): return type reconstructed as bool from the "return whether"
 * contract — confirm against the original header.
 */
static inline bool
radv_htile_enabled(const struct radv_image *image, unsigned level)
{
	return level == 0 && radv_image_has_htile(image);
}
1969 * Return whether the image is TC-compatible HTILE.
1972 radv_image_is_tc_compat_htile(const struct radv_image
*image
)
1974 return radv_image_has_htile(image
) && image
->tc_compatible_htile
;
1977 static inline uint64_t
1978 radv_image_get_fast_clear_va(const struct radv_image
*image
,
1979 uint32_t base_level
)
1981 uint64_t va
= radv_buffer_get_va(image
->bo
);
1982 va
+= image
->offset
+ image
->clear_value_offset
+ base_level
* 8;
1986 static inline uint64_t
1987 radv_image_get_fce_pred_va(const struct radv_image
*image
,
1988 uint32_t base_level
)
1990 uint64_t va
= radv_buffer_get_va(image
->bo
);
1991 va
+= image
->offset
+ image
->fce_pred_offset
+ base_level
* 8;
1995 static inline uint64_t
1996 radv_image_get_dcc_pred_va(const struct radv_image
*image
,
1997 uint32_t base_level
)
1999 uint64_t va
= radv_buffer_get_va(image
->bo
);
2000 va
+= image
->offset
+ image
->dcc_pred_offset
+ base_level
* 8;
2004 static inline uint64_t
2005 radv_get_tc_compat_zrange_va(const struct radv_image
*image
,
2006 uint32_t base_level
)
2008 uint64_t va
= radv_buffer_get_va(image
->bo
);
2009 va
+= image
->offset
+ image
->tc_compat_zrange_offset
+ base_level
* 4;
2013 static inline uint64_t
2014 radv_get_ds_clear_value_va(const struct radv_image
*image
,
2015 uint32_t base_level
)
2017 uint64_t va
= radv_buffer_get_va(image
->bo
);
2018 va
+= image
->offset
+ image
->clear_value_offset
+ base_level
* 8;
2022 unsigned radv_image_queue_family_mask(const struct radv_image
*image
, uint32_t family
, uint32_t queue_family
);
2024 static inline uint32_t
2025 radv_get_layerCount(const struct radv_image
*image
,
2026 const VkImageSubresourceRange
*range
)
2028 return range
->layerCount
== VK_REMAINING_ARRAY_LAYERS
?
2029 image
->info
.array_size
- range
->baseArrayLayer
: range
->layerCount
;
2032 static inline uint32_t
2033 radv_get_levelCount(const struct radv_image
*image
,
2034 const VkImageSubresourceRange
*range
)
2036 return range
->levelCount
== VK_REMAINING_MIP_LEVELS
?
2037 image
->info
.levels
- range
->baseMipLevel
: range
->levelCount
;
2040 struct radeon_bo_metadata
;
2042 radv_init_metadata(struct radv_device
*device
,
2043 struct radv_image
*image
,
2044 struct radeon_bo_metadata
*metadata
);
2047 radv_image_override_offset_stride(struct radv_device
*device
,
2048 struct radv_image
*image
,
2049 uint64_t offset
, uint32_t stride
);
2051 union radv_descriptor
{
2053 uint32_t plane0_descriptor
[8];
2054 uint32_t fmask_descriptor
[8];
2057 uint32_t plane_descriptors
[3][8];
2061 struct radv_image_view
{
2062 struct vk_object_base base
;
2063 struct radv_image
*image
; /**< VkImageViewCreateInfo::image */
2064 struct radeon_winsys_bo
*bo
;
2066 VkImageViewType type
;
2067 VkImageAspectFlags aspect_mask
;
2070 bool multiple_planes
;
2071 uint32_t base_layer
;
2072 uint32_t layer_count
;
2074 uint32_t level_count
;
2075 VkExtent3D extent
; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */
2077 union radv_descriptor descriptor
;
2079 /* Descriptor for use as a storage image as opposed to a sampled image.
2080 * This has a few differences for cube maps (e.g. type).
2082 union radv_descriptor storage_descriptor
;
2085 struct radv_image_create_info
{
2086 const VkImageCreateInfo
*vk_info
;
2088 bool no_metadata_planes
;
2089 const struct radeon_bo_metadata
*bo_metadata
;
2093 radv_image_create_layout(struct radv_device
*device
,
2094 struct radv_image_create_info create_info
,
2095 struct radv_image
*image
);
2097 VkResult
radv_image_create(VkDevice _device
,
2098 const struct radv_image_create_info
*info
,
2099 const VkAllocationCallbacks
* alloc
,
2102 bool vi_alpha_is_on_msb(struct radv_device
*device
, VkFormat format
);
2105 radv_image_from_gralloc(VkDevice device_h
,
2106 const VkImageCreateInfo
*base_info
,
2107 const VkNativeBufferANDROID
*gralloc_info
,
2108 const VkAllocationCallbacks
*alloc
,
2109 VkImage
*out_image_h
);
2111 radv_ahb_usage_from_vk_usage(const VkImageCreateFlags vk_create
,
2112 const VkImageUsageFlags vk_usage
);
2114 radv_import_ahb_memory(struct radv_device
*device
,
2115 struct radv_device_memory
*mem
,
2117 const VkImportAndroidHardwareBufferInfoANDROID
*info
);
2119 radv_create_ahb_memory(struct radv_device
*device
,
2120 struct radv_device_memory
*mem
,
2122 const VkMemoryAllocateInfo
*pAllocateInfo
);
2125 radv_select_android_external_format(const void *next
, VkFormat default_format
);
2127 bool radv_android_gralloc_supports_format(VkFormat format
, VkImageUsageFlagBits usage
);
2129 struct radv_image_view_extra_create_info
{
2130 bool disable_compression
;
2133 void radv_image_view_init(struct radv_image_view
*view
,
2134 struct radv_device
*device
,
2135 const VkImageViewCreateInfo
*pCreateInfo
,
2136 const struct radv_image_view_extra_create_info
* extra_create_info
);
2138 VkFormat
radv_get_aspect_format(struct radv_image
*image
, VkImageAspectFlags mask
);
2140 struct radv_sampler_ycbcr_conversion
{
2141 struct vk_object_base base
;
2143 VkSamplerYcbcrModelConversion ycbcr_model
;
2144 VkSamplerYcbcrRange ycbcr_range
;
2145 VkComponentMapping components
;
2146 VkChromaLocation chroma_offsets
[2];
2147 VkFilter chroma_filter
;
2150 struct radv_buffer_view
{
2151 struct vk_object_base base
;
2152 struct radeon_winsys_bo
*bo
;
2154 uint64_t range
; /**< VkBufferViewCreateInfo::range */
2157 void radv_buffer_view_init(struct radv_buffer_view
*view
,
2158 struct radv_device
*device
,
2159 const VkBufferViewCreateInfo
* pCreateInfo
);
2161 static inline struct VkExtent3D
2162 radv_sanitize_image_extent(const VkImageType imageType
,
2163 const struct VkExtent3D imageExtent
)
2165 switch (imageType
) {
2166 case VK_IMAGE_TYPE_1D
:
2167 return (VkExtent3D
) { imageExtent
.width
, 1, 1 };
2168 case VK_IMAGE_TYPE_2D
:
2169 return (VkExtent3D
) { imageExtent
.width
, imageExtent
.height
, 1 };
2170 case VK_IMAGE_TYPE_3D
:
2173 unreachable("invalid image type");
2177 static inline struct VkOffset3D
2178 radv_sanitize_image_offset(const VkImageType imageType
,
2179 const struct VkOffset3D imageOffset
)
2181 switch (imageType
) {
2182 case VK_IMAGE_TYPE_1D
:
2183 return (VkOffset3D
) { imageOffset
.x
, 0, 0 };
2184 case VK_IMAGE_TYPE_2D
:
2185 return (VkOffset3D
) { imageOffset
.x
, imageOffset
.y
, 0 };
2186 case VK_IMAGE_TYPE_3D
:
2189 unreachable("invalid image type");
2194 radv_image_extent_compare(const struct radv_image
*image
,
2195 const VkExtent3D
*extent
)
2197 if (extent
->width
!= image
->info
.width
||
2198 extent
->height
!= image
->info
.height
||
2199 extent
->depth
!= image
->info
.depth
)
2204 struct radv_sampler
{
2205 struct vk_object_base base
;
2207 struct radv_sampler_ycbcr_conversion
*ycbcr_sampler
;
2208 uint32_t border_color_slot
;
2211 struct radv_framebuffer
{
2212 struct vk_object_base base
;
2217 uint32_t attachment_count
;
2218 struct radv_image_view
*attachments
[0];
2221 struct radv_subpass_barrier
{
2222 VkPipelineStageFlags src_stage_mask
;
2223 VkAccessFlags src_access_mask
;
2224 VkAccessFlags dst_access_mask
;
2227 void radv_subpass_barrier(struct radv_cmd_buffer
*cmd_buffer
,
2228 const struct radv_subpass_barrier
*barrier
);
2230 struct radv_subpass_attachment
{
2231 uint32_t attachment
;
2232 VkImageLayout layout
;
2233 VkImageLayout stencil_layout
;
2234 bool in_render_loop
;
2237 struct radv_subpass
{
2238 uint32_t attachment_count
;
2239 struct radv_subpass_attachment
* attachments
;
2241 uint32_t input_count
;
2242 uint32_t color_count
;
2243 struct radv_subpass_attachment
* input_attachments
;
2244 struct radv_subpass_attachment
* color_attachments
;
2245 struct radv_subpass_attachment
* resolve_attachments
;
2246 struct radv_subpass_attachment
* depth_stencil_attachment
;
2247 struct radv_subpass_attachment
* ds_resolve_attachment
;
2248 VkResolveModeFlagBits depth_resolve_mode
;
2249 VkResolveModeFlagBits stencil_resolve_mode
;
2251 /** Subpass has at least one color resolve attachment */
2252 bool has_color_resolve
;
2254 /** Subpass has at least one color attachment */
2257 struct radv_subpass_barrier start_barrier
;
2261 VkSampleCountFlagBits color_sample_count
;
2262 VkSampleCountFlagBits depth_sample_count
;
2263 VkSampleCountFlagBits max_sample_count
;
2267 radv_get_subpass_id(struct radv_cmd_buffer
*cmd_buffer
);
2269 struct radv_render_pass_attachment
{
2272 VkAttachmentLoadOp load_op
;
2273 VkAttachmentLoadOp stencil_load_op
;
2274 VkImageLayout initial_layout
;
2275 VkImageLayout final_layout
;
2276 VkImageLayout stencil_initial_layout
;
2277 VkImageLayout stencil_final_layout
;
2279 /* The subpass id in which the attachment will be used first/last. */
2280 uint32_t first_subpass_idx
;
2281 uint32_t last_subpass_idx
;
2284 struct radv_render_pass
{
2285 struct vk_object_base base
;
2286 uint32_t attachment_count
;
2287 uint32_t subpass_count
;
2288 struct radv_subpass_attachment
* subpass_attachments
;
2289 struct radv_render_pass_attachment
* attachments
;
2290 struct radv_subpass_barrier end_barrier
;
2291 struct radv_subpass subpasses
[0];
2294 VkResult
radv_device_init_meta(struct radv_device
*device
);
2295 void radv_device_finish_meta(struct radv_device
*device
);
2297 struct radv_query_pool
{
2298 struct vk_object_base base
;
2299 struct radeon_winsys_bo
*bo
;
2301 uint32_t availability_offset
;
2305 uint32_t pipeline_stats_mask
;
2309 RADV_SEMAPHORE_NONE
,
2310 RADV_SEMAPHORE_WINSYS
,
2311 RADV_SEMAPHORE_SYNCOBJ
,
2312 RADV_SEMAPHORE_TIMELINE_SYNCOBJ
,
2313 RADV_SEMAPHORE_TIMELINE
,
2314 } radv_semaphore_kind
;
2316 struct radv_deferred_queue_submission
;
2318 struct radv_timeline_waiter
{
2319 struct list_head list
;
2320 struct radv_deferred_queue_submission
*submission
;
2324 struct radv_timeline_point
{
2325 struct list_head list
;
2330 /* Separate from the list to accomodate CPU wait being async, as well
2331 * as prevent point deletion during submission. */
2332 unsigned wait_count
;
2335 struct radv_timeline
{
2336 /* Using a pthread mutex to be compatible with condition variables. */
2337 pthread_mutex_t mutex
;
2339 uint64_t highest_signaled
;
2340 uint64_t highest_submitted
;
2342 struct list_head points
;
2344 /* Keep free points on hand so we do not have to recreate syncobjs all
2346 struct list_head free_points
;
2348 /* Submissions that are deferred waiting for a specific value to be
2350 struct list_head waiters
;
/* Kernel-backed timeline syncobj state. */
struct radv_timeline_syncobj {
	/* Keep syncobj first, so common-code can just handle this as
	 * non-timeline syncobj. */
	/* NOTE(review): the leading syncobj handle field implied by the
	 * comment above was lost in the source mangling — restore it. */
	uint64_t max_point; /* max submitted point. */
};
/* One payload slot of a VkSemaphore; `kind` selects which member is valid. */
struct radv_semaphore_part {
	radv_semaphore_kind kind;
	/* NOTE(review): upstream wraps the members below (plus a syncobj
	 * handle) in an anonymous union — those lines were lost in the
	 * source mangling; restore before relying on this layout. */
	struct radeon_winsys_sem *ws_sem;
	struct radv_timeline timeline;
	struct radv_timeline_syncobj timeline_syncobj;
};
/* VkSemaphore: a permanent payload plus an optional temporary payload
 * (temporary takes precedence until consumed, per the Vulkan spec's
 * semaphore import semantics). */
struct radv_semaphore {
	struct vk_object_base base;
	struct radv_semaphore_part permanent;
	struct radv_semaphore_part temporary;
};
/* Driver-internal submit of an already-built command stream, bypassing the
 * public vkQueueSubmit path. */
bool radv_queue_internal_submit(struct radv_queue *queue,
				struct radeon_cmdbuf *cs);

void radv_set_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
			     VkPipelineBindPoint bind_point,
			     struct radv_descriptor_set *set,
			     unsigned idx); /* NOTE(review): trailing param lost in
					     * source mangling; restored — verify */

void radv_update_descriptor_sets(struct radv_device *device,
				 struct radv_cmd_buffer *cmd_buffer,
				 VkDescriptorSet overrideSet,
				 uint32_t descriptorWriteCount,
				 const VkWriteDescriptorSet *pDescriptorWrites,
				 uint32_t descriptorCopyCount,
				 const VkCopyDescriptorSet *pDescriptorCopies);

void radv_update_descriptor_set_with_template(struct radv_device *device,
					      struct radv_cmd_buffer *cmd_buffer,
					      struct radv_descriptor_set *set,
					      VkDescriptorUpdateTemplate descriptorUpdateTemplate,
					      const void *pData); /* NOTE(review): final param lost
								   * in source mangling; restored — verify */

void radv_meta_push_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
				   VkPipelineBindPoint pipelineBindPoint,
				   VkPipelineLayout _layout,
				   uint32_t set, /* NOTE(review): param line lost in
						  * source mangling; restored — verify */
				   uint32_t descriptorWriteCount,
				   const VkWriteDescriptorSet *pDescriptorWrites);

/* Initialize DCC metadata for `range` of `image` to `value`. */
void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer,
			 struct radv_image *image,
			 const VkImageSubresourceRange *range, uint32_t value);
/* Initialize FMASK metadata for `range` of `image`. */
void radv_initialize_fmask(struct radv_cmd_buffer *cmd_buffer,
			   struct radv_image *image,
			   const VkImageSubresourceRange *range);
/* One payload slot of a VkFence; `kind` selects which member is valid.
 * (The radv_fence_kind enum definition was lost in the source mangling.) */
struct radv_fence_part {
	radv_fence_kind kind;

	/* NOTE(review): upstream wraps the members below (plus a DRM syncobj
	 * handle) in an anonymous union — those lines were lost in the
	 * source mangling; restore before relying on this layout. */
	/* AMDGPU winsys fence. */
	struct radeon_winsys_fence *fence;
	/* DRM syncobj handle for syncobj-based fences. */
	struct wsi_fence *fence_wsi;
};
/* VkFence: permanent payload plus an optional temporary payload (import
 * semantics).  NOTE(review): the struct opener line was lost in the source
 * mangling and has been restored. */
struct radv_fence {
	struct vk_object_base base;
	struct radv_fence_part permanent;
	struct radv_fence_part temporary;
};
/* radv_nir_to_llvm.c */
struct radv_shader_args;

/* Compile one or more NIR shaders (a merged stage group) through the LLVM
 * backend into a shader binary. */
void llvm_compile_shader(struct radv_device *device,
			 unsigned shader_count,
			 struct nir_shader *const *shaders,
			 struct radv_shader_binary **binary,
			 struct radv_shader_args *args);

unsigned radv_nir_get_max_workgroup_size(enum chip_class chip_class,
					 gl_shader_stage stage,
					 const struct nir_shader *nir);

/* radv_shader_info.h */
struct radv_shader_info;
struct radv_shader_variant_key;

/* Gather per-shader information needed for compilation into *info. */
void radv_nir_shader_info_pass(const struct nir_shader *nir,
			       const struct radv_pipeline_layout *layout,
			       const struct radv_shader_variant_key *key,
			       struct radv_shader_info *info
			       /* NOTE(review): trailing parameter(s) lost in
				* source mangling — restore from upstream */);

void radv_nir_shader_info_init(struct radv_shader_info *info);
/* Per-shader-engine SQTT (thread trace) status registers read back after a
 * capture. */
struct radv_thread_trace_info {
	uint32_t cur_offset;
	uint32_t trace_status;
	/* NOTE(review): upstream places the two counters below in an
	 * anonymous union (one is valid per generation) — the union lines
	 * were lost in the source mangling. */
	uint32_t gfx9_write_counter;
	uint32_t gfx10_dropped_cntr;
};
/* Captured thread-trace data for one shader engine. */
struct radv_thread_trace_se {
	struct radv_thread_trace_info info;
	/* NOTE(review): a field (the captured data pointer) was lost here in
	 * the source mangling — restore from upstream. */
	uint32_t shader_engine;
	uint32_t compute_unit;
};
/* A complete thread-trace capture: one entry per traced shader engine
 * (fixed upper bound of 4 SEs). */
struct radv_thread_trace {
	uint32_t num_traces;
	struct radv_thread_trace_se traces[4];
};
/* SQTT (thread trace) capture lifecycle: init/finish allocate and release
 * device state; begin/end bracket a capture on a queue; get copies the
 * results out; dump writes an RGP-consumable file. */
bool radv_thread_trace_init(struct radv_device *device);
void radv_thread_trace_finish(struct radv_device *device);
bool radv_begin_thread_trace(struct radv_queue *queue);
bool radv_end_thread_trace(struct radv_queue *queue);
bool radv_get_thread_trace(struct radv_queue *queue,
			   struct radv_thread_trace *thread_trace);
/* Emit `num_dwords` of user data markers into the trace stream. */
void radv_emit_thread_trace_userdata(struct radeon_cmdbuf *cs,
				     const void *data, uint32_t num_dwords);

int radv_dump_thread_trace(struct radv_device *device,
			   const struct radv_thread_trace *trace);
/* radv_sqtt_layer_.c */
/* Describes which metadata/layout-transition operations a barrier performed;
 * reported to RGP as part of the barrier marker. */
struct radv_barrier_data {
	/* NOTE(review): upstream nests these bitfields in
	 * `union { struct { ... }; uint16_t all; }` — the opener lines were
	 * lost in the source mangling; restored minimally as a struct. */
	struct {
		uint16_t depth_stencil_expand : 1;
		uint16_t htile_hiz_range_expand : 1;
		uint16_t depth_stencil_resummarize : 1;
		uint16_t dcc_decompress : 1;
		uint16_t fmask_decompress : 1;
		uint16_t fast_clear_eliminate : 1;
		uint16_t fmask_color_expand : 1;
		uint16_t init_mask_ram : 1;
		uint16_t reserved : 8;
	} layout_transitions;
};
/**
 * Value for the reason field of an RGP barrier start marker originating from
 * the Vulkan client (does not include PAL-defined values). (Table 15)
 */
enum rgp_barrier_reason {
	RGP_BARRIER_UNKNOWN_REASON = 0xFFFFFFFF,

	/* External app-generated barrier reasons, i.e. API synchronization
	 * commands Range of valid values: [0x00000001 ... 0x7FFFFFFF]. */
	RGP_BARRIER_EXTERNAL_CMD_PIPELINE_BARRIER = 0x00000001,
	RGP_BARRIER_EXTERNAL_RENDER_PASS_SYNC = 0x00000002,
	RGP_BARRIER_EXTERNAL_CMD_WAIT_EVENTS = 0x00000003,

	/* Internal barrier reasons, i.e. implicit synchronization inserted by
	 * the Vulkan driver Range of valid values: [0xC0000000 ... 0xFFFFFFFE]. */
	RGP_BARRIER_INTERNAL_BASE = 0xC0000000,
	RGP_BARRIER_INTERNAL_PRE_RESET_QUERY_POOL_SYNC = RGP_BARRIER_INTERNAL_BASE + 0,
	RGP_BARRIER_INTERNAL_POST_RESET_QUERY_POOL_SYNC = RGP_BARRIER_INTERNAL_BASE + 1,
	RGP_BARRIER_INTERNAL_GPU_EVENT_RECYCLE_STALL = RGP_BARRIER_INTERNAL_BASE + 2,
	RGP_BARRIER_INTERNAL_PRE_COPY_QUERY_POOL_RESULTS_SYNC = RGP_BARRIER_INTERNAL_BASE + 3
};
/* SQTT/RGP instrumentation hooks: emit markers describing command-buffer
 * events into the thread-trace stream (no-ops when tracing is off —
 * presumably; confirm in the implementation). */
void radv_describe_begin_cmd_buffer(struct radv_cmd_buffer *cmd_buffer);
void radv_describe_end_cmd_buffer(struct radv_cmd_buffer *cmd_buffer);
void radv_describe_draw(struct radv_cmd_buffer *cmd_buffer);
void radv_describe_dispatch(struct radv_cmd_buffer *cmd_buffer, int x, int y, int z);
void radv_describe_begin_render_pass_clear(struct radv_cmd_buffer *cmd_buffer,
					   VkImageAspectFlagBits aspects);
void radv_describe_end_render_pass_clear(struct radv_cmd_buffer *cmd_buffer);
void radv_describe_barrier_start(struct radv_cmd_buffer *cmd_buffer,
				 enum rgp_barrier_reason reason);
void radv_describe_barrier_end(struct radv_cmd_buffer *cmd_buffer);
void radv_describe_layout_transition(struct radv_cmd_buffer *cmd_buffer,
				     const struct radv_barrier_data *barrier);

struct radeon_winsys_sem;

/* Current monotonic time (units not visible here — presumably nanoseconds;
 * confirm at the definition). */
uint64_t radv_get_current_time(void);
2562 static inline uint32_t
2563 si_conv_gl_prim_to_vertices(unsigned gl_prim
)
2566 case 0: /* GL_POINTS */
2568 case 1: /* GL_LINES */
2569 case 3: /* GL_LINE_STRIP */
2571 case 4: /* GL_TRIANGLES */
2572 case 5: /* GL_TRIANGLE_STRIP */
2574 case 0xA: /* GL_LINE_STRIP_ADJACENCY_ARB */
2576 case 0xc: /* GL_TRIANGLES_ADJACENCY_ARB */
2578 case 7: /* GL_QUADS */
2579 return V_028A6C_OUTPRIM_TYPE_TRISTRIP
;
/* Shared implementation behind vkCmdBeginRenderPass/vkCmdEndRenderPass. */
void radv_cmd_buffer_begin_render_pass(struct radv_cmd_buffer *cmd_buffer,
				       const VkRenderPassBeginInfo *pRenderPassBegin);
void radv_cmd_buffer_end_render_pass(struct radv_cmd_buffer *cmd_buffer);
2590 static inline uint32_t si_translate_prim(unsigned topology
)
2593 case VK_PRIMITIVE_TOPOLOGY_POINT_LIST
:
2594 return V_008958_DI_PT_POINTLIST
;
2595 case VK_PRIMITIVE_TOPOLOGY_LINE_LIST
:
2596 return V_008958_DI_PT_LINELIST
;
2597 case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP
:
2598 return V_008958_DI_PT_LINESTRIP
;
2599 case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST
:
2600 return V_008958_DI_PT_TRILIST
;
2601 case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP
:
2602 return V_008958_DI_PT_TRISTRIP
;
2603 case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN
:
2604 return V_008958_DI_PT_TRIFAN
;
2605 case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY
:
2606 return V_008958_DI_PT_LINELIST_ADJ
;
2607 case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY
:
2608 return V_008958_DI_PT_LINESTRIP_ADJ
;
2609 case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY
:
2610 return V_008958_DI_PT_TRILIST_ADJ
;
2611 case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY
:
2612 return V_008958_DI_PT_TRISTRIP_ADJ
;
2613 case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST
:
2614 return V_008958_DI_PT_PATCH
;
2621 static inline uint32_t si_translate_stencil_op(enum VkStencilOp op
)
2624 case VK_STENCIL_OP_KEEP
:
2625 return V_02842C_STENCIL_KEEP
;
2626 case VK_STENCIL_OP_ZERO
:
2627 return V_02842C_STENCIL_ZERO
;
2628 case VK_STENCIL_OP_REPLACE
:
2629 return V_02842C_STENCIL_REPLACE_TEST
;
2630 case VK_STENCIL_OP_INCREMENT_AND_CLAMP
:
2631 return V_02842C_STENCIL_ADD_CLAMP
;
2632 case VK_STENCIL_OP_DECREMENT_AND_CLAMP
:
2633 return V_02842C_STENCIL_SUB_CLAMP
;
2634 case VK_STENCIL_OP_INVERT
:
2635 return V_02842C_STENCIL_INVERT
;
2636 case VK_STENCIL_OP_INCREMENT_AND_WRAP
:
2637 return V_02842C_STENCIL_ADD_WRAP
;
2638 case VK_STENCIL_OP_DECREMENT_AND_WRAP
:
2639 return V_02842C_STENCIL_SUB_WRAP
;
/*
 * Helper used for debugging compiler issues by enabling/disabling LLVM for a
 * specific shader stage (developers only).
 *
 * Currently only reads the device-wide use_llvm flag; the stage parameter is
 * unused (kept so per-stage overrides can be patched in locally).
 * NOTE(review): the return-type line was lost in the source mangling;
 * restored as `static inline bool` per the bool field it returns — verify.
 */
static inline bool
radv_use_llvm_for_stage(struct radv_device *device, UNUSED gl_shader_stage stage)
{
	return device->physical_device->use_llvm;
}
/* Cast helpers between Vulkan handles and driver structs.
 * Dispatchable handles (VkDevice, VkQueue, ...) are pointers, so a plain
 * pointer cast suffices; non-dispatchable handles are 64-bit integers, so
 * they round-trip through uintptr_t. */
#define RADV_DEFINE_HANDLE_CASTS(__radv_type, __VkType)		\
								\
	static inline struct __radv_type *			\
	__radv_type ## _from_handle(__VkType _handle)		\
	{							\
		return (struct __radv_type *) _handle;		\
	}							\
								\
	static inline __VkType					\
	__radv_type ## _to_handle(struct __radv_type *_obj)	\
	{							\
		return (__VkType) _obj;				\
	}

#define RADV_DEFINE_NONDISP_HANDLE_CASTS(__radv_type, __VkType)	\
								\
	static inline struct __radv_type *			\
	__radv_type ## _from_handle(__VkType _handle)		\
	{							\
		return (struct __radv_type *)(uintptr_t) _handle; \
	}							\
								\
	static inline __VkType					\
	__radv_type ## _to_handle(struct __radv_type *_obj)	\
	{							\
		return (__VkType)(uintptr_t) _obj;		\
	}

/* Declare-and-unwrap in one statement, e.g.
 * RADV_FROM_HANDLE(radv_device, device, _device); */
#define RADV_FROM_HANDLE(__radv_type, __name, __handle)		\
	struct __radv_type *__name = __radv_type ## _from_handle(__handle)
2686 RADV_DEFINE_HANDLE_CASTS(radv_cmd_buffer
, VkCommandBuffer
)
2687 RADV_DEFINE_HANDLE_CASTS(radv_device
, VkDevice
)
2688 RADV_DEFINE_HANDLE_CASTS(radv_instance
, VkInstance
)
2689 RADV_DEFINE_HANDLE_CASTS(radv_physical_device
, VkPhysicalDevice
)
2690 RADV_DEFINE_HANDLE_CASTS(radv_queue
, VkQueue
)
2692 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_cmd_pool
, VkCommandPool
)
2693 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_buffer
, VkBuffer
)
2694 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_buffer_view
, VkBufferView
)
2695 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_descriptor_pool
, VkDescriptorPool
)
2696 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_descriptor_set
, VkDescriptorSet
)
2697 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_descriptor_set_layout
, VkDescriptorSetLayout
)
2698 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_descriptor_update_template
, VkDescriptorUpdateTemplate
)
2699 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_device_memory
, VkDeviceMemory
)
2700 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_fence
, VkFence
)
2701 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_event
, VkEvent
)
2702 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_framebuffer
, VkFramebuffer
)
2703 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_image
, VkImage
)
2704 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_image_view
, VkImageView
);
2705 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_pipeline_cache
, VkPipelineCache
)
2706 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_pipeline
, VkPipeline
)
2707 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_pipeline_layout
, VkPipelineLayout
)
2708 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_query_pool
, VkQueryPool
)
2709 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_render_pass
, VkRenderPass
)
2710 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_sampler
, VkSampler
)
2711 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_sampler_ycbcr_conversion
, VkSamplerYcbcrConversion
)
2712 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_shader_module
, VkShaderModule
)
2713 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_semaphore
, VkSemaphore
)
2715 #endif /* RADV_PRIVATE_H */