/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef TU_PRIVATE_H
#define TU_PRIVATE_H

#define VG(x) ((void) 0)

#include "c11/threads.h"
#include "main/macros.h"
#include "util/list.h"
#include "util/macros.h"

#include "vk_debug_report.h"
#include "wsi_common.h"

#include "drm-uapi/msm_drm.h"
#include "ir3/ir3_compiler.h"
#include "ir3/ir3_shader.h"

#include "adreno_common.xml.h"
#include "adreno_pm4.xml.h"

#include "fdl/freedreno_layout.h"

#include "tu_descriptor_set.h"
#include "tu_extensions.h"

/* Pre-declarations needed for WSI entrypoints */
struct wl_surface;
struct wl_display;
typedef struct xcb_connection_t xcb_connection_t;
typedef uint32_t xcb_visualid_t;
typedef uint32_t xcb_window_t;

#include <vulkan/vk_android_native_buffer.h>
#include <vulkan/vk_icd.h>
#include <vulkan/vulkan.h>
#include <vulkan/vulkan_intel.h>

#include "tu_entrypoints.h"

#include "vk_format.h"

#define MAX_VBS 32
#define MAX_VERTEX_ATTRIBS 32
#define MAX_RTS 8
#define MAX_VSC_PIPES 32
#define MAX_VIEWPORTS 1
#define MAX_SCISSORS 16
#define MAX_DISCARD_RECTANGLES 4
#define MAX_PUSH_CONSTANTS_SIZE 128
#define MAX_PUSH_DESCRIPTORS 32
#define MAX_DYNAMIC_UNIFORM_BUFFERS 16
#define MAX_DYNAMIC_STORAGE_BUFFERS 8
#define MAX_DYNAMIC_BUFFERS                                                  \
   (MAX_DYNAMIC_UNIFORM_BUFFERS + MAX_DYNAMIC_STORAGE_BUFFERS)
#define MAX_SAMPLES_LOG2 4
#define NUM_META_FS_KEYS 13
#define TU_MAX_DRM_DEVICES 8

#define MAX_BIND_POINTS 2 /* compute + graphics */
/* The Qualcomm driver exposes 0x20000058 */
#define MAX_STORAGE_BUFFER_RANGE 0x20000000
/* We use ldc for uniform buffer loads, just like the Qualcomm driver, so
 * expose the same maximum range.
 * TODO: The SIZE bitfield is 15 bits, and in 4-dword units, so the actual
 * range might be higher.
 */
#define MAX_UNIFORM_BUFFER_RANGE 0x10000

#define NUM_DEPTH_CLEAR_PIPELINES 3

/*
 * This is the point we switch from using CP to compute shader
 * for certain buffer operations.
 */
#define TU_BUFFER_OPS_CS_THRESHOLD 4096

#define A6XX_TEX_CONST_DWORDS 16
#define A6XX_TEX_SAMP_DWORDS 4

enum tu_mem_heap
{
   TU_MEM_HEAP_VRAM,
   TU_MEM_HEAP_VRAM_CPU_ACCESS,
   TU_MEM_HEAP_COUNT
};

enum tu_mem_type
{
   TU_MEM_TYPE_VRAM,
   TU_MEM_TYPE_GTT_WRITE_COMBINE,
   TU_MEM_TYPE_VRAM_CPU_ACCESS,
   TU_MEM_TYPE_GTT_CACHED,
   TU_MEM_TYPE_COUNT
};

#define tu_printflike(a, b) __attribute__((__format__(__printf__, a, b)))

static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

static inline uint32_t
align_u32_npot(uint32_t v, uint32_t a)
{
   return (v + a - 1) / a * a;
}

static inline uint64_t
align_u64(uint64_t v, uint64_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

static inline int32_t
align_i32(int32_t v, int32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

/** Alignment must be a power of 2. */
static inline bool
tu_is_aligned(uintmax_t n, uintmax_t a)
{
   assert(a == (a & -a));
   return (n & (a - 1)) == 0;
}

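/* Usage sketch (illustrative values, not from the driver): the power-of-two
 * variants round up with a mask, the npot variant with a divide:
 *
 *    align_u32(100, 64);       // -> 128
 *    align_u32_npot(100, 48);  // -> 144 (alignment only needs to be non-zero)
 *    tu_is_aligned(128, 64);   // -> true
 */
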
static inline uint32_t
round_up_u32(uint32_t v, uint32_t a)
{
   return (v + a - 1) / a;
}

static inline uint64_t
round_up_u64(uint64_t v, uint64_t a)
{
   return (v + a - 1) / a;
}

static inline uint32_t
tu_minify(uint32_t n, uint32_t levels)
{
   if (unlikely(n == 0))
      return 0;
   else
      return MAX2(n >> levels, 1);
}

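/* Worked example (illustrative): a 1024-texel dimension at mip level 3 is
 * tu_minify(1024, 3) == 128, and tu_minify(1, 5) == 1, since non-zero
 * dimensions clamp at one texel rather than shifting down to zero. */
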
static inline float
tu_clamp_f(float f, float min, float max)
{
   assert(min < max);

   if (f > max)
      return max;
   else if (f < min)
      return min;
   else
      return f;
}

static inline bool
tu_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
{
   if (*inout_mask & clear_mask) {
      *inout_mask &= ~clear_mask;
      return true;
   } else
      return false;
}

#define for_each_bit(b, dword)                                               \
   for (uint32_t __dword = (dword);                                          \
        (b) = __builtin_ffs(__dword) - 1, __dword; __dword &= ~(1 << (b)))

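/* Illustrative use: visit each set bit of a mask, lowest bit first. The
 * handler name is hypothetical, shown only for the example:
 *
 *    uint32_t b;
 *    for_each_bit(b, dirty_mask)
 *       handle_dirty_bit(b);
 */
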
#define typed_memcpy(dest, src, count)                                       \
   ({                                                                        \
      STATIC_ASSERT(sizeof(*src) == sizeof(*dest));                          \
      memcpy((dest), (src), (count) * sizeof(*(src)));                       \
   })

#define COND(bool, val) ((bool) ? (val) : 0)

/* Whenever we generate an error, pass it through this function. Useful for
 * debugging, where we can break on it. Only call at error site, not when
 * propagating errors. Might be useful to plug in a stack trace here.
 */
VkResult
__vk_errorf(struct tu_instance *instance,
            VkResult error,
            const char *file,
            int line,
            const char *format,
            ...);

#define vk_error(instance, error)                                            \
   __vk_errorf(instance, error, __FILE__, __LINE__, NULL);
#define vk_errorf(instance, error, format, ...)                              \
   __vk_errorf(instance, error, __FILE__, __LINE__, format, ##__VA_ARGS__);

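/* Illustrative call at the site where an error first arises (never when
 * merely propagating one), so a breakpoint in __vk_errorf catches it:
 *
 *    return vk_error(instance, VK_ERROR_OUT_OF_HOST_MEMORY);
 */
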
void
__tu_finishme(const char *file, int line, const char *format, ...)
   tu_printflike(3, 4);
void
tu_loge(const char *format, ...) tu_printflike(1, 2);
void
tu_loge_v(const char *format, va_list va);
void
tu_logi(const char *format, ...) tu_printflike(1, 2);
void
tu_logi_v(const char *format, va_list va);

/**
 * Print a FINISHME message, including its source location.
 */
#define tu_finishme(format, ...)                                             \
   do {                                                                      \
      static bool reported = false;                                          \
      if (!reported) {                                                       \
         __tu_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__);           \
         reported = true;                                                    \
      }                                                                      \
   } while (0)

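/* Illustrative: each call site reports at most once, thanks to the static
 * `reported` flag above. The format string and argument here are made up:
 *
 *    tu_finishme("unsupported tiling mode %d", tiling);
 */
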
/* A non-fatal assert. Useful for debugging. */
#ifdef DEBUG
#define tu_assert(x)                                                         \
   ({                                                                        \
      if (unlikely(!(x)))                                                    \
         fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x);      \
   })
#else
#define tu_assert(x)
#endif

/* Suppress -Wunused in stub functions */
#define tu_use_args(...) __tu_use_args(0, ##__VA_ARGS__)
static inline void
__tu_use_args(int ignore, ...)
{
}

#define tu_stub()                                                            \
   do {                                                                      \
      tu_finishme("stub %s", __func__);                                      \
   } while (0)

void *
tu_lookup_entrypoint_unchecked(const char *name);
void *
tu_lookup_entrypoint_checked(
   const char *name,
   uint32_t core_version,
   const struct tu_instance_extension_table *instance,
   const struct tu_device_extension_table *device);

struct tu_physical_device
{
   VK_LOADER_DATA _loader_data;

   struct tu_instance *instance;

   char name[VK_MAX_PHYSICAL_DEVICE_NAME_SIZE];
   uint8_t driver_uuid[VK_UUID_SIZE];
   uint8_t device_uuid[VK_UUID_SIZE];
   uint8_t cache_uuid[VK_UUID_SIZE];

   struct wsi_device wsi_device;

   uint32_t ccu_offset_gmem;
   uint32_t ccu_offset_bypass;
   /* alignment for size of tiles */
   uint32_t tile_align_w;
#define TILE_ALIGN_H 16
   /* gmem store/load granularity */
#define GMEM_ALIGN_W 16
#define GMEM_ALIGN_H 4

   struct
   {
      uint32_t PC_UNKNOWN_9805;
      uint32_t SP_UNKNOWN_A0F8;
   } magic;

   /* This is the driver's on-disk cache used as a fallback as opposed to
    * the pipeline cache defined by apps.
    */
   struct disk_cache *disk_cache;

   struct tu_device_extension_table supported_extensions;
};

enum tu_debug_flags
{
   TU_DEBUG_STARTUP = 1 << 0,
   TU_DEBUG_NIR = 1 << 1,
   TU_DEBUG_IR3 = 1 << 2,
   TU_DEBUG_NOBIN = 1 << 3,
   TU_DEBUG_SYSMEM = 1 << 4,
   TU_DEBUG_FORCEBIN = 1 << 5,
};

struct tu_instance
{
   VK_LOADER_DATA _loader_data;

   VkAllocationCallbacks alloc;

   uint32_t api_version;
   int physical_device_count;
   struct tu_physical_device physical_devices[TU_MAX_DRM_DEVICES];

   enum tu_debug_flags debug_flags;

   struct vk_debug_report_instance debug_report_callbacks;

   struct tu_instance_extension_table enabled_extensions;
};

VkResult
tu_wsi_init(struct tu_physical_device *physical_device);
void
tu_wsi_finish(struct tu_physical_device *physical_device);

bool
tu_instance_extension_supported(const char *name);
uint32_t
tu_physical_device_api_version(struct tu_physical_device *dev);
bool
tu_physical_device_extension_supported(struct tu_physical_device *dev,
                                       const char *name);

struct tu_pipeline_cache
{
   struct tu_device *device;
   pthread_mutex_t mutex;

   uint32_t kernel_count;
   struct cache_entry **hash_table;

   VkAllocationCallbacks alloc;
};

struct tu_pipeline_key
{
};

void
tu_pipeline_cache_init(struct tu_pipeline_cache *cache,
                       struct tu_device *device);
void
tu_pipeline_cache_finish(struct tu_pipeline_cache *cache);
void
tu_pipeline_cache_load(struct tu_pipeline_cache *cache,
                       const void *data,
                       size_t size);

struct tu_shader_variant;

bool
tu_create_shader_variants_from_pipeline_cache(
   struct tu_device *device,
   struct tu_pipeline_cache *cache,
   const unsigned char *sha1,
   struct tu_shader_variant **variants);

void
tu_pipeline_cache_insert_shaders(struct tu_device *device,
                                 struct tu_pipeline_cache *cache,
                                 const unsigned char *sha1,
                                 struct tu_shader_variant **variants,
                                 const void *const *codes,
                                 const unsigned *code_sizes);

struct tu_meta_state
{
   VkAllocationCallbacks alloc;

   struct tu_pipeline_cache cache;
};

#define TU_QUEUE_GENERAL 0

#define TU_MAX_QUEUE_FAMILIES 1

struct tu_fence
{
   struct wsi_fence *fence_wsi;
   bool signaled;
   int fd;
};

void
tu_fence_init(struct tu_fence *fence, bool signaled);
void
tu_fence_finish(struct tu_fence *fence);
void
tu_fence_update_fd(struct tu_fence *fence, int fd);
void
tu_fence_copy(struct tu_fence *fence, const struct tu_fence *src);
void
tu_fence_signal(struct tu_fence *fence);
void
tu_fence_wait_idle(struct tu_fence *fence);

struct tu_queue
{
   VK_LOADER_DATA _loader_data;
   struct tu_device *device;
   uint32_t queue_family_index;

   VkDeviceQueueCreateFlags flags;

   uint32_t msm_queue_id;
   struct tu_fence submit_fence;
};

struct tu_device
{
   VK_LOADER_DATA _loader_data;

   VkAllocationCallbacks alloc;

   struct tu_instance *instance;

   struct tu_meta_state meta_state;

   struct tu_queue *queues[TU_MAX_QUEUE_FAMILIES];
   int queue_count[TU_MAX_QUEUE_FAMILIES];

   struct tu_physical_device *physical_device;

   struct ir3_compiler *compiler;

   /* Backup in-memory cache to be used if the app doesn't provide one */
   struct tu_pipeline_cache *mem_cache;

   struct tu_bo vsc_draw_strm;
   struct tu_bo vsc_prim_strm;
   uint32_t vsc_draw_strm_pitch;
   uint32_t vsc_prim_strm_pitch;

#define MIN_SCRATCH_BO_SIZE_LOG2 12 /* A page */

   /* Currently the kernel driver uses a 32-bit GPU address space, but it
    * should be impossible to go beyond 48 bits.
    */
   struct
   {
      struct tu_bo bo;
      mtx_t construct_mtx;
      bool initialized;
   } scratch_bos[48 - MIN_SCRATCH_BO_SIZE_LOG2];

   struct tu_bo border_color;

   struct list_head shader_slabs;
   mtx_t shader_slab_mutex;

   struct tu_device_extension_table enabled_extensions;
};

VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size);
VkResult
tu_bo_init_dmabuf(struct tu_device *dev,
                  struct tu_bo *bo,
                  uint64_t size,
                  int fd);
int
tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo);
void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo);
VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo);

/* Get a scratch bo for use inside a command buffer. This will always return
 * the same bo given the same size or similar sizes, so only one scratch bo
 * can be used at the same time. It's meant for short-lived things where we
 * need to write to some piece of memory, read from it, and then immediately
 * throw it away.
 */
VkResult
tu_get_scratch_bo(struct tu_device *dev, uint64_t size, struct tu_bo **bo);

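/* Usage sketch (illustrative; error handling elided). The caller must be
 * done with the memory before anything else asks for a scratch bo:
 *
 *    struct tu_bo *scratch;
 *    VkResult result = tu_get_scratch_bo(dev, size, &scratch);
 *    // write via the GPU, read the result back, then drop the pointer
 */
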
struct tu_cs_entry
{
   const struct tu_bo *bo;

   uint32_t size;
   uint32_t offset;
};

struct ts_cs_memory {
   uint32_t *map;
   uint64_t iova;
};

enum tu_cs_mode
{

   /*
    * A command stream in TU_CS_MODE_GROW mode grows automatically whenever it
    * is full. tu_cs_begin must be called before command packet emission and
    * tu_cs_end must be called after.
    *
    * This mode may create multiple entries internally. The entries must be
    * submitted together.
    */
   TU_CS_MODE_GROW,

   /*
    * A command stream in TU_CS_MODE_EXTERNAL mode wraps an external,
    * fixed-size buffer. tu_cs_begin and tu_cs_end are optional and have no
    * effect.
    *
    * This mode does not create any entry or any BO.
    */
   TU_CS_MODE_EXTERNAL,

   /*
    * A command stream in TU_CS_MODE_SUB_STREAM mode does not support direct
    * command packet emission. tu_cs_begin_sub_stream must be called to get a
    * sub-stream to emit command packets to. When done with the sub-stream,
    * tu_cs_end_sub_stream must be called.
    *
    * This mode does not create any entry internally.
    */
   TU_CS_MODE_SUB_STREAM,
};

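/* Illustrative sub-stream lifecycle (signatures approximate; the real
 * prototypes live in tu_cs.h, and error handling is elided here):
 *
 *    struct tu_cs sub;
 *    tu_cs_begin_sub_stream(&sub_cs, dword_count, &sub); // carve out space
 *    // ... emit packets into `sub` ...
 *    struct tu_cs_entry entry = tu_cs_end_sub_stream(&sub_cs, &sub);
 */
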
struct tu_cs
{
   uint32_t *reserved_end;

   struct tu_device *device;
   enum tu_cs_mode mode;
   uint32_t next_bo_size;

   struct tu_cs_entry *entries;
   uint32_t entry_count;
   uint32_t entry_capacity;

   uint32_t bo_capacity;

   /* state for cond_exec_start/cond_exec_end */
   uint32_t *cond_dwords;
};

struct tu_device_memory
{
   struct tu_bo bo;
   VkDeviceSize size;

   /* for dedicated allocations */
   struct tu_image *image;
   struct tu_buffer *buffer;

   uint32_t type_index;
   void *map;
   void *user_ptr;
};

struct tu_descriptor_range
{
   uint64_t va;
   uint32_t size;
};

struct tu_descriptor_set
{
   const struct tu_descriptor_set_layout *layout;
   struct tu_descriptor_pool *pool;

   uint32_t *mapped_ptr;

   uint32_t *dynamic_descriptors;

   struct tu_bo *buffers[0];
};

struct tu_push_descriptor_set
{
   struct tu_descriptor_set set;
   uint32_t capacity;
};

struct tu_descriptor_pool_entry
{
   uint32_t offset;
   uint32_t size;
   struct tu_descriptor_set *set;
};

struct tu_descriptor_pool
{
   struct tu_bo bo;
   uint64_t current_offset;
   uint64_t size;

   uint8_t *host_memory_base;
   uint8_t *host_memory_ptr;
   uint8_t *host_memory_end;

   uint32_t entry_count;
   uint32_t max_entry_count;
   struct tu_descriptor_pool_entry entries[0];
};

struct tu_descriptor_update_template_entry
{
   VkDescriptorType descriptor_type;

   /* The number of descriptors to update */
   uint32_t descriptor_count;

   /* Into mapped_ptr or dynamic_descriptors, in units of the respective array
    */
   uint32_t dst_offset;

   /* In dwords. Not valid/used for dynamic descriptors */
   uint32_t dst_stride;

   uint32_t buffer_offset;

   /* Only valid for combined image samplers and samplers */
   uint16_t has_sampler;

   /* For push descriptors */
   const uint32_t *immutable_samplers;
};

struct tu_descriptor_update_template
{
   uint32_t entry_count;
   struct tu_descriptor_update_template_entry entry[0];
};

struct tu_buffer
{
   VkDeviceSize size;

   VkBufferUsageFlags usage;
   VkBufferCreateFlags flags;

   struct tu_bo *bo;
   VkDeviceSize bo_offset;
};

static inline uint64_t
tu_buffer_iova(struct tu_buffer *buffer)
{
   return buffer->bo->iova + buffer->bo_offset;
}

enum tu_dynamic_state_bits
{
   TU_DYNAMIC_VIEWPORT = 1 << 0,
   TU_DYNAMIC_SCISSOR = 1 << 1,
   TU_DYNAMIC_LINE_WIDTH = 1 << 2,
   TU_DYNAMIC_DEPTH_BIAS = 1 << 3,
   TU_DYNAMIC_BLEND_CONSTANTS = 1 << 4,
   TU_DYNAMIC_DEPTH_BOUNDS = 1 << 5,
   TU_DYNAMIC_STENCIL_COMPARE_MASK = 1 << 6,
   TU_DYNAMIC_STENCIL_WRITE_MASK = 1 << 7,
   TU_DYNAMIC_STENCIL_REFERENCE = 1 << 8,
   TU_DYNAMIC_DISCARD_RECTANGLE = 1 << 9,
   TU_DYNAMIC_SAMPLE_LOCATIONS = 1 << 10,
   TU_DYNAMIC_ALL = (1 << 11) - 1,
};

struct tu_vertex_binding
{
   struct tu_buffer *buffer;
   VkDeviceSize offset;
};

struct tu_viewport_state
{
   uint32_t count;
   VkViewport viewports[MAX_VIEWPORTS];
};

struct tu_scissor_state
{
   uint32_t count;
   VkRect2D scissors[MAX_SCISSORS];
};

struct tu_discard_rectangle_state
{
   uint32_t count;
   VkRect2D rectangles[MAX_DISCARD_RECTANGLES];
};

struct tu_dynamic_state
{
   /**
    * Bitmask of (1 << VK_DYNAMIC_STATE_*).
    * Defines the set of saved dynamic state.
    */
   uint32_t mask;

   struct tu_viewport_state viewport;

   struct tu_scissor_state scissor;

   float blend_constants[4];

   struct
   {
      uint32_t front;
      uint32_t back;
   } stencil_compare_mask;

   struct
   {
      uint32_t front;
      uint32_t back;
   } stencil_write_mask;

   struct tu_discard_rectangle_state discard_rectangle;
};

extern const struct tu_dynamic_state default_dynamic_state;

const char *
tu_get_debug_option_name(int id);

const char *
tu_get_perftest_option_name(int id);

struct tu_descriptor_state
{
   struct tu_descriptor_set *sets[MAX_SETS];
   struct tu_push_descriptor_set push_set;
   uint32_t dynamic_descriptors[MAX_DYNAMIC_BUFFERS * A6XX_TEX_CONST_DWORDS];
   uint32_t input_attachments[MAX_RTS * A6XX_TEX_CONST_DWORDS];
};

struct tu_tiling_config
{
   VkRect2D render_area;

   /* position and size of the first tile */
   VkRect2D tile0;
   /* number of tiles */
   VkExtent2D tile_count;

   /* size of the first VSC pipe */
   VkRect2D pipe0;
   /* number of VSC pipes */
   VkExtent2D pipe_count;

   /* pipe register values */
   uint32_t pipe_config[MAX_VSC_PIPES];
   uint32_t pipe_sizes[MAX_VSC_PIPES];

   /* Whether sysmem rendering must be used */
   bool force_sysmem;
};

enum tu_cmd_dirty_bits
{
   TU_CMD_DIRTY_PIPELINE = 1 << 0,
   TU_CMD_DIRTY_COMPUTE_PIPELINE = 1 << 1,
   TU_CMD_DIRTY_VERTEX_BUFFERS = 1 << 2,
   TU_CMD_DIRTY_DESCRIPTOR_SETS = 1 << 3,
   TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS = 1 << 4,
   TU_CMD_DIRTY_PUSH_CONSTANTS = 1 << 5,
   TU_CMD_DIRTY_STREAMOUT_BUFFERS = 1 << 6,
   TU_CMD_DIRTY_INPUT_ATTACHMENTS = 1 << 7,

   TU_CMD_DIRTY_DYNAMIC_LINE_WIDTH = 1 << 16,
   TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK = 1 << 17,
   TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK = 1 << 18,
   TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE = 1 << 19,
   TU_CMD_DIRTY_DYNAMIC_VIEWPORT = 1 << 20,
   TU_CMD_DIRTY_DYNAMIC_SCISSOR = 1 << 21,
};

struct tu_streamout_state {
   uint16_t stride[IR3_MAX_SO_BUFFERS];
   uint32_t ncomp[IR3_MAX_SO_BUFFERS];
   uint32_t prog[IR3_MAX_SO_OUTPUTS * 2];
   uint32_t prog_count;
   uint32_t vpc_so_buf_cntl;
};

struct tu_cmd_state
{
   uint32_t dirty;

   struct tu_pipeline *pipeline;
   struct tu_pipeline *compute_pipeline;

   struct
   {
      struct tu_buffer *buffers[MAX_VBS];
      VkDeviceSize offsets[MAX_VBS];
   } vb;

   struct tu_dynamic_state dynamic;

   /* Stream output buffers */
   struct
   {
      struct tu_buffer *buffers[IR3_MAX_SO_BUFFERS];
      VkDeviceSize offsets[IR3_MAX_SO_BUFFERS];
      VkDeviceSize sizes[IR3_MAX_SO_BUFFERS];
   } streamout_buf;

   uint8_t streamout_reset;
   uint8_t streamout_enabled;

   struct tu_buffer *index_buffer;
   uint64_t index_offset;
   uint32_t max_index_count;

   const struct tu_render_pass *pass;
   const struct tu_subpass *subpass;
   const struct tu_framebuffer *framebuffer;

   struct tu_tiling_config tiling_config;

   struct tu_cs_entry tile_store_ib;
};

struct tu_cmd_pool
{
   VkAllocationCallbacks alloc;
   struct list_head cmd_buffers;
   struct list_head free_cmd_buffers;
   uint32_t queue_family_index;
};

struct tu_cmd_buffer_upload
{
   uint8_t *map;
   unsigned offset;
   uint64_t size;
   struct list_head list;
};

enum tu_cmd_buffer_status
{
   TU_CMD_BUFFER_STATUS_INVALID,
   TU_CMD_BUFFER_STATUS_INITIAL,
   TU_CMD_BUFFER_STATUS_RECORDING,
   TU_CMD_BUFFER_STATUS_EXECUTABLE,
   TU_CMD_BUFFER_STATUS_PENDING,
};

struct tu_bo_list
{
   uint32_t count;
   uint32_t capacity;
   struct drm_msm_gem_submit_bo *bo_infos;
};

#define TU_BO_LIST_FAILED (~0)

void
tu_bo_list_init(struct tu_bo_list *list);
void
tu_bo_list_destroy(struct tu_bo_list *list);
void
tu_bo_list_reset(struct tu_bo_list *list);
uint32_t
tu_bo_list_add(struct tu_bo_list *list,
               const struct tu_bo *bo,
               uint32_t flags);
VkResult
tu_bo_list_merge(struct tu_bo_list *list, const struct tu_bo_list *other);

/* This struct defines the layout of the scratch_bo */
struct tu6_control
{
   uint32_t seqno; /* seqno for async CP_EVENT_WRITE, etc */
   uint32_t _pad0;
   volatile uint32_t vsc_overflow;
   uint32_t _pad1;
   /* flag set from cmdstream when VSC overflow detected: */
   uint32_t vsc_scratch;
   uint32_t _pad2;
   uint32_t _pad3;
   uint32_t _pad4;

   /* scratch space for VPC_SO[i].FLUSH_BASE_LO/HI, start on 32 byte boundary. */
   struct
   {
      uint32_t offset;
      uint32_t pad[7];
   } flush_base[4];
};

#define ctrl_offset(member) offsetof(struct tu6_control, member)

struct tu_cmd_buffer
{
   VK_LOADER_DATA _loader_data;

   struct tu_device *device;

   struct tu_cmd_pool *pool;
   struct list_head pool_link;

   VkCommandBufferUsageFlags usage_flags;
   VkCommandBufferLevel level;
   enum tu_cmd_buffer_status status;

   struct tu_cmd_state state;
   struct tu_vertex_binding vertex_bindings[MAX_VBS];
   uint32_t queue_family_index;

   uint32_t push_constants[MAX_PUSH_CONSTANTS_SIZE / 4];
   VkShaderStageFlags push_constant_stages;
   struct tu_descriptor_set meta_push_descriptors;

   struct tu_descriptor_state descriptors[MAX_BIND_POINTS];

   struct tu_cmd_buffer_upload upload;

   VkResult record_result;

   struct tu_bo_list bo_list;

   struct tu_cs draw_cs;
   struct tu_cs draw_epilogue_cs;
   struct tu_cs sub_cs;

   struct tu_bo scratch_bo;
   uint32_t scratch_seqno;

   struct tu_bo vsc_draw_strm;
   struct tu_bo vsc_prim_strm;
   uint32_t vsc_draw_strm_pitch;
   uint32_t vsc_prim_strm_pitch;
};

/* Temporary struct for tracking a register state to be written, used by
 * a6xx-pack.h and tu_cs_emit_regs()
 */
struct tu_reg_value {
   uint32_t reg;
   uint64_t value;
   bool is_address;
   struct tu_bo *bo;
   bool bo_write;
   uint32_t bo_offset;
   uint32_t bo_shift;
};

unsigned
tu6_emit_event_write(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     enum vgt_event_type event,
                     bool need_seqno);

bool
tu_get_memory_fd(struct tu_device *device,
                 struct tu_device_memory *memory,
                 int *pFD);

static inline struct tu_descriptor_state *
tu_get_descriptors_state(struct tu_cmd_buffer *cmd_buffer,
                         VkPipelineBindPoint bind_point)
{
   return &cmd_buffer->descriptors[bind_point];
}

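/* The direct indexing above works because VK_PIPELINE_BIND_POINT_GRAPHICS
 * is 0 and VK_PIPELINE_BIND_POINT_COMPUTE is 1, matching MAX_BIND_POINTS.
 * Illustrative call:
 *
 *    struct tu_descriptor_state *descriptors =
 *       tu_get_descriptors_state(cmd, VK_PIPELINE_BIND_POINT_COMPUTE);
 */
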
/*
 * Takes x,y,z as exact numbers of invocations, instead of blocks.
 *
 * Limitations: Can't call normal dispatch functions without binding or
 * rebinding the compute pipeline.
 */
void
tu_unaligned_dispatch(struct tu_cmd_buffer *cmd_buffer,
                      uint32_t x,
                      uint32_t y,
                      uint32_t z);

struct tu_shader_module;

#define TU_HASH_SHADER_IS_GEOM_COPY_SHADER (1 << 0)
#define TU_HASH_SHADER_SISCHED (1 << 1)
#define TU_HASH_SHADER_UNSAFE_MATH (1 << 2)

void
tu_hash_shaders(unsigned char *hash,
                const VkPipelineShaderStageCreateInfo **stages,
                const struct tu_pipeline_layout *layout,
                const struct tu_pipeline_key *key,
                uint32_t flags);

static inline gl_shader_stage
vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
{
   assert(__builtin_popcount(vk_stage) == 1);
   return ffs(vk_stage) - 1;
}

static inline VkShaderStageFlagBits
mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
{
   return (1 << mesa_stage);
}

#define TU_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)

#define tu_foreach_stage(stage, stage_bits)                                  \
   for (gl_shader_stage stage,                                               \
        __tmp = (gl_shader_stage)((stage_bits) & TU_STAGE_MASK);             \
        stage = __builtin_ffs(__tmp) - 1, __tmp; __tmp &= ~(1 << (stage)))

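/* The ffs-based conversions above rely on the two bit layouts lining up:
 * VK_SHADER_STAGE_VERTEX_BIT is 1 << MESA_SHADER_VERTEX, and so on for the
 * other stages. Iterating a stage mask then looks like (illustrative;
 * process_stage is hypothetical):
 *
 *    tu_foreach_stage(stage, active_stages)
 *       process_stage(stage);
 */
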
struct tu_shader_module
{
   unsigned char sha1[20];

   uint32_t code_size;
   const uint32_t *code[0];
};

struct tu_shader_compile_options
{
   struct ir3_shader_key key;

   bool optimize;
   bool include_binning_pass;
};

struct tu_push_constant_range
{
   uint32_t lo;
   uint32_t count;
};

struct tu_shader
{
   struct ir3_shader ir3_shader;

   struct tu_push_constant_range push_consts;
   unsigned attachment_idx[MAX_RTS];

   /* This may be true for vertex shaders. When true, variants[1] is the
    * binning variant and binning_binary is non-NULL.
    */
   bool has_binning_pass;

   void *binning_binary;

   struct ir3_shader_variant variants[0];
};

struct tu_shader *
tu_shader_create(struct tu_device *dev,
                 gl_shader_stage stage,
                 const VkPipelineShaderStageCreateInfo *stage_info,
                 struct tu_pipeline_layout *layout,
                 const VkAllocationCallbacks *alloc);

void
tu_shader_destroy(struct tu_device *dev,
                  struct tu_shader *shader,
                  const VkAllocationCallbacks *alloc);

void
tu_shader_compile_options_init(
   struct tu_shader_compile_options *options,
   const VkGraphicsPipelineCreateInfo *pipeline_info);

VkResult
tu_shader_compile(struct tu_device *dev,
                  struct tu_shader *shader,
                  const struct tu_shader *next_stage,
                  const struct tu_shader_compile_options *options,
                  const VkAllocationCallbacks *alloc);

struct tu_program_descriptor_linkage
{
   struct ir3_ubo_analysis_state ubo_state;
   struct ir3_const_state const_state;

   uint32_t constlen;

   struct tu_push_constant_range push_consts;
};

struct tu_pipeline
{
   struct tu_cs cs;

   struct tu_dynamic_state dynamic_state;

   struct tu_pipeline_layout *layout;

   bool need_indirect_descriptor_sets;
   VkShaderStageFlags active_stages;

   struct tu_streamout_state streamout;

   struct
   {
      struct tu_bo binary_bo;
      struct tu_cs_entry state_ib;
      struct tu_cs_entry binning_state_ib;

      struct tu_program_descriptor_linkage link[MESA_SHADER_STAGES];
      unsigned input_attachment_idx[MAX_RTS];
   } program;

   struct
   {
      struct tu_cs_entry state_ib;
   } load_state;

   struct
   {
      uint8_t bindings[MAX_VERTEX_ATTRIBS];
      uint32_t count;

      uint8_t binning_bindings[MAX_VERTEX_ATTRIBS];
      uint32_t binning_count;

      struct tu_cs_entry state_ib;
      struct tu_cs_entry binning_state_ib;
   } vi;

   struct
   {
      enum pc_di_primtype primtype;
      bool primitive_restart;
   } ia;

   struct
   {
      struct tu_cs_entry state_ib;
   } vp;

   struct
   {
      uint32_t gras_su_cntl;
      struct tu_cs_entry state_ib;
   } rast;

   struct
   {
      struct tu_cs_entry state_ib;
   } ds;

   struct
   {
      struct tu_cs_entry state_ib;
   } blend;

   struct
   {
      uint32_t local_size[3];
   } compute;
};

void
tu6_emit_viewport(struct tu_cs *cs, const VkViewport *viewport);

void
tu6_emit_scissor(struct tu_cs *cs, const VkRect2D *scissor);

void
tu6_emit_sample_locations(struct tu_cs *cs,
                          const VkSampleLocationsInfoEXT *samp_loc);

void
tu6_emit_gras_su_cntl(struct tu_cs *cs,
                      uint32_t gras_su_cntl,
                      float line_width);

void
tu6_emit_depth_bias(struct tu_cs *cs,
                    float constant_factor,
                    float clamp,
                    float slope_factor);

void
tu6_emit_stencil_compare_mask(struct tu_cs *cs,
                              uint32_t front,
                              uint32_t back);

void
tu6_emit_stencil_write_mask(struct tu_cs *cs, uint32_t front, uint32_t back);

void
tu6_emit_stencil_reference(struct tu_cs *cs, uint32_t front, uint32_t back);

void
tu6_emit_blend_constants(struct tu_cs *cs, const float constants[4]);

void tu6_emit_msaa(struct tu_cs *cs, VkSampleCountFlagBits samples);

void tu6_emit_window_scissor(struct tu_cs *cs,
                             uint32_t x1,
                             uint32_t y1,
                             uint32_t x2,
                             uint32_t y2);

void tu6_emit_window_offset(struct tu_cs *cs, uint32_t x1, uint32_t y1);

struct tu_image_view;

void
tu_resolve_sysmem(struct tu_cmd_buffer *cmd,
                  struct tu_cs *cs,
                  struct tu_image_view *src,
                  struct tu_image_view *dst,
                  uint32_t layers,
                  const VkRect2D *rect);

void
tu_clear_sysmem_attachment(struct tu_cmd_buffer *cmd,
                           struct tu_cs *cs,
                           uint32_t a,
                           const VkRenderPassBeginInfo *info);

void
tu_clear_gmem_attachment(struct tu_cmd_buffer *cmd,
                         struct tu_cs *cs,
                         uint32_t a,
                         const VkRenderPassBeginInfo *info);

void
tu_load_gmem_attachment(struct tu_cmd_buffer *cmd,
                        struct tu_cs *cs,
                        uint32_t a,
                        bool force_load);

/* expose this function to be able to emit load without checking LOAD_OP */
void
tu_emit_load_gmem_attachment(struct tu_cmd_buffer *cmd,
                             struct tu_cs *cs,
                             uint32_t a);

/* note: gmem store can also resolve */
void
tu_store_gmem_attachment(struct tu_cmd_buffer *cmd,
                         struct tu_cs *cs,
                         uint32_t a,
                         uint32_t gmem_a);

struct tu_userdata_info *
tu_lookup_user_sgpr(struct tu_pipeline *pipeline,
                    gl_shader_stage stage,
                    int idx);

struct tu_shader_variant *
tu_get_shader(struct tu_pipeline *pipeline, gl_shader_stage stage);

struct tu_graphics_pipeline_create_info
{
   bool use_rectlist;
   bool db_depth_clear;
   bool db_stencil_clear;
   bool db_depth_disable_expclear;
   bool db_stencil_disable_expclear;
   bool db_flush_depth_inplace;
   bool db_flush_stencil_inplace;
   bool db_resummarize;
   uint32_t custom_blend_mode;
};

enum tu_supported_formats {
   FMT_VERTEX = 1,
   FMT_TEXTURE = 2,
   FMT_COLOR = 4,
};

struct tu_native_format
{
   enum a6xx_format fmt : 8;
   enum a3xx_color_swap swap : 8;
   enum a6xx_tile_mode tile_mode : 8;
   enum tu_supported_formats supported : 8;
};

struct tu_native_format tu6_format_vtx(VkFormat format);
struct tu_native_format tu6_format_color(VkFormat format,
                                         enum a6xx_tile_mode tile_mode);
struct tu_native_format tu6_format_texture(VkFormat format,
                                           enum a6xx_tile_mode tile_mode);

static inline enum a6xx_format
tu6_base_format(VkFormat format)
{
   /* note: tu6_format_color doesn't care about tiling for .fmt field */
   return tu6_format_color(format, TILE6_LINEAR).fmt;
}

enum a6xx_depth_format tu6_pipe2depth(VkFormat format);

struct tu_image
{
   /* The original VkFormat provided by the client. This may not match any
    * of the actual surface formats.
    */
   VkFormat vk_format;
   VkImageAspectFlags aspects;
   VkImageUsageFlags usage;  /**< Superset of VkImageCreateInfo::usage. */
   VkImageTiling tiling;     /** VkImageCreateInfo::tiling */
   VkImageCreateFlags flags; /** VkImageCreateInfo::flags */

   uint32_t level_count;
   uint32_t layer_count;
   VkSampleCountFlagBits samples;

   struct fdl_layout layout;

   unsigned queue_family_mask;

   /* For VK_ANDROID_native_buffer, the WSI image owns the memory. */
   VkDeviceMemory owned_memory;

   /* Set when bound */
   struct tu_bo *bo;
   VkDeviceSize bo_offset;
};

unsigned
tu_image_queue_family_mask(const struct tu_image *image,
                           uint32_t family,
                           uint32_t queue_family);

static inline uint32_t
tu_get_layerCount(const struct tu_image *image,
                  const VkImageSubresourceRange *range)
{
   return range->layerCount == VK_REMAINING_ARRAY_LAYERS
             ? image->layer_count - range->baseArrayLayer
             : range->layerCount;
}

static inline uint32_t
tu_get_levelCount(const struct tu_image *image,
                  const VkImageSubresourceRange *range)
{
   return range->levelCount == VK_REMAINING_MIP_LEVELS
             ? image->level_count - range->baseMipLevel
             : range->levelCount;
}

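/* Worked example (illustrative): for an image with level_count == 10, a
 * range with baseMipLevel == 2 and levelCount == VK_REMAINING_MIP_LEVELS
 * covers levels 2..9, so tu_get_levelCount() returns 8. */
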
enum a3xx_msaa_samples
tu_msaa_samples(uint32_t samples);
enum a6xx_tex_fetchsize
tu6_fetchsize(VkFormat format);

struct tu_image_view
{
   struct tu_image *image; /**< VkImageViewCreateInfo::image */

   uint32_t layer_size;
   uint32_t ubwc_layer_size;

   /* used to determine if fast gmem store path can be used */
   VkExtent2D extent;
   bool need_y2_align;

   uint32_t descriptor[A6XX_TEX_CONST_DWORDS];

   /* Descriptor for use as a storage image as opposed to a sampled image.
    * This has a few differences for cube maps (e.g. type).
    */
   uint32_t storage_descriptor[A6XX_TEX_CONST_DWORDS];

   /* pre-filled register values */
   uint32_t FLAG_BUFFER_PITCH;

   uint32_t RB_MRT_BUF_INFO;
   uint32_t SP_FS_MRT_REG;

   uint32_t SP_PS_2D_SRC_INFO;
   uint32_t SP_PS_2D_SRC_SIZE;

   uint32_t RB_2D_DST_INFO;

   uint32_t RB_BLIT_DST_INFO;
};

struct tu_sampler
{
   uint32_t descriptor[A6XX_TEX_SAMP_DWORDS];
};

void
tu_cs_image_ref(struct tu_cs *cs,
                const struct tu_image_view *iview,
                uint32_t layer);

void
tu_cs_image_ref_2d(struct tu_cs *cs,
                   const struct tu_image_view *iview,
                   uint32_t layer,
                   bool src);

void
tu_cs_image_flag_ref(struct tu_cs *cs,
                     const struct tu_image_view *iview,
                     uint32_t layer);

VkResult
tu_image_create(VkDevice _device,
                const VkImageCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *alloc,
                VkImage *pImage);

VkResult
tu_image_from_gralloc(VkDevice device_h,
                      const VkImageCreateInfo *base_info,
                      const VkNativeBufferANDROID *gralloc_info,
                      const VkAllocationCallbacks *alloc,
                      VkImage *out_image_h);

void
tu_image_view_init(struct tu_image_view *view,
                   const VkImageViewCreateInfo *pCreateInfo);

struct tu_buffer_view
{
   uint32_t descriptor[A6XX_TEX_CONST_DWORDS];

   struct tu_buffer *buffer;
};

void
tu_buffer_view_init(struct tu_buffer_view *view,
                    struct tu_device *device,
                    const VkBufferViewCreateInfo *pCreateInfo);

static inline struct VkExtent3D
tu_sanitize_image_extent(const VkImageType imageType,
                         const struct VkExtent3D imageExtent)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkExtent3D) { imageExtent.width, 1, 1 };
   case VK_IMAGE_TYPE_2D:
      return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
   case VK_IMAGE_TYPE_3D:
      return imageExtent;
   default:
      unreachable("invalid image type");
   }
}

static inline struct VkOffset3D
tu_sanitize_image_offset(const VkImageType imageType,
                         const struct VkOffset3D imageOffset)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkOffset3D) { imageOffset.x, 0, 0 };
   case VK_IMAGE_TYPE_2D:
      return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
   case VK_IMAGE_TYPE_3D:
      return imageOffset;
   default:
      unreachable("invalid image type");
   }
}

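/* Worked example (illustrative): a 1D image only keeps its width, so
 * tu_sanitize_image_extent(VK_IMAGE_TYPE_1D, (VkExtent3D) { 256, 7, 3 })
 * yields { 256, 1, 1 }. */
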
struct tu_attachment_info
{
   struct tu_image_view *attachment;
};

struct tu_framebuffer
{
   uint32_t width;
   uint32_t height;
   uint32_t layers;

   uint32_t attachment_count;
   struct tu_attachment_info attachments[0];
};

struct tu_subpass_attachment
{
   uint32_t attachment;
};

struct tu_subpass
{
   uint32_t input_count;
   uint32_t color_count;
   struct tu_subpass_attachment *input_attachments;
   struct tu_subpass_attachment *color_attachments;
   struct tu_subpass_attachment *resolve_attachments;
   struct tu_subpass_attachment depth_stencil_attachment;

   VkSampleCountFlagBits samples;

   /* pre-filled register values */
   uint32_t render_components;
};

struct tu_render_pass_attachment
{
   VkFormat format;
   uint32_t samples;
   uint32_t cpp;
   VkImageAspectFlags clear_mask;
   bool load;
   bool store;
   int32_t gmem_offset;
};

struct tu_render_pass
{
   uint32_t attachment_count;
   uint32_t subpass_count;
   uint32_t gmem_pixels;
   uint32_t tile_align_w;
   struct tu_subpass_attachment *subpass_attachments;
   struct tu_render_pass_attachment *attachments;
   struct tu_subpass subpasses[0];
};

VkResult
tu_device_init_meta(struct tu_device *device);
void
tu_device_finish_meta(struct tu_device *device);

struct tu_query_pool
{
   VkQueryType type;
   uint32_t stride;
   uint64_t size;
   uint32_t pipeline_statistics;
   struct tu_bo bo;
};

struct tu_semaphore
{
   uint32_t syncobj;
   uint32_t temp_syncobj;
};

void
tu_set_descriptor_set(struct tu_cmd_buffer *cmd_buffer,
                      VkPipelineBindPoint bind_point,
                      struct tu_descriptor_set *set,
                      unsigned idx);

void
tu_update_descriptor_sets(struct tu_device *device,
                          struct tu_cmd_buffer *cmd_buffer,
                          VkDescriptorSet overrideSet,
                          uint32_t descriptorWriteCount,
                          const VkWriteDescriptorSet *pDescriptorWrites,
                          uint32_t descriptorCopyCount,
                          const VkCopyDescriptorSet *pDescriptorCopies);

void
tu_update_descriptor_set_with_template(
   struct tu_device *device,
   struct tu_cmd_buffer *cmd_buffer,
   struct tu_descriptor_set *set,
   VkDescriptorUpdateTemplate descriptorUpdateTemplate,
   const void *pData);

void
tu_meta_push_descriptor_set(struct tu_cmd_buffer *cmd_buffer,
                            VkPipelineBindPoint pipelineBindPoint,
                            VkPipelineLayout _layout,
                            uint32_t set,
                            uint32_t descriptorWriteCount,
                            const VkWriteDescriptorSet *pDescriptorWrites);

int
tu_drm_get_gpu_id(const struct tu_physical_device *dev, uint32_t *id);

int
tu_drm_get_gmem_size(const struct tu_physical_device *dev, uint32_t *size);

int
tu_drm_get_gmem_base(const struct tu_physical_device *dev, uint64_t *base);

int
tu_drm_submitqueue_new(const struct tu_device *dev,
                       int priority,
                       uint32_t *queue_id);

void
tu_drm_submitqueue_close(const struct tu_device *dev, uint32_t queue_id);

uint32_t
tu_gem_new(const struct tu_device *dev, uint64_t size, uint32_t flags);
int
tu_gem_import_dmabuf(const struct tu_device *dev,
                     int prime_fd,
                     uint64_t size);
int
tu_gem_export_dmabuf(const struct tu_device *dev, uint32_t gem_handle);
void
tu_gem_close(const struct tu_device *dev, uint32_t gem_handle);
uint64_t
tu_gem_info_offset(const struct tu_device *dev, uint32_t gem_handle);
uint64_t
tu_gem_info_iova(const struct tu_device *dev, uint32_t gem_handle);

#define TU_DEFINE_HANDLE_CASTS(__tu_type, __VkType)                          \
                                                                             \
   static inline struct __tu_type *__tu_type##_from_handle(__VkType _handle) \
   {                                                                         \
      return (struct __tu_type *) _handle;                                   \
   }                                                                         \
                                                                             \
   static inline __VkType __tu_type##_to_handle(struct __tu_type *_obj)     \
   {                                                                         \
      return (__VkType) _obj;                                                \
   }

#define TU_DEFINE_NONDISP_HANDLE_CASTS(__tu_type, __VkType)                  \
                                                                             \
   static inline struct __tu_type *__tu_type##_from_handle(__VkType _handle) \
   {                                                                         \
      return (struct __tu_type *) (uintptr_t) _handle;                       \
   }                                                                         \
                                                                             \
   static inline __VkType __tu_type##_to_handle(struct __tu_type *_obj)     \
   {                                                                         \
      return (__VkType)(uintptr_t) _obj;                                     \
   }

#define TU_FROM_HANDLE(__tu_type, __name, __handle)                          \
   struct __tu_type *__name = __tu_type##_from_handle(__handle)

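/* Illustrative: the usual first lines of a Vulkan entrypoint, recovering
 * driver structs from API handles:
 *
 *    TU_FROM_HANDLE(tu_device, device, _device);
 *    TU_FROM_HANDLE(tu_image, image, _image);
 */
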
TU_DEFINE_HANDLE_CASTS(tu_cmd_buffer, VkCommandBuffer)
TU_DEFINE_HANDLE_CASTS(tu_device, VkDevice)
TU_DEFINE_HANDLE_CASTS(tu_instance, VkInstance)
TU_DEFINE_HANDLE_CASTS(tu_physical_device, VkPhysicalDevice)
TU_DEFINE_HANDLE_CASTS(tu_queue, VkQueue)

TU_DEFINE_NONDISP_HANDLE_CASTS(tu_cmd_pool, VkCommandPool)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_buffer, VkBuffer)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_buffer_view, VkBufferView)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_pool, VkDescriptorPool)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_set, VkDescriptorSet)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_set_layout,
                               VkDescriptorSetLayout)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_update_template,
                               VkDescriptorUpdateTemplate)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_device_memory, VkDeviceMemory)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_fence, VkFence)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_event, VkEvent)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_framebuffer, VkFramebuffer)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_image, VkImage)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_image_view, VkImageView)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_pipeline_cache, VkPipelineCache)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_pipeline, VkPipeline)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_pipeline_layout, VkPipelineLayout)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_query_pool, VkQueryPool)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_render_pass, VkRenderPass)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_sampler, VkSampler)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_shader_module, VkShaderModule)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_semaphore, VkSemaphore)

#endif /* TU_PRIVATE_H */