/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef TU_PRIVATE_H
#define TU_PRIVATE_H
#define VG(x) ((void)0)
#include "c11/threads.h"
#include "main/macros.h"
#include "util/list.h"
#include "util/macros.h"
#include "util/u_atomic.h"

#include "vk_debug_report.h"
#include "wsi_common.h"

#include "drm-uapi/msm_drm.h"
#include "ir3/ir3_compiler.h"
#include "ir3/ir3_shader.h"

#include "adreno_common.xml.h"
#include "adreno_pm4.xml.h"

#include "fdl/freedreno_layout.h"

#include "tu_descriptor_set.h"
#include "tu_extensions.h"
/* Pre-declarations needed for WSI entrypoints */
typedef struct xcb_connection_t xcb_connection_t;
typedef uint32_t xcb_visualid_t;
typedef uint32_t xcb_window_t;
#include <vulkan/vk_android_native_buffer.h>
#include <vulkan/vk_icd.h>
#include <vulkan/vulkan.h>
#include <vulkan/vulkan_intel.h>

#include "tu_entrypoints.h"

#include "vk_format.h"
#define MAX_VERTEX_ATTRIBS 32
#define MAX_VSC_PIPES 32
#define MAX_VIEWPORTS 1
#define MAX_SCISSORS 16
#define MAX_DISCARD_RECTANGLES 4
#define MAX_PUSH_CONSTANTS_SIZE 128
#define MAX_PUSH_DESCRIPTORS 32
#define MAX_DYNAMIC_UNIFORM_BUFFERS 16
#define MAX_DYNAMIC_STORAGE_BUFFERS 8
#define MAX_DYNAMIC_BUFFERS                                                  \
   (MAX_DYNAMIC_UNIFORM_BUFFERS + MAX_DYNAMIC_STORAGE_BUFFERS)
#define TU_MAX_DRM_DEVICES 8
#define MAX_BIND_POINTS 2 /* compute + graphics */

/* The Qualcomm driver exposes 0x20000058 */
#define MAX_STORAGE_BUFFER_RANGE 0x20000000
/* We use ldc for uniform buffer loads, just like the Qualcomm driver, so
 * expose the same maximum range.
 * TODO: The SIZE bitfield is 15 bits, and in 4-dword units, so the actual
 * range might be higher.
 */
#define MAX_UNIFORM_BUFFER_RANGE 0x10000
#define A6XX_TEX_CONST_DWORDS 16
#define A6XX_TEX_SAMP_DWORDS 4

#define tu_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
static inline uint32_t
tu_minify(uint32_t n, uint32_t levels)
{
   if (unlikely(n == 0))
      return 0;
   else
      return MAX2(n >> levels, 1);
}
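/* Usage sketch (illustrative only, not part of the driver): computing the
 * dimensions of a mip level from the base level size. The variable names
 * are hypothetical.
 *
 *    uint32_t level_width = tu_minify(base_width, level);
 *    uint32_t level_height = tu_minify(base_height, level);
 *
 * A 100x30 image at level 2 yields 25x7; the MAX2 clamp keeps fully-minified
 * dimensions at 1 instead of 0.
 */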
#define for_each_bit(b, dword)                                               \
   for (uint32_t __dword = (dword);                                          \
        (b) = __builtin_ffs(__dword) - 1, __dword; __dword &= ~(1 << (b)))
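/* Usage sketch (illustrative only): iterating over the set bits of a mask.
 * `dirty_mask` and `i` are hypothetical names.
 *
 *    uint32_t i;
 *    for_each_bit(i, dirty_mask) {
 *       // runs once per set bit, with i = bit index (lowest first)
 *    }
 */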
#define typed_memcpy(dest, src, count)                                       \
   ({                                                                        \
      STATIC_ASSERT(sizeof(*src) == sizeof(*dest));                          \
      memcpy((dest), (src), (count) * sizeof(*(src)));                       \
   })
#define COND(bool, val) ((bool) ? (val) : 0)
#define BIT(bit) (1u << (bit))
/* Whenever we generate an error, pass it through this function. Useful for
 * debugging, where we can break on it. Only call at error site, not when
 * propagating errors. Might be useful to plug in a stack trace here.
 */

VkResult
__vk_errorf(struct tu_instance *instance,
            VkResult error,
            const char *file,
            int line,
            const char *format,
            ...);

#define vk_error(instance, error)                                            \
   __vk_errorf(instance, error, __FILE__, __LINE__, NULL);
#define vk_errorf(instance, error, format, ...)                              \
   __vk_errorf(instance, error, __FILE__, __LINE__, format, ##__VA_ARGS__);
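/* Usage sketch (illustrative only): returning an error through the helper so
 * a debugger breakpoint on __vk_errorf catches it at the error site. The
 * surrounding variables are hypothetical.
 *
 *    if (!mem)
 *       return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
 *
 *    return vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
 *                     "failed to open device %s", path);
 */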
void
__tu_finishme(const char *file, int line, const char *format, ...)
   tu_printflike(3, 4);
void
tu_loge(const char *format, ...) tu_printflike(1, 2);
void
tu_logi(const char *format, ...) tu_printflike(1, 2);
/**
 * Print a FINISHME message, including its source location.
 */
#define tu_finishme(format, ...)                                             \
   do {                                                                      \
      static bool reported = false;                                          \
      if (!reported) {                                                       \
         __tu_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__);           \
         reported = true;                                                    \
      }                                                                      \
   } while (0)

#define tu_stub()                                                            \
   do {                                                                      \
      tu_finishme("stub %s", __func__);                                      \
   } while (0)
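/* Usage sketch (illustrative only): flagging unimplemented paths. The static
 * `reported` flag means each call site logs at most once.
 *
 *    void
 *    tu_CmdSetSomething(VkCommandBuffer commandBuffer)   // hypothetical
 *    {
 *       tu_stub();   // logs "stub tu_CmdSetSomething" with file:line
 *    }
 */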
void *
tu_lookup_entrypoint_unchecked(const char *name);
void *
tu_lookup_entrypoint_checked(
   const char *name,
   uint32_t core_version,
   const struct tu_instance_extension_table *instance,
   const struct tu_device_extension_table *device);
struct tu_physical_device
{
   VK_LOADER_DATA _loader_data;

   struct tu_instance *instance;

   char name[VK_MAX_PHYSICAL_DEVICE_NAME_SIZE];
   uint8_t driver_uuid[VK_UUID_SIZE];
   uint8_t device_uuid[VK_UUID_SIZE];
   uint8_t cache_uuid[VK_UUID_SIZE];

   struct wsi_device wsi_device;

   uint32_t ccu_offset_gmem;
   uint32_t ccu_offset_bypass;
   /* alignment for size of tiles */
   uint32_t tile_align_w;
#define TILE_ALIGN_H 16
   /* gmem store/load granularity */
#define GMEM_ALIGN_W 16
#define GMEM_ALIGN_H 4

   uint32_t PC_UNKNOWN_9805;
   uint32_t SP_UNKNOWN_A0F8;

   int msm_major_version;
   int msm_minor_version;

   /* This is the driver's on-disk cache used as a fallback as opposed to
    * the pipeline cache defined by apps.
    */
   struct disk_cache *disk_cache;

   struct tu_device_extension_table supported_extensions;
};
enum tu_debug_flags
{
   TU_DEBUG_STARTUP = 1 << 0,
   TU_DEBUG_NIR = 1 << 1,
   TU_DEBUG_IR3 = 1 << 2,
   TU_DEBUG_NOBIN = 1 << 3,
   TU_DEBUG_SYSMEM = 1 << 4,
   TU_DEBUG_FORCEBIN = 1 << 5,
   TU_DEBUG_NOUBWC = 1 << 6,
};
struct tu_instance
{
   VK_LOADER_DATA _loader_data;

   VkAllocationCallbacks alloc;

   uint32_t api_version;
   int physical_device_count;
   struct tu_physical_device physical_devices[TU_MAX_DRM_DEVICES];

   enum tu_debug_flags debug_flags;

   struct vk_debug_report_instance debug_report_callbacks;

   struct tu_instance_extension_table enabled_extensions;
};
VkResult
tu_wsi_init(struct tu_physical_device *physical_device);
void
tu_wsi_finish(struct tu_physical_device *physical_device);

bool
tu_instance_extension_supported(const char *name);
uint32_t
tu_physical_device_api_version(struct tu_physical_device *dev);
bool
tu_physical_device_extension_supported(struct tu_physical_device *dev,
                                       const char *name);
struct tu_pipeline_cache
{
   struct tu_device *device;
   pthread_mutex_t mutex;

   uint32_t kernel_count;
   struct cache_entry **hash_table;

   VkAllocationCallbacks alloc;
};

struct tu_pipeline_key
{
};
#define TU_QUEUE_GENERAL 0

#define TU_MAX_QUEUE_FAMILIES 1
struct tu_fence
{
   struct wsi_fence *fence_wsi;
};
void
tu_fence_init(struct tu_fence *fence, bool signaled);
void
tu_fence_finish(struct tu_fence *fence);
void
tu_fence_update_fd(struct tu_fence *fence, int fd);
void
tu_fence_copy(struct tu_fence *fence, const struct tu_fence *src);
void
tu_fence_signal(struct tu_fence *fence);
void
tu_fence_wait_idle(struct tu_fence *fence);
struct tu_queue
{
   VK_LOADER_DATA _loader_data;
   struct tu_device *device;
   uint32_t queue_family_index;
   VkDeviceQueueCreateFlags flags;

   uint32_t msm_queue_id;
   struct tu_fence submit_fence;
};
struct tu_device
{
   VK_LOADER_DATA _loader_data;

   VkAllocationCallbacks alloc;

   struct tu_instance *instance;

   struct tu_queue *queues[TU_MAX_QUEUE_FAMILIES];
   int queue_count[TU_MAX_QUEUE_FAMILIES];

   struct tu_physical_device *physical_device;

   struct ir3_compiler *compiler;

   /* Backup in-memory cache to be used if the app doesn't provide one */
   struct tu_pipeline_cache *mem_cache;

   struct tu_bo vsc_draw_strm;
   struct tu_bo vsc_prim_strm;
   uint32_t vsc_draw_strm_pitch;
   uint32_t vsc_prim_strm_pitch;

#define MIN_SCRATCH_BO_SIZE_LOG2 12 /* A page */

   /* Currently the kernel driver uses a 32-bit GPU address space, but it
    * should be impossible to go beyond 48 bits.
    */
   struct {
      struct tu_bo bo;
      mtx_t construct_mtx;
      bool initialized;
   } scratch_bos[48 - MIN_SCRATCH_BO_SIZE_LOG2];

   struct tu_bo border_color;

   struct tu_device_extension_table enabled_extensions;
};
VkResult
_tu_device_set_lost(struct tu_device *device,
                    const char *file, int line,
                    const char *msg, ...) PRINTFLIKE(4, 5);
#define tu_device_set_lost(dev, ...)                                         \
   _tu_device_set_lost(dev, __FILE__, __LINE__, __VA_ARGS__)
static inline bool
tu_device_is_lost(struct tu_device *device)
{
   return unlikely(p_atomic_read(&device->_lost));
}
VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size);
VkResult
tu_bo_init_dmabuf(struct tu_device *dev,
                  struct tu_bo *bo,
                  uint64_t size,
                  int fd);
int
tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo);
void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo);
VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo);
/* Get a scratch bo for use inside a command buffer. This will always return
 * the same bo given the same size or similar sizes, so only one scratch bo
 * can be used at the same time. It's meant for short-lived things where we
 * need to write to some piece of memory, read from it, and then immediately
 * throw it away.
 */
VkResult
tu_get_scratch_bo(struct tu_device *dev, uint64_t size, struct tu_bo **bo);
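/* Usage sketch (illustrative only): borrowing the per-size scratch bo.
 * Because the same bo is returned for similar sizes, the result must be
 * consumed before anyone else asks for a scratch bo.
 *
 *    struct tu_bo *scratch;
 *    VkResult result = tu_get_scratch_bo(dev, 64, &scratch);
 *    if (result != VK_SUCCESS)
 *       return result;
 *    // write to scratch->iova from the GPU, read it back, then forget it
 */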
struct tu_cs_entry
{
   /* No ownership */
   const struct tu_bo *bo;

   uint32_t size;
   uint32_t offset;
};

struct tu_cs_memory {
   uint32_t *map;
   uint64_t iova;
};

struct tu_draw_state {
   uint64_t iova : 48;
   uint32_t size : 16;
};
enum tu_dynamic_state
{
   /* re-use VK_DYNAMIC_STATE_ enums for non-extended dynamic states */
   TU_DYNAMIC_STATE_SAMPLE_LOCATIONS = VK_DYNAMIC_STATE_STENCIL_REFERENCE + 1,
   TU_DYNAMIC_STATE_COUNT,
};
enum tu_draw_state_group_id
{
   TU_DRAW_STATE_PROGRAM,
   TU_DRAW_STATE_PROGRAM_BINNING,
   TU_DRAW_STATE_VI_BINNING,
   TU_DRAW_STATE_VS_CONST,
   TU_DRAW_STATE_HS_CONST,
   TU_DRAW_STATE_DS_CONST,
   TU_DRAW_STATE_GS_CONST,
   TU_DRAW_STATE_FS_CONST,
   TU_DRAW_STATE_DESC_SETS,
   TU_DRAW_STATE_DESC_SETS_LOAD,
   TU_DRAW_STATE_VS_PARAMS,
   TU_DRAW_STATE_INPUT_ATTACHMENTS_GMEM,
   TU_DRAW_STATE_INPUT_ATTACHMENTS_SYSMEM,

   /* dynamic state related draw states */
   TU_DRAW_STATE_DYNAMIC,
   TU_DRAW_STATE_COUNT = TU_DRAW_STATE_DYNAMIC + TU_DYNAMIC_STATE_COUNT,
};
enum tu_cs_mode
{
   /*
    * A command stream in TU_CS_MODE_GROW mode grows automatically whenever it
    * is full. tu_cs_begin must be called before command packet emission and
    * tu_cs_end must be called after.
    *
    * This mode may create multiple entries internally. The entries must be
    * submitted together.
    */
   TU_CS_MODE_GROW,

   /*
    * A command stream in TU_CS_MODE_EXTERNAL mode wraps an external,
    * fixed-size buffer. tu_cs_begin and tu_cs_end are optional and have no
    * effect on it.
    *
    * This mode does not create any entry or any BO.
    */
   TU_CS_MODE_EXTERNAL,

   /*
    * A command stream in TU_CS_MODE_SUB_STREAM mode does not support direct
    * command packet emission. tu_cs_begin_sub_stream must be called to get a
    * sub-stream to emit command packets to. When done with the sub-stream,
    * tu_cs_end_sub_stream must be called.
    *
    * This mode does not create any entry internally.
    */
   TU_CS_MODE_SUB_STREAM,
};
struct tu_cs
{
   uint32_t *reserved_end;

   struct tu_device *device;
   enum tu_cs_mode mode;
   uint32_t next_bo_size;

   struct tu_cs_entry *entries;
   uint32_t entry_count;
   uint32_t entry_capacity;

   uint32_t bo_capacity;

   /* state for cond_exec_start/cond_exec_end */
   uint32_t *cond_dwords;
};
struct tu_device_memory
{
   /* for dedicated allocations */
   struct tu_image *image;
   struct tu_buffer *buffer;
};
struct tu_descriptor_range
{
   uint64_t va;
   uint32_t size;
};
struct tu_descriptor_set
{
   const struct tu_descriptor_set_layout *layout;
   struct tu_descriptor_pool *pool;

   uint32_t *mapped_ptr;

   uint32_t *dynamic_descriptors;

   struct tu_bo *buffers[0];
};
struct tu_push_descriptor_set
{
   struct tu_descriptor_set set;
   uint32_t capacity;
};
struct tu_descriptor_pool_entry
{
   uint32_t offset;
   uint32_t size;
   struct tu_descriptor_set *set;
};
struct tu_descriptor_pool
{
   struct tu_bo bo;
   uint64_t current_offset;
   uint64_t size;

   uint8_t *host_memory_base;
   uint8_t *host_memory_ptr;
   uint8_t *host_memory_end;

   uint32_t entry_count;
   uint32_t max_entry_count;
   struct tu_descriptor_pool_entry entries[0];
};
struct tu_descriptor_update_template_entry
{
   VkDescriptorType descriptor_type;

   /* The number of descriptors to update */
   uint32_t descriptor_count;

   /* Into mapped_ptr or dynamic_descriptors, in units of the respective array
    */
   uint32_t dst_offset;

   /* In dwords. Not valid/used for dynamic descriptors */
   size_t dst_stride;

   uint32_t buffer_offset;

   /* Only valid for combined image samplers and samplers */
   uint16_t has_sampler;

   /* For push descriptors */
   const uint32_t *immutable_samplers;
};
struct tu_descriptor_update_template
{
   uint32_t entry_count;
   struct tu_descriptor_update_template_entry entry[0];
};
struct tu_buffer
{
   VkDeviceSize size;

   VkBufferUsageFlags usage;
   VkBufferCreateFlags flags;

   struct tu_bo *bo;
   VkDeviceSize bo_offset;
};
static inline uint64_t
tu_buffer_iova(struct tu_buffer *buffer)
{
   return buffer->bo->iova + buffer->bo_offset;
}
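/* Usage sketch (illustrative only): resolving the GPU address of a buffer
 * binding. `buf` and `offset` are hypothetical names.
 *
 *    uint64_t base = tu_buffer_iova(buf);
 *    uint64_t addr = base + offset;   // address of a range within the buffer
 */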
struct tu_vertex_binding
{
   struct tu_buffer *buffer;
   VkDeviceSize offset;
};
const char *
tu_get_debug_option_name(int id);

const char *
tu_get_perftest_option_name(int id);
struct tu_descriptor_state
{
   struct tu_descriptor_set *sets[MAX_SETS];
   uint32_t dynamic_descriptors[MAX_DYNAMIC_BUFFERS * A6XX_TEX_CONST_DWORDS];
};
enum tu_cmd_dirty_bits
{
   TU_CMD_DIRTY_COMPUTE_PIPELINE = 1 << 1,
   TU_CMD_DIRTY_VERTEX_BUFFERS = 1 << 2,
   TU_CMD_DIRTY_DESCRIPTOR_SETS = 1 << 3,
   TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS = 1 << 4,
   TU_CMD_DIRTY_SHADER_CONSTS = 1 << 5,
   /* all draw states were disabled and need to be re-enabled: */
   TU_CMD_DIRTY_DRAW_STATE = 1 << 7,
};
/* There are only three cache domains we have to care about: the CCU, or
 * color cache unit, which is used for color and depth/stencil attachments
 * and copy/blit destinations, and is split conceptually into color and depth,
 * and the universal cache or UCHE which is used for pretty much everything
 * else, except for the CP (uncached) and host. We need to flush whenever data
 * crosses these boundaries.
 */
enum tu_cmd_access_mask {
   TU_ACCESS_UCHE_READ = 1 << 0,
   TU_ACCESS_UCHE_WRITE = 1 << 1,
   TU_ACCESS_CCU_COLOR_READ = 1 << 2,
   TU_ACCESS_CCU_COLOR_WRITE = 1 << 3,
   TU_ACCESS_CCU_DEPTH_READ = 1 << 4,
   TU_ACCESS_CCU_DEPTH_WRITE = 1 << 5,

   /* Experiments have shown that while it's safe to avoid flushing the CCU
    * after each blit/renderpass, it's not safe to assume that subsequent
    * lookups with a different attachment state will hit unflushed cache
    * entries. That is, the CCU needs to be flushed and possibly invalidated
    * when accessing memory with a different attachment state. Writing to an
    * attachment under the following conditions after clearing using the
    * normal 2d engine path is known to have issues:
    *
    * - It isn't the 0'th layer.
    * - There is more than one attachment, and this isn't the 0'th attachment
    *   (this seems to also depend on the cpp of the attachments).
    *
    * Our best guess is that the layer/MRT state is used when computing
    * the location of a cache entry in CCU, to avoid conflicts. We assume that
    * any access in a renderpass after or before an access by a transfer needs
    * a flush/invalidate, and use the _INCOHERENT variants to represent access
    * by a transfer.
    */
   TU_ACCESS_CCU_COLOR_INCOHERENT_READ = 1 << 6,
   TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE = 1 << 7,
   TU_ACCESS_CCU_DEPTH_INCOHERENT_READ = 1 << 8,
   TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE = 1 << 9,

   TU_ACCESS_SYSMEM_READ = 1 << 10,
   TU_ACCESS_SYSMEM_WRITE = 1 << 11,

   /* Set if a WFI is required due to data being read by the CP or the 2D
    * engine.
    */
   TU_ACCESS_WFI_READ = 1 << 12,

   TU_ACCESS_READ =
      TU_ACCESS_UCHE_READ |
      TU_ACCESS_CCU_COLOR_READ |
      TU_ACCESS_CCU_DEPTH_READ |
      TU_ACCESS_CCU_COLOR_INCOHERENT_READ |
      TU_ACCESS_CCU_DEPTH_INCOHERENT_READ |
      TU_ACCESS_SYSMEM_READ,

   TU_ACCESS_WRITE =
      TU_ACCESS_UCHE_WRITE |
      TU_ACCESS_CCU_COLOR_WRITE |
      TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE |
      TU_ACCESS_CCU_DEPTH_WRITE |
      TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE |
      TU_ACCESS_SYSMEM_WRITE,
};
enum tu_cmd_flush_bits {
   TU_CMD_FLAG_CCU_FLUSH_DEPTH = 1 << 0,
   TU_CMD_FLAG_CCU_FLUSH_COLOR = 1 << 1,
   TU_CMD_FLAG_CCU_INVALIDATE_DEPTH = 1 << 2,
   TU_CMD_FLAG_CCU_INVALIDATE_COLOR = 1 << 3,
   TU_CMD_FLAG_CACHE_FLUSH = 1 << 4,
   TU_CMD_FLAG_CACHE_INVALIDATE = 1 << 5,

   TU_CMD_FLAG_ALL_FLUSH =
      TU_CMD_FLAG_CCU_FLUSH_DEPTH |
      TU_CMD_FLAG_CCU_FLUSH_COLOR |
      TU_CMD_FLAG_CACHE_FLUSH,

   TU_CMD_FLAG_ALL_INVALIDATE =
      TU_CMD_FLAG_CCU_INVALIDATE_DEPTH |
      TU_CMD_FLAG_CCU_INVALIDATE_COLOR |
      TU_CMD_FLAG_CACHE_INVALIDATE,

   TU_CMD_FLAG_WFI = 1 << 6,
};
/* Changing the CCU from sysmem mode to gmem mode or vice-versa is pretty
 * heavy, involving a CCU cache flush/invalidate and a WFI in order to change
 * which part of the gmem is used by the CCU. Here we keep track of what the
 * CCU state is, so that we can avoid unnecessary transitions.
 */
enum tu_cmd_ccu_state {
   TU_CMD_CCU_SYSMEM,
   TU_CMD_CCU_GMEM,
   TU_CMD_CCU_UNKNOWN,
};
struct tu_cache_state {
   /* Caches which must be made available (flushed) eventually if there are
    * any users outside that cache domain, and caches which must be
    * invalidated eventually if there are any reads.
    */
   enum tu_cmd_flush_bits pending_flush_bits;
   /* Pending flushes */
   enum tu_cmd_flush_bits flush_bits;
};
struct tu_cmd_state
{
   struct tu_pipeline *pipeline;
   struct tu_pipeline *compute_pipeline;

   struct tu_buffer *buffers[MAX_VBS];
   VkDeviceSize offsets[MAX_VBS];

   /* for dynamic states that can't be emitted directly */
   uint32_t dynamic_stencil_mask;
   uint32_t dynamic_stencil_wrmask;
   uint32_t dynamic_stencil_ref;
   uint32_t dynamic_gras_su_cntl;

   /* saved states to re-emit in TU_CMD_DIRTY_DRAW_STATE case */
   struct tu_draw_state dynamic_state[TU_DYNAMIC_STATE_COUNT];
   struct tu_cs_entry vertex_buffers_ib;
   struct tu_cs_entry shader_const_ib[MESA_SHADER_STAGES];
   struct tu_cs_entry desc_sets_ib, desc_sets_load_ib;
   struct tu_cs_entry ia_gmem_ib, ia_sysmem_ib;

   struct tu_draw_state vs_params;

   uint32_t max_index_count;

   /* because streamout base has to be 32-byte aligned
    * there is an extra offset to deal with when it is
    * unaligned
    */
   uint8_t streamout_offset[IR3_MAX_SO_BUFFERS];

   /* Renderpasses are tricky, because we may need to flush differently if
    * using sysmem vs. gmem and therefore we have to delay any flushing that
    * happens before a renderpass. So we have to have two copies of the flush
    * state, one for intra-renderpass flushes (i.e. renderpass dependencies)
    * and one for outside a renderpass.
    */
   struct tu_cache_state cache;
   struct tu_cache_state renderpass_cache;

   enum tu_cmd_ccu_state ccu_state;

   const struct tu_render_pass *pass;
   const struct tu_subpass *subpass;
   const struct tu_framebuffer *framebuffer;
   VkRect2D render_area;

   struct tu_cs_entry tile_store_ib;
};
struct tu_cmd_pool
{
   VkAllocationCallbacks alloc;
   struct list_head cmd_buffers;
   struct list_head free_cmd_buffers;
   uint32_t queue_family_index;
};
struct tu_cmd_buffer_upload
{
   struct list_head list;
};
enum tu_cmd_buffer_status
{
   TU_CMD_BUFFER_STATUS_INVALID,
   TU_CMD_BUFFER_STATUS_INITIAL,
   TU_CMD_BUFFER_STATUS_RECORDING,
   TU_CMD_BUFFER_STATUS_EXECUTABLE,
   TU_CMD_BUFFER_STATUS_PENDING,
};
struct tu_bo_list
{
   uint32_t count;
   uint32_t capacity;
   struct drm_msm_gem_submit_bo *bo_infos;
};

#define TU_BO_LIST_FAILED (~0)
void
tu_bo_list_init(struct tu_bo_list *list);
void
tu_bo_list_destroy(struct tu_bo_list *list);
void
tu_bo_list_reset(struct tu_bo_list *list);
uint32_t
tu_bo_list_add(struct tu_bo_list *list,
               const struct tu_bo *bo,
               uint32_t flags);
VkResult
tu_bo_list_merge(struct tu_bo_list *list, const struct tu_bo_list *other);
/* This struct defines the layout of the scratch_bo */
struct tu6_control
{
   uint32_t seqno_dummy; /* dummy seqno for CP_EVENT_WRITE */
   volatile uint32_t vsc_overflow;
   /* flag set from cmdstream when VSC overflow detected: */
   uint32_t vsc_scratch;

   /* scratch space for VPC_SO[i].FLUSH_BASE_LO/HI, start on 32 byte boundary. */
   struct {
      uint32_t offset;
      uint32_t pad[7];
   } flush_base[4];
};

#define ctrl_offset(member) offsetof(struct tu6_control, member)
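/* Usage sketch (illustrative only): computing the offset of a field in the
 * scratch_bo so the GPU can be pointed at it.
 *
 *    uint64_t overflow_iova =
 *       cmd->scratch_bo.iova + ctrl_offset(vsc_overflow);
 */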
struct tu_cmd_buffer
{
   VK_LOADER_DATA _loader_data;

   struct tu_device *device;

   struct tu_cmd_pool *pool;
   struct list_head pool_link;

   VkCommandBufferUsageFlags usage_flags;
   VkCommandBufferLevel level;
   enum tu_cmd_buffer_status status;

   struct tu_cmd_state state;
   struct tu_vertex_binding vertex_bindings[MAX_VBS];
   uint32_t vertex_bindings_set;
   uint32_t queue_family_index;

   uint32_t push_constants[MAX_PUSH_CONSTANTS_SIZE / 4];
   VkShaderStageFlags push_constant_stages;
   struct tu_descriptor_set meta_push_descriptors;

   struct tu_descriptor_state descriptors[MAX_BIND_POINTS];

   struct tu_cmd_buffer_upload upload;

   VkResult record_result;

   struct tu_bo_list bo_list;

   struct tu_cs draw_cs;
   struct tu_cs draw_epilogue_cs;

   struct tu_bo scratch_bo;

   struct tu_bo vsc_draw_strm;
   struct tu_bo vsc_prim_strm;
   uint32_t vsc_draw_strm_pitch;
   uint32_t vsc_prim_strm_pitch;
};
/* Temporary struct for tracking a register state to be written, used by
 * a6xx-pack.h and tu_cs_emit_regs()
 */
struct tu_reg_value {
   uint32_t reg;
   uint64_t value;
   bool is_address;
   struct tu_bo *bo;
   bool bo_write;
   uint32_t bo_offset;
   uint32_t bo_shift;
};
void tu_emit_cache_flush_renderpass(struct tu_cmd_buffer *cmd_buffer,
                                    struct tu_cs *cs);

void tu_emit_cache_flush_ccu(struct tu_cmd_buffer *cmd_buffer,
                             struct tu_cs *cs,
                             enum tu_cmd_ccu_state ccu_state);

void
tu6_emit_event_write(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     enum vgt_event_type event);
static inline struct tu_descriptor_state *
tu_get_descriptors_state(struct tu_cmd_buffer *cmd_buffer,
                         VkPipelineBindPoint bind_point)
{
   return &cmd_buffer->descriptors[bind_point];
}
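/* Usage sketch (illustrative only): descriptor state is tracked per bind
 * point, so graphics and compute each get their own copy. `cmd` and `set`
 * are hypothetical.
 *
 *    struct tu_descriptor_state *descriptors =
 *       tu_get_descriptors_state(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS);
 *    descriptors->sets[0] = set;   // update descriptor set 0
 */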
struct tu_shader_module
{
   unsigned char sha1[20];

   const uint32_t *code[0];
};
struct tu_push_constant_range
{
   uint32_t lo;
   uint32_t count;
};
struct tu_shader
{
   struct ir3_shader *ir3_shader;

   struct tu_push_constant_range push_consts;
   uint8_t active_desc_sets;
};
struct tu_shader *
tu_shader_create(struct tu_device *dev,
                 gl_shader_stage stage,
                 const VkPipelineShaderStageCreateInfo *stage_info,
                 struct tu_pipeline_layout *layout,
                 const VkAllocationCallbacks *alloc);

void
tu_shader_destroy(struct tu_device *dev,
                  struct tu_shader *shader,
                  const VkAllocationCallbacks *alloc);
struct tu_program_descriptor_linkage
{
   struct ir3_const_state const_state;

   struct tu_push_constant_range push_consts;
};
struct tu_pipeline
{
   struct tu_pipeline_layout *layout;

   bool need_indirect_descriptor_sets;
   VkShaderStageFlags active_stages;
   uint32_t active_desc_sets;

   /* mask of enabled dynamic states
    * if BIT(i) is set, pipeline->dynamic_state[i] is *NOT* used
    */
   uint32_t dynamic_state_mask;
   struct tu_draw_state dynamic_state[TU_DYNAMIC_STATE_COUNT];

   /* gras_su_cntl without line width, used for dynamic line width state */
   uint32_t gras_su_cntl;

   struct {
      struct tu_cs_entry state_ib;
      struct tu_cs_entry binning_state_ib;

      struct tu_program_descriptor_linkage link[MESA_SHADER_STAGES];
   } program;

   struct {
      struct tu_cs_entry state_ib;
   } load_state;

   struct {
      struct tu_cs_entry state_ib;
      struct tu_cs_entry binning_state_ib;
      uint32_t bindings_used;
   } vi;

   struct {
      enum pc_di_primtype primtype;
      bool primitive_restart;
   } ia;

   struct {
      uint32_t patch_type;
      uint32_t per_vertex_output_size;
      uint32_t per_patch_output_size;
      uint32_t hs_bo_regid;
      uint32_t ds_bo_regid;
      bool upper_left_domain_origin;
   } tess;

   struct {
      struct tu_cs_entry state_ib;
   } rast;

   struct {
      struct tu_cs_entry state_ib;
   } ds;

   struct {
      struct tu_cs_entry state_ib;
   } blend;

   struct {
      uint32_t local_size[3];
   } compute;
};
void
tu6_emit_viewport(struct tu_cs *cs, const VkViewport *viewport);

void
tu6_emit_scissor(struct tu_cs *cs, const VkRect2D *scissor);

void
tu6_emit_sample_locations(struct tu_cs *cs,
                          const VkSampleLocationsInfoEXT *samp_loc);

void
tu6_emit_depth_bias(struct tu_cs *cs,
                    float constant_factor,
                    float clamp,
                    float slope_factor);

void tu6_emit_msaa(struct tu_cs *cs, VkSampleCountFlagBits samples);

void tu6_emit_window_scissor(struct tu_cs *cs,
                             uint32_t x1, uint32_t y1,
                             uint32_t x2, uint32_t y2);

void tu6_emit_window_offset(struct tu_cs *cs, uint32_t x1, uint32_t y1);

void
tu6_emit_xs_config(struct tu_cs *cs,
                   gl_shader_stage stage,
                   const struct ir3_shader_variant *xs,
                   uint64_t binary_iova);

void
tu6_emit_vpc(struct tu_cs *cs,
             const struct ir3_shader_variant *vs,
             const struct ir3_shader_variant *hs,
             const struct ir3_shader_variant *ds,
             const struct ir3_shader_variant *gs,
             const struct ir3_shader_variant *fs);

void
tu6_emit_fs_inputs(struct tu_cs *cs, const struct ir3_shader_variant *fs);
struct tu_image_view;
void
tu_resolve_sysmem(struct tu_cmd_buffer *cmd,
                  struct tu_cs *cs,
                  struct tu_image_view *src,
                  struct tu_image_view *dst,
                  uint32_t layers,
                  const VkRect2D *rect);

void
tu_clear_sysmem_attachment(struct tu_cmd_buffer *cmd,
                           struct tu_cs *cs,
                           uint32_t a,
                           const VkRenderPassBeginInfo *info);
void
tu_clear_gmem_attachment(struct tu_cmd_buffer *cmd,
                         struct tu_cs *cs,
                         uint32_t a,
                         const VkRenderPassBeginInfo *info);

void
tu_load_gmem_attachment(struct tu_cmd_buffer *cmd,
                        struct tu_cs *cs,
                        uint32_t a,
                        bool force_load);

/* expose this function to be able to emit load without checking LOAD_OP */
void
tu_emit_load_gmem_attachment(struct tu_cmd_buffer *cmd,
                             struct tu_cs *cs,
                             uint32_t a);

/* note: gmem store can also resolve */
void
tu_store_gmem_attachment(struct tu_cmd_buffer *cmd,
                         struct tu_cs *cs,
                         uint32_t a,
                         uint32_t gmem_a);
enum tu_supported_formats {
   FMT_VERTEX = 1,
   FMT_TEXTURE = 2,
   FMT_COLOR = 4,
};
struct tu_native_format
{
   enum a6xx_format fmt : 8;
   enum a3xx_color_swap swap : 8;
   enum a6xx_tile_mode tile_mode : 8;
   enum tu_supported_formats supported : 8;
};

struct tu_native_format tu6_format_vtx(VkFormat format);
struct tu_native_format tu6_format_color(VkFormat format,
                                         enum a6xx_tile_mode tile_mode);
struct tu_native_format tu6_format_texture(VkFormat format,
                                           enum a6xx_tile_mode tile_mode);
static inline enum a6xx_format
tu6_base_format(VkFormat format)
{
   /* note: tu6_format_color doesn't care about tiling for .fmt field */
   return tu6_format_color(format, TILE6_LINEAR).fmt;
}
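/* Usage sketch (illustrative only): translating a Vulkan format to the
 * hardware format enum when tiling is irrelevant.
 *
 *    enum a6xx_format hw_fmt = tu6_base_format(VK_FORMAT_R8G8B8A8_UNORM);
 */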
struct tu_image
{
   /* The original VkFormat provided by the client. This may not match any
    * of the actual surface formats.
    */
   VkFormat vk_format;
   VkImageAspectFlags aspects;
   VkImageUsageFlags usage;  /**< Superset of VkImageCreateInfo::usage. */
   VkImageTiling tiling;     /** VkImageCreateInfo::tiling */
   VkImageCreateFlags flags; /** VkImageCreateInfo::flags */

   uint32_t level_count;
   uint32_t layer_count;
   VkSampleCountFlagBits samples;

   struct fdl_layout layout;

   unsigned queue_family_mask;

   /* For VK_ANDROID_native_buffer, the WSI image owns the memory. */
   VkDeviceMemory owned_memory;

   /* Set when bound */
   struct tu_bo *bo;
   VkDeviceSize bo_offset;
};
static inline uint32_t
tu_get_layerCount(const struct tu_image *image,
                  const VkImageSubresourceRange *range)
{
   return range->layerCount == VK_REMAINING_ARRAY_LAYERS
             ? image->layer_count - range->baseArrayLayer
             : range->layerCount;
}

static inline uint32_t
tu_get_levelCount(const struct tu_image *image,
                  const VkImageSubresourceRange *range)
{
   return range->levelCount == VK_REMAINING_MIP_LEVELS
             ? image->level_count - range->baseMipLevel
             : range->levelCount;
}
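/* Usage sketch (illustrative only): resolving VK_REMAINING_* in a
 * subresource range against the image's actual dimensions.
 *
 *    uint32_t layers = tu_get_layerCount(image, &range);
 *    uint32_t levels = tu_get_levelCount(image, &range);
 *    // with range.levelCount == VK_REMAINING_MIP_LEVELS, levels covers
 *    // everything from range.baseMipLevel to the last mip level
 */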
struct tu_image_view
{
   struct tu_image *image; /**< VkImageViewCreateInfo::image */

   uint32_t layer_size;
   uint32_t ubwc_layer_size;

   /* used to determine if fast gmem store path can be used */
   VkExtent2D extent;

   uint32_t descriptor[A6XX_TEX_CONST_DWORDS];

   /* Descriptor for use as a storage image as opposed to a sampled image.
    * This has a few differences for cube maps (e.g. type).
    */
   uint32_t storage_descriptor[A6XX_TEX_CONST_DWORDS];

   /* pre-filled register values */
   uint32_t FLAG_BUFFER_PITCH;

   uint32_t RB_MRT_BUF_INFO;
   uint32_t SP_FS_MRT_REG;

   uint32_t SP_PS_2D_SRC_INFO;
   uint32_t SP_PS_2D_SRC_SIZE;

   uint32_t RB_2D_DST_INFO;

   uint32_t RB_BLIT_DST_INFO;
};
struct tu_sampler_ycbcr_conversion {
   VkFormat format;
   VkSamplerYcbcrModelConversion ycbcr_model;
   VkSamplerYcbcrRange ycbcr_range;
   VkComponentMapping components;
   VkChromaLocation chroma_offsets[2];
   VkFilter chroma_filter;
};
struct tu_sampler {
   uint32_t descriptor[A6XX_TEX_SAMP_DWORDS];
   struct tu_sampler_ycbcr_conversion *ycbcr_sampler;
};
void
tu_cs_image_ref(struct tu_cs *cs, const struct tu_image_view *iview,
                uint32_t layer);

void
tu_cs_image_ref_2d(struct tu_cs *cs, const struct tu_image_view *iview,
                   uint32_t layer, bool src);

void
tu_cs_image_flag_ref(struct tu_cs *cs, const struct tu_image_view *iview,
                     uint32_t layer);
VkResult
tu_image_create(VkDevice _device,
                const VkImageCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *alloc,
                VkImage *pImage,
                uint64_t modifier,
                const VkSubresourceLayout *plane_layouts);
VkResult
tu_image_from_gralloc(VkDevice device_h,
                      const VkImageCreateInfo *base_info,
                      const VkNativeBufferANDROID *gralloc_info,
                      const VkAllocationCallbacks *alloc,
                      VkImage *out_image_h);

void
tu_image_view_init(struct tu_image_view *view,
                   const VkImageViewCreateInfo *pCreateInfo);
struct tu_buffer_view
{
   uint32_t descriptor[A6XX_TEX_CONST_DWORDS];

   struct tu_buffer *buffer;
};

void
tu_buffer_view_init(struct tu_buffer_view *view,
                    struct tu_device *device,
                    const VkBufferViewCreateInfo *pCreateInfo);
struct tu_attachment_info
{
   struct tu_image_view *attachment;
};
struct tu_framebuffer
{
   /* size of the first tile */
   VkExtent2D tile0;
   /* number of tiles */
   VkExtent2D tile_count;

   /* size of the first VSC pipe */
   VkExtent2D pipe0;
   /* number of VSC pipes */
   VkExtent2D pipe_count;

   /* pipe register values */
   uint32_t pipe_config[MAX_VSC_PIPES];
   uint32_t pipe_sizes[MAX_VSC_PIPES];

   uint32_t attachment_count;
   struct tu_attachment_info attachments[0];
};
void
tu_framebuffer_tiling_config(struct tu_framebuffer *fb,
                             const struct tu_device *device,
                             const struct tu_render_pass *pass);
struct tu_subpass_barrier {
   VkPipelineStageFlags src_stage_mask;
   VkAccessFlags src_access_mask;
   VkAccessFlags dst_access_mask;
   bool incoherent_ccu_color, incoherent_ccu_depth;
};
struct tu_subpass_attachment
{
   uint32_t attachment;
};
struct tu_subpass
{
   uint32_t input_count;
   uint32_t color_count;
   struct tu_subpass_attachment *input_attachments;
   struct tu_subpass_attachment *color_attachments;
   struct tu_subpass_attachment *resolve_attachments;
   struct tu_subpass_attachment depth_stencil_attachment;

   VkSampleCountFlagBits samples;

   struct tu_subpass_barrier start_barrier;
};
struct tu_render_pass_attachment
{
   VkImageAspectFlags clear_mask;
   int32_t gmem_offset;
};
struct tu_render_pass
{
   uint32_t attachment_count;
   uint32_t subpass_count;
   uint32_t gmem_pixels;
   uint32_t tile_align_w;
   struct tu_subpass_attachment *subpass_attachments;
   struct tu_render_pass_attachment *attachments;
   struct tu_subpass_barrier end_barrier;
   struct tu_subpass subpasses[0];
};
struct tu_query_pool
{
   uint32_t pipeline_statistics;
};
enum tu_semaphore_kind
{
   TU_SEMAPHORE_NONE,
   TU_SEMAPHORE_SYNCOBJ,
};
struct tu_semaphore_part
{
   enum tu_semaphore_kind kind;
};

struct tu_semaphore
{
   struct tu_semaphore_part permanent;
   struct tu_semaphore_part temporary;
};
void
tu_set_descriptor_set(struct tu_cmd_buffer *cmd_buffer,
                      VkPipelineBindPoint bind_point,
                      struct tu_descriptor_set *set,
                      unsigned idx);

void
tu_update_descriptor_sets(struct tu_device *device,
                          struct tu_cmd_buffer *cmd_buffer,
                          VkDescriptorSet overrideSet,
                          uint32_t descriptorWriteCount,
                          const VkWriteDescriptorSet *pDescriptorWrites,
                          uint32_t descriptorCopyCount,
                          const VkCopyDescriptorSet *pDescriptorCopies);
void
tu_update_descriptor_set_with_template(
   struct tu_device *device,
   struct tu_cmd_buffer *cmd_buffer,
   struct tu_descriptor_set *set,
   VkDescriptorUpdateTemplate descriptorUpdateTemplate,
   const void *pData);
int
tu_drm_get_gpu_id(const struct tu_physical_device *dev, uint32_t *id);

int
tu_drm_get_gmem_size(const struct tu_physical_device *dev, uint32_t *size);

int
tu_drm_get_gmem_base(const struct tu_physical_device *dev, uint64_t *base);
int
tu_drm_submitqueue_new(const struct tu_device *dev,
                       int priority,
                       uint32_t *queue_id);

void
tu_drm_submitqueue_close(const struct tu_device *dev, uint32_t queue_id);
uint32_t
tu_gem_new(const struct tu_device *dev, uint64_t size, uint32_t flags);
uint32_t
tu_gem_import_dmabuf(const struct tu_device *dev,
                     int prime_fd,
                     uint64_t size);
int
tu_gem_export_dmabuf(const struct tu_device *dev, uint32_t gem_handle);
void
tu_gem_close(const struct tu_device *dev, uint32_t gem_handle);
uint64_t
tu_gem_info_offset(const struct tu_device *dev, uint32_t gem_handle);
uint64_t
tu_gem_info_iova(const struct tu_device *dev, uint32_t gem_handle);
#define TU_DEFINE_HANDLE_CASTS(__tu_type, __VkType)                          \
                                                                             \
   static inline struct __tu_type *__tu_type##_from_handle(__VkType _handle) \
   {                                                                         \
      return (struct __tu_type *) _handle;                                   \
   }                                                                         \
                                                                             \
   static inline __VkType __tu_type##_to_handle(struct __tu_type *_obj)      \
   {                                                                         \
      return (__VkType) _obj;                                                \
   }

#define TU_DEFINE_NONDISP_HANDLE_CASTS(__tu_type, __VkType)                  \
                                                                             \
   static inline struct __tu_type *__tu_type##_from_handle(__VkType _handle) \
   {                                                                         \
      return (struct __tu_type *) (uintptr_t) _handle;                       \
   }                                                                         \
                                                                             \
   static inline __VkType __tu_type##_to_handle(struct __tu_type *_obj)      \
   {                                                                         \
      return (__VkType)(uintptr_t) _obj;                                     \
   }

#define TU_FROM_HANDLE(__tu_type, __name, __handle)                          \
   struct __tu_type *__name = __tu_type##_from_handle(__handle)
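/* Usage sketch (illustrative only): the cast macros wrap and unwrap Vulkan
 * handles at API entrypoints.
 *
 *    VkResult
 *    tu_SomeEntrypoint(VkDevice _device, VkBuffer _buffer)   // hypothetical
 *    {
 *       TU_FROM_HANDLE(tu_device, device, _device);
 *       TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
 *       // ... work with the driver-internal structs ...
 *       return VK_SUCCESS;
 *    }
 */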
TU_DEFINE_HANDLE_CASTS(tu_cmd_buffer, VkCommandBuffer)
TU_DEFINE_HANDLE_CASTS(tu_device, VkDevice)
TU_DEFINE_HANDLE_CASTS(tu_instance, VkInstance)
TU_DEFINE_HANDLE_CASTS(tu_physical_device, VkPhysicalDevice)
TU_DEFINE_HANDLE_CASTS(tu_queue, VkQueue)

TU_DEFINE_NONDISP_HANDLE_CASTS(tu_cmd_pool, VkCommandPool)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_buffer, VkBuffer)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_buffer_view, VkBufferView)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_pool, VkDescriptorPool)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_set, VkDescriptorSet)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_set_layout,
                               VkDescriptorSetLayout)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_update_template,
                               VkDescriptorUpdateTemplate)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_device_memory, VkDeviceMemory)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_fence, VkFence)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_event, VkEvent)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_framebuffer, VkFramebuffer)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_image, VkImage)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_image_view, VkImageView)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_pipeline_cache, VkPipelineCache)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_pipeline, VkPipeline)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_pipeline_layout, VkPipelineLayout)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_query_pool, VkQueryPool)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_render_pass, VkRenderPass)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_sampler, VkSampler)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_sampler_ycbcr_conversion,
                               VkSamplerYcbcrConversion)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_shader_module, VkShaderModule)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_semaphore, VkSemaphore)
#endif /* TU_PRIVATE_H */