#include "main/macros.h"
#include "util/list.h"
#include "util/macros.h"
+#include "util/u_atomic.h"
#include "vk_alloc.h"
+#include "vk_object.h"
#include "vk_debug_report.h"
#include "wsi_common.h"
-#include "drm-uapi/msm_drm.h"
#include "ir3/ir3_compiler.h"
#include "ir3/ir3_shader.h"
#include "tu_descriptor_set.h"
#include "tu_extensions.h"
+#include "tu_util.h"
/* Pre-declarations needed for WSI entrypoints */
struct wl_surface;
#define MAX_DYNAMIC_STORAGE_BUFFERS 8
#define MAX_DYNAMIC_BUFFERS \
(MAX_DYNAMIC_UNIFORM_BUFFERS + MAX_DYNAMIC_STORAGE_BUFFERS)
-#define MAX_SAMPLES_LOG2 4
-#define NUM_META_FS_KEYS 13
#define TU_MAX_DRM_DEVICES 8
#define MAX_VIEWS 8
+#define MAX_BIND_POINTS 2 /* compute + graphics */
/* The Qualcomm driver exposes 0x20000058 */
#define MAX_STORAGE_BUFFER_RANGE 0x20000000
/* We use ldc for uniform buffer loads, just like the Qualcomm driver, so
 * the maximum supported uniform buffer size is 64k.
 */
#define MAX_UNIFORM_BUFFER_RANGE 0x10000
-#define NUM_DEPTH_CLEAR_PIPELINES 3
-
-/*
- * This is the point we switch from using CP to compute shader
- * for certain buffer operations.
- */
-#define TU_BUFFER_OPS_CS_THRESHOLD 4096
-
#define A6XX_TEX_CONST_DWORDS 16
#define A6XX_TEX_SAMP_DWORDS 4
-enum tu_mem_heap
-{
- TU_MEM_HEAP_VRAM,
- TU_MEM_HEAP_VRAM_CPU_ACCESS,
- TU_MEM_HEAP_GTT,
- TU_MEM_HEAP_COUNT
-};
-
-enum tu_mem_type
-{
- TU_MEM_TYPE_VRAM,
- TU_MEM_TYPE_GTT_WRITE_COMBINE,
- TU_MEM_TYPE_VRAM_CPU_ACCESS,
- TU_MEM_TYPE_GTT_CACHED,
- TU_MEM_TYPE_COUNT
-};
-
#define tu_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
-static inline uint32_t
-align_u32(uint32_t v, uint32_t a)
-{
- assert(a != 0 && a == (a & -a));
- return (v + a - 1) & ~(a - 1);
-}
-
-static inline uint32_t
-align_u32_npot(uint32_t v, uint32_t a)
-{
- return (v + a - 1) / a * a;
-}
-
-static inline uint64_t
-align_u64(uint64_t v, uint64_t a)
-{
- assert(a != 0 && a == (a & -a));
- return (v + a - 1) & ~(a - 1);
-}
-
-static inline int32_t
-align_i32(int32_t v, int32_t a)
-{
- assert(a != 0 && a == (a & -a));
- return (v + a - 1) & ~(a - 1);
-}
-
-/** Alignment must be a power of 2. */
-static inline bool
-tu_is_aligned(uintmax_t n, uintmax_t a)
-{
- assert(a == (a & -a));
- return (n & (a - 1)) == 0;
-}
-
-static inline uint32_t
-round_up_u32(uint32_t v, uint32_t a)
-{
- return (v + a - 1) / a;
-}
-
-static inline uint64_t
-round_up_u64(uint64_t v, uint64_t a)
-{
- return (v + a - 1) / a;
-}
-
-static inline uint32_t
-tu_minify(uint32_t n, uint32_t levels)
-{
- if (unlikely(n == 0))
- return 0;
- else
- return MAX2(n >> levels, 1);
-}
-static inline float
-tu_clamp_f(float f, float min, float max)
-{
- assert(min < max);
-
- if (f > max)
- return max;
- else if (f < min)
- return min;
- else
- return f;
-}
-
-static inline bool
-tu_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
-{
- if (*inout_mask & clear_mask) {
- *inout_mask &= ~clear_mask;
- return true;
- } else {
- return false;
- }
-}
-
#define for_each_bit(b, dword) \
for (uint32_t __dword = (dword); \
(b) = __builtin_ffs(__dword) - 1, __dword; __dword &= ~(1 << (b)))
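+/* Usage sketch (illustrative): b receives each set bit index, lowest first:
+ *
+ *    uint32_t b;
+ *    for_each_bit(b, dirty_mask)
+ *       process_bit(b);
+ *
+ * dirty_mask and process_bit() are hypothetical names.
+ */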
-#define typed_memcpy(dest, src, count) \
- ({ \
- STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
- memcpy((dest), (src), (count) * sizeof(*(src))); \
- })
-
#define COND(bool, val) ((bool) ? (val) : 0)
+#define BIT(bit) (1u << (bit))
/* Whenever we generate an error, pass it through this function. Useful for
 * debugging, where we can break on it. Only call at error site, not when
 * propagating errors. Might be useful to plug in a stack trace here.
 */
void
tu_loge(const char *format, ...) tu_printflike(1, 2);
void
-tu_loge_v(const char *format, va_list va);
-void
tu_logi(const char *format, ...) tu_printflike(1, 2);
-void
-tu_logi_v(const char *format, va_list va);
/**
 * Print a FINISHME message, including its source location.
 */
} \
} while (0)
-/* A non-fatal assert. Useful for debugging. */
-#ifdef DEBUG
-#define tu_assert(x) \
- ({ \
- if (unlikely(!(x))) \
- fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x); \
- })
-#else
-#define tu_assert(x)
-#endif
-
-/* Suppress -Wunused in stub functions */
-#define tu_use_args(...) __tu_use_args(0, ##__VA_ARGS__)
-static inline void
-__tu_use_args(int ignore, ...)
-{
-}
-
#define tu_stub() \
do { \
tu_finishme("stub %s", __func__); \
struct tu_physical_device
{
- VK_LOADER_DATA _loader_data;
+ struct vk_object_base base;
struct tu_instance *instance;
uint32_t SP_UNKNOWN_A0F8;
} magic;
+ int msm_major_version;
+ int msm_minor_version;
+
+ bool limited_z24s8;
+
/* This is the driver's on-disk cache used as a fallback as opposed to
* the pipeline cache defined by apps.
*/
TU_DEBUG_NOBIN = 1 << 3,
TU_DEBUG_SYSMEM = 1 << 4,
TU_DEBUG_FORCEBIN = 1 << 5,
+ TU_DEBUG_NOUBWC = 1 << 6,
};
struct tu_instance
{
- VK_LOADER_DATA _loader_data;
+ struct vk_object_base base;
VkAllocationCallbacks alloc;
struct tu_pipeline_cache
{
+ struct vk_object_base base;
+
struct tu_device *device;
pthread_mutex_t mutex;
{
};
-void
-tu_pipeline_cache_init(struct tu_pipeline_cache *cache,
- struct tu_device *device);
-void
-tu_pipeline_cache_finish(struct tu_pipeline_cache *cache);
-void
-tu_pipeline_cache_load(struct tu_pipeline_cache *cache,
- const void *data,
- size_t size);
-
-struct tu_shader_variant;
-
-bool
-tu_create_shader_variants_from_pipeline_cache(
- struct tu_device *device,
- struct tu_pipeline_cache *cache,
- const unsigned char *sha1,
- struct tu_shader_variant **variants);
-
-void
-tu_pipeline_cache_insert_shaders(struct tu_device *device,
- struct tu_pipeline_cache *cache,
- const unsigned char *sha1,
- struct tu_shader_variant **variants,
- const void *const *codes,
- const unsigned *code_sizes);
-
-struct tu_meta_state
-{
- VkAllocationCallbacks alloc;
-
- struct tu_pipeline_cache cache;
-};
/* queue types */
#define TU_QUEUE_GENERAL 0
struct tu_fence
{
+ struct vk_object_base base;
struct wsi_fence *fence_wsi;
bool signaled;
int fd;
struct tu_queue
{
- VK_LOADER_DATA _loader_data;
+ struct vk_object_base base;
+
struct tu_device *device;
uint32_t queue_family_index;
int queue_idx;
void *map;
};
-struct tu_device
+enum global_shader {
+ GLOBAL_SH_VS,
+ GLOBAL_SH_FS_BLIT,
+ GLOBAL_SH_FS_CLEAR0,
+ GLOBAL_SH_FS_CLEAR_MAX = GLOBAL_SH_FS_CLEAR0 + MAX_RTS,
+ GLOBAL_SH_COUNT,
+};
+
+#define TU_BORDER_COLOR_COUNT 4096
+#define TU_BORDER_COLOR_BUILTIN 6
+
+/* This struct defines the layout of the global_bo */
+struct tu6_global
{
- VK_LOADER_DATA _loader_data;
+ /* clear/blit shaders, all <= 16 instrs (16 instr = 1 instrlen unit) */
+ instr_t shaders[GLOBAL_SH_COUNT][16];
- VkAllocationCallbacks alloc;
+ uint32_t seqno_dummy; /* dummy seqno for CP_EVENT_WRITE */
+ uint32_t _pad0;
+ volatile uint32_t vsc_draw_overflow;
+ uint32_t _pad1;
+ volatile uint32_t vsc_prim_overflow;
+ uint32_t _pad2;
+ uint64_t predicate;
- struct tu_instance *instance;
+   /* scratch space for VPC_SO[i].FLUSH_BASE_LO/HI, starts on a 32-byte boundary. */
+ struct {
+ uint32_t offset;
+ uint32_t pad[7];
+ } flush_base[4];
- struct tu_meta_state meta_state;
+ /* note: larger global bo will be used for customBorderColors */
+ struct bcolor_entry bcolor_builtin[TU_BORDER_COLOR_BUILTIN], bcolor[];
+};
+#define gb_offset(member) offsetof(struct tu6_global, member)
+#define global_iova(cmd, member) ((cmd)->device->global_bo.iova + gb_offset(member))
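+/* Usage sketch (illustrative): address a tu6_global member from the GPU
+ * side via its iova, or from the host via the mapped bo (assuming the bo
+ * map points at a struct tu6_global):
+ *
+ *    uint64_t iova = global_iova(cmd, seqno_dummy);
+ *    struct tu6_global *global = cmd->device->global_bo.map;
+ *    uint32_t seqno = global->seqno_dummy;
+ */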
+
+void tu_init_clear_blit_shaders(struct tu6_global *global);
+
+/* extra space in vsc draw/prim streams */
+#define VSC_PAD 0x40
+
+struct tu_device
+{
+ struct vk_device vk;
+ struct tu_instance *instance;
struct tu_queue *queues[TU_MAX_QUEUE_FAMILIES];
int queue_count[TU_MAX_QUEUE_FAMILIES];
struct tu_physical_device *physical_device;
+ int _lost;
struct ir3_compiler *compiler;
/* Backup in-memory cache to be used if the app doesn't provide one */
struct tu_pipeline_cache *mem_cache;
- struct tu_bo vsc_draw_strm;
- struct tu_bo vsc_prim_strm;
- uint32_t vsc_draw_strm_pitch;
- uint32_t vsc_prim_strm_pitch;
+#define MIN_SCRATCH_BO_SIZE_LOG2 12 /* A page */
- struct tu_bo border_color;
+ /* Currently the kernel driver uses a 32-bit GPU address space, but it
+ * should be impossible to go beyond 48 bits.
+ */
+ struct {
+ struct tu_bo bo;
+ mtx_t construct_mtx;
+ bool initialized;
+ } scratch_bos[48 - MIN_SCRATCH_BO_SIZE_LOG2];
- struct list_head shader_slabs;
- mtx_t shader_slab_mutex;
+ struct tu_bo global_bo;
struct tu_device_extension_table enabled_extensions;
+
+ uint32_t vsc_draw_strm_pitch;
+ uint32_t vsc_prim_strm_pitch;
+ BITSET_DECLARE(custom_border_color, TU_BORDER_COLOR_COUNT);
+ mtx_t mutex;
};
+VkResult _tu_device_set_lost(struct tu_device *device,
+ const char *file, int line,
+ const char *msg, ...) PRINTFLIKE(4, 5);
+#define tu_device_set_lost(dev, ...) \
+ _tu_device_set_lost(dev, __FILE__, __LINE__, __VA_ARGS__)
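+/* Usage sketch (illustrative): report a lost device from a failed ioctl in
+ * the submit path (hypothetical call site):
+ *
+ *    if (ret != 0)
+ *       return tu_device_set_lost(dev, "submit failed: %s", strerror(errno));
+ */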
+
+static inline bool
+tu_device_is_lost(struct tu_device *device)
+{
+ return unlikely(p_atomic_read(&device->_lost));
+}
+
VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size);
VkResult
VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo);
+/* Get a scratch bo for use inside a command buffer. This will always return
+ * the same bo given the same size or similar sizes, so only one scratch bo
+ * can be used at the same time. It's meant for short-lived things where we
+ * need to write to some piece of memory, read from it, and then immediately
+ * discard it.
+ */
+VkResult
+tu_get_scratch_bo(struct tu_device *dev, uint64_t size, struct tu_bo **bo);
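+/* Lookup sketch (illustrative): sizes are bucketed by log2, so assuming a
+ * util_logbase2_ceil64()-style helper the bo for a request would be:
+ *
+ *    unsigned index = MAX2(util_logbase2_ceil64(size),
+ *                          MIN_SCRATCH_BO_SIZE_LOG2) - MIN_SCRATCH_BO_SIZE_LOG2;
+ *    struct tu_bo *bo = &dev->scratch_bos[index].bo;
+ */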
+
struct tu_cs_entry
{
/* No ownership */
uint32_t offset;
};
-struct ts_cs_memory {
+struct tu_cs_memory {
uint32_t *map;
uint64_t iova;
};
+struct tu_draw_state {
+ uint64_t iova : 48;
+ uint32_t size : 16;
+};
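+/* Emission sketch (illustrative): a tu_draw_state corresponds to one
+ * CP_SET_DRAW_STATE group entry, roughly (packet helper names are from the
+ * a6xx packet headers and may not match exactly):
+ *
+ *    tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(state.size) |
+ *                   CP_SET_DRAW_STATE__0_GROUP_ID(id));
+ *    tu_cs_emit_qw(cs, state.iova);
+ */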
+
+enum tu_dynamic_state
+{
+ /* re-use VK_DYNAMIC_STATE_ enums for non-extended dynamic states */
+ TU_DYNAMIC_STATE_SAMPLE_LOCATIONS = VK_DYNAMIC_STATE_STENCIL_REFERENCE + 1,
+ TU_DYNAMIC_STATE_COUNT,
+};
+
+enum tu_draw_state_group_id
+{
+ TU_DRAW_STATE_PROGRAM,
+ TU_DRAW_STATE_PROGRAM_BINNING,
+ TU_DRAW_STATE_TESS,
+ TU_DRAW_STATE_VB,
+ TU_DRAW_STATE_VI,
+ TU_DRAW_STATE_VI_BINNING,
+ TU_DRAW_STATE_RAST,
+ TU_DRAW_STATE_DS,
+ TU_DRAW_STATE_BLEND,
+ TU_DRAW_STATE_VS_CONST,
+ TU_DRAW_STATE_HS_CONST,
+ TU_DRAW_STATE_DS_CONST,
+ TU_DRAW_STATE_GS_CONST,
+ TU_DRAW_STATE_FS_CONST,
+ TU_DRAW_STATE_DESC_SETS,
+ TU_DRAW_STATE_DESC_SETS_LOAD,
+ TU_DRAW_STATE_VS_PARAMS,
+ TU_DRAW_STATE_INPUT_ATTACHMENTS_GMEM,
+ TU_DRAW_STATE_INPUT_ATTACHMENTS_SYSMEM,
+
+ /* dynamic state related draw states */
+ TU_DRAW_STATE_DYNAMIC,
+ TU_DRAW_STATE_COUNT = TU_DRAW_STATE_DYNAMIC + TU_DYNAMIC_STATE_COUNT,
+};
+
enum tu_cs_mode
{
struct tu_device_memory
{
+ struct vk_object_base base;
+
struct tu_bo bo;
VkDeviceSize size;
struct tu_descriptor_set
{
+ struct vk_object_base base;
+
const struct tu_descriptor_set_layout *layout;
struct tu_descriptor_pool *pool;
uint32_t size;
struct tu_descriptor_pool
{
+ struct vk_object_base base;
+
struct tu_bo bo;
uint64_t current_offset;
uint64_t size;
struct tu_descriptor_update_template
{
+ struct vk_object_base base;
+
uint32_t entry_count;
struct tu_descriptor_update_template_entry entry[0];
};
struct tu_buffer
{
+ struct vk_object_base base;
+
VkDeviceSize size;
VkBufferUsageFlags usage;
return buffer->bo->iova + buffer->bo_offset;
}
-enum tu_dynamic_state_bits
-{
- TU_DYNAMIC_VIEWPORT = 1 << 0,
- TU_DYNAMIC_SCISSOR = 1 << 1,
- TU_DYNAMIC_LINE_WIDTH = 1 << 2,
- TU_DYNAMIC_DEPTH_BIAS = 1 << 3,
- TU_DYNAMIC_BLEND_CONSTANTS = 1 << 4,
- TU_DYNAMIC_DEPTH_BOUNDS = 1 << 5,
- TU_DYNAMIC_STENCIL_COMPARE_MASK = 1 << 6,
- TU_DYNAMIC_STENCIL_WRITE_MASK = 1 << 7,
- TU_DYNAMIC_STENCIL_REFERENCE = 1 << 8,
- TU_DYNAMIC_DISCARD_RECTANGLE = 1 << 9,
- TU_DYNAMIC_SAMPLE_LOCATIONS = 1 << 10,
- TU_DYNAMIC_ALL = (1 << 11) - 1,
-};
-
struct tu_vertex_binding
{
struct tu_buffer *buffer;
VkDeviceSize offset;
};
-struct tu_viewport_state
-{
- uint32_t count;
- VkViewport viewports[MAX_VIEWPORTS];
-};
-
-struct tu_scissor_state
-{
- uint32_t count;
- VkRect2D scissors[MAX_SCISSORS];
-};
-
-struct tu_discard_rectangle_state
-{
- uint32_t count;
- VkRect2D rectangles[MAX_DISCARD_RECTANGLES];
-};
-
-struct tu_dynamic_state
-{
- /**
- * Bitmask of (1 << VK_DYNAMIC_STATE_*).
- * Defines the set of saved dynamic state.
- */
- uint32_t mask;
-
- struct tu_viewport_state viewport;
-
- struct tu_scissor_state scissor;
-
- float line_width;
-
- struct
- {
- float bias;
- float clamp;
- float slope;
- } depth_bias;
-
- float blend_constants[4];
-
- struct
- {
- float min;
- float max;
- } depth_bounds;
-
- struct
- {
- uint32_t front;
- uint32_t back;
- } stencil_compare_mask;
-
- struct
- {
- uint32_t front;
- uint32_t back;
- } stencil_write_mask;
-
- struct
- {
- uint32_t front;
- uint32_t back;
- } stencil_reference;
-
- struct tu_discard_rectangle_state discard_rectangle;
-};
-
-extern const struct tu_dynamic_state default_dynamic_state;
-
const char *
tu_get_debug_option_name(int id);
struct tu_descriptor_state
{
struct tu_descriptor_set *sets[MAX_SETS];
- uint32_t valid;
- struct tu_push_descriptor_set push_set;
- bool push_dirty;
uint32_t dynamic_descriptors[MAX_DYNAMIC_BUFFERS * A6XX_TEX_CONST_DWORDS];
- uint32_t input_attachments[MAX_RTS * A6XX_TEX_CONST_DWORDS];
-};
-
-struct tu_tile
-{
- uint8_t pipe;
- uint8_t slot;
- VkOffset2D begin;
- VkOffset2D end;
};
-struct tu_tiling_config
+enum tu_cmd_dirty_bits
{
- VkRect2D render_area;
-
- /* position and size of the first tile */
- VkRect2D tile0;
- /* number of tiles */
- VkExtent2D tile_count;
+ TU_CMD_DIRTY_VERTEX_BUFFERS = 1 << 2,
+ TU_CMD_DIRTY_DESC_SETS_LOAD = 1 << 3,
+ TU_CMD_DIRTY_COMPUTE_DESC_SETS_LOAD = 1 << 4,
+ TU_CMD_DIRTY_SHADER_CONSTS = 1 << 5,
+ /* all draw states were disabled and need to be re-enabled: */
+ TU_CMD_DIRTY_DRAW_STATE = 1 << 7,
+};
+
+/* There are only three cache domains we have to care about: the CCU, or
+ * color cache unit, which is used for color and depth/stencil attachments
+ * and copy/blit destinations and is split conceptually into color and depth,
+ * and the universal cache (UCHE), which is used for pretty much everything
+ * else, except for the CP (uncached) and the host. We need to flush whenever
+ * data crosses these domain boundaries.
+ */
- /* size of the first VSC pipe */
- VkExtent2D pipe0;
- /* number of VSC pipes */
- VkExtent2D pipe_count;
+enum tu_cmd_access_mask {
+ TU_ACCESS_UCHE_READ = 1 << 0,
+ TU_ACCESS_UCHE_WRITE = 1 << 1,
+ TU_ACCESS_CCU_COLOR_READ = 1 << 2,
+ TU_ACCESS_CCU_COLOR_WRITE = 1 << 3,
+ TU_ACCESS_CCU_DEPTH_READ = 1 << 4,
+ TU_ACCESS_CCU_DEPTH_WRITE = 1 << 5,
+
+ /* Experiments have shown that while it's safe to avoid flushing the CCU
+ * after each blit/renderpass, it's not safe to assume that subsequent
+ * lookups with a different attachment state will hit unflushed cache
+ * entries. That is, the CCU needs to be flushed and possibly invalidated
+    * when accessing memory with a different attachment state. Writing to an
+    * attachment after clearing it via the normal 2D engine path is known to
+    * have issues under the following conditions:
+    *
+    * - It isn't the 0'th layer.
+    * - There is more than one attachment, and this isn't the 0'th attachment
+    *   (this also seems to depend on the cpp of the attachments).
+ *
+ * Our best guess is that the layer/MRT state is used when computing
+ * the location of a cache entry in CCU, to avoid conflicts. We assume that
+ * any access in a renderpass after or before an access by a transfer needs
+ * a flush/invalidate, and use the _INCOHERENT variants to represent access
+ * by a transfer.
+ */
+ TU_ACCESS_CCU_COLOR_INCOHERENT_READ = 1 << 6,
+ TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE = 1 << 7,
+ TU_ACCESS_CCU_DEPTH_INCOHERENT_READ = 1 << 8,
+ TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE = 1 << 9,
- /* pipe register values */
- uint32_t pipe_config[MAX_VSC_PIPES];
- uint32_t pipe_sizes[MAX_VSC_PIPES];
+ /* Accesses by the host */
+ TU_ACCESS_HOST_READ = 1 << 10,
+ TU_ACCESS_HOST_WRITE = 1 << 11,
- /* Whether sysmem rendering must be used */
- bool force_sysmem;
-};
+   /* Accesses by a GPU engine that bypasses any cache, e.g. writes via
+    * CP_EVENT_WRITE::BLIT and the CP are SYSMEM_WRITE.
+ */
+ TU_ACCESS_SYSMEM_READ = 1 << 12,
+ TU_ACCESS_SYSMEM_WRITE = 1 << 13,
+
+ /* Set if a WFI is required. This can be required for:
+ * - 2D engine which (on some models) doesn't wait for flushes to complete
+ * before starting
+ * - CP draw indirect opcodes, where we need to wait for any flushes to
+ * complete but the CP implicitly waits for WFI's to complete and
+ * therefore we only need a WFI after the flushes.
+ */
+ TU_ACCESS_WFI_READ = 1 << 14,
-enum tu_cmd_dirty_bits
-{
- TU_CMD_DIRTY_PIPELINE = 1 << 0,
- TU_CMD_DIRTY_COMPUTE_PIPELINE = 1 << 1,
- TU_CMD_DIRTY_VERTEX_BUFFERS = 1 << 2,
- TU_CMD_DIRTY_DESCRIPTOR_SETS = 1 << 3,
- TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS = 1 << 4,
- TU_CMD_DIRTY_PUSH_CONSTANTS = 1 << 5,
- TU_CMD_DIRTY_STREAMOUT_BUFFERS = 1 << 6,
- TU_CMD_DIRTY_INPUT_ATTACHMENTS = 1 << 7,
+ /* Set if a CP_WAIT_FOR_ME is required due to the data being read by the CP
+ * without it waiting for any WFI.
+ */
+ TU_ACCESS_WFM_READ = 1 << 15,
- TU_CMD_DIRTY_DYNAMIC_LINE_WIDTH = 1 << 16,
- TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK = 1 << 17,
- TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK = 1 << 18,
- TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE = 1 << 19,
- TU_CMD_DIRTY_DYNAMIC_VIEWPORT = 1 << 20,
- TU_CMD_DIRTY_DYNAMIC_SCISSOR = 1 << 21,
+ /* Memory writes from the CP start in-order with draws and event writes,
+ * but execute asynchronously and hence need a CP_WAIT_MEM_WRITES if read.
+ */
+ TU_ACCESS_CP_WRITE = 1 << 16,
+
+ TU_ACCESS_READ =
+ TU_ACCESS_UCHE_READ |
+ TU_ACCESS_CCU_COLOR_READ |
+ TU_ACCESS_CCU_DEPTH_READ |
+ TU_ACCESS_CCU_COLOR_INCOHERENT_READ |
+ TU_ACCESS_CCU_DEPTH_INCOHERENT_READ |
+ TU_ACCESS_HOST_READ |
+ TU_ACCESS_SYSMEM_READ |
+ TU_ACCESS_WFI_READ |
+ TU_ACCESS_WFM_READ,
+
+ TU_ACCESS_WRITE =
+ TU_ACCESS_UCHE_WRITE |
+ TU_ACCESS_CCU_COLOR_WRITE |
+ TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE |
+ TU_ACCESS_CCU_DEPTH_WRITE |
+ TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE |
+ TU_ACCESS_HOST_WRITE |
+ TU_ACCESS_SYSMEM_WRITE |
+ TU_ACCESS_CP_WRITE,
+
+ TU_ACCESS_ALL =
+ TU_ACCESS_READ |
+ TU_ACCESS_WRITE,
+};
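+/* Barrier sketch (illustrative): data crossing from one cache domain to
+ * another turns into flush bits (defined below). E.g. rendering to a color
+ * attachment and then sampling it would need, roughly:
+ *
+ *    if ((src_mask & TU_ACCESS_CCU_COLOR_WRITE) &&
+ *        (dst_mask & TU_ACCESS_UCHE_READ))
+ *       flush_bits |= TU_CMD_FLAG_CCU_FLUSH_COLOR |
+ *                     TU_CMD_FLAG_CACHE_INVALIDATE;
+ *
+ * The actual mapping lives in the command buffer code.
+ */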
+
+enum tu_cmd_flush_bits {
+ TU_CMD_FLAG_CCU_FLUSH_DEPTH = 1 << 0,
+ TU_CMD_FLAG_CCU_FLUSH_COLOR = 1 << 1,
+ TU_CMD_FLAG_CCU_INVALIDATE_DEPTH = 1 << 2,
+ TU_CMD_FLAG_CCU_INVALIDATE_COLOR = 1 << 3,
+ TU_CMD_FLAG_CACHE_FLUSH = 1 << 4,
+ TU_CMD_FLAG_CACHE_INVALIDATE = 1 << 5,
+ TU_CMD_FLAG_WAIT_MEM_WRITES = 1 << 6,
+ TU_CMD_FLAG_WAIT_FOR_IDLE = 1 << 7,
+ TU_CMD_FLAG_WAIT_FOR_ME = 1 << 8,
+
+ TU_CMD_FLAG_ALL_FLUSH =
+ TU_CMD_FLAG_CCU_FLUSH_DEPTH |
+ TU_CMD_FLAG_CCU_FLUSH_COLOR |
+ TU_CMD_FLAG_CACHE_FLUSH |
+ /* Treat the CP as a sort of "cache" which may need to be "flushed" via
+    * waiting for writes to land with CP_WAIT_MEM_WRITES.
+ */
+ TU_CMD_FLAG_WAIT_MEM_WRITES,
+
+ TU_CMD_FLAG_GPU_INVALIDATE =
+ TU_CMD_FLAG_CCU_INVALIDATE_DEPTH |
+ TU_CMD_FLAG_CCU_INVALIDATE_COLOR |
+ TU_CMD_FLAG_CACHE_INVALIDATE,
+
+ TU_CMD_FLAG_ALL_INVALIDATE =
+ TU_CMD_FLAG_GPU_INVALIDATE |
+ /* Treat the CP as a sort of "cache" which may need to be "invalidated"
+ * via waiting for UCHE/CCU flushes to land with WFI/WFM.
+ */
+ TU_CMD_FLAG_WAIT_FOR_IDLE |
+ TU_CMD_FLAG_WAIT_FOR_ME,
+};
+
+/* Changing the CCU from sysmem mode to gmem mode or vice-versa is pretty
+ * heavy, involving a CCU cache flush/invalidate and a WFI in order to change
+ * which part of the gmem is used by the CCU. Here we keep track of the
+ * current state of the CCU.
+ */
+enum tu_cmd_ccu_state {
+ TU_CMD_CCU_SYSMEM,
+ TU_CMD_CCU_GMEM,
+ TU_CMD_CCU_UNKNOWN,
};
-struct tu_streamout_state {
- uint16_t stride[IR3_MAX_SO_BUFFERS];
- uint32_t ncomp[IR3_MAX_SO_BUFFERS];
- uint32_t prog[IR3_MAX_SO_OUTPUTS * 2];
- uint32_t prog_count;
- uint32_t vpc_so_buf_cntl;
+struct tu_cache_state {
+ /* Caches which must be made available (flushed) eventually if there are
+ * any users outside that cache domain, and caches which must be
+ * invalidated eventually if there are any reads.
+ */
+ enum tu_cmd_flush_bits pending_flush_bits;
+ /* Pending flushes */
+ enum tu_cmd_flush_bits flush_bits;
};
struct tu_cmd_state
VkDeviceSize offsets[MAX_VBS];
} vb;
- struct tu_dynamic_state dynamic;
+ /* for dynamic states that can't be emitted directly */
+ uint32_t dynamic_stencil_mask;
+ uint32_t dynamic_stencil_wrmask;
+ uint32_t dynamic_stencil_ref;
+ uint32_t dynamic_gras_su_cntl;
- /* Stream output buffers */
- struct
- {
- struct tu_buffer *buffers[IR3_MAX_SO_BUFFERS];
- VkDeviceSize offsets[IR3_MAX_SO_BUFFERS];
- VkDeviceSize sizes[IR3_MAX_SO_BUFFERS];
- } streamout_buf;
+ /* saved states to re-emit in TU_CMD_DIRTY_DRAW_STATE case */
+ struct tu_draw_state dynamic_state[TU_DYNAMIC_STATE_COUNT];
+ struct tu_draw_state vertex_buffers;
+ struct tu_draw_state shader_const[MESA_SHADER_STAGES];
+ struct tu_draw_state desc_sets;
- uint8_t streamout_reset;
- uint8_t streamout_enabled;
+ struct tu_draw_state vs_params;
/* Index buffer */
- struct tu_buffer *index_buffer;
- uint64_t index_offset;
- uint32_t index_type;
- uint32_t max_index_count;
uint64_t index_va;
+ uint32_t max_index_count;
+ uint8_t index_size;
+
+   /* Because the streamout base has to be 32-byte aligned,
+    * there is an extra offset to deal with when it is
+    * unaligned.
+ */
+ uint8_t streamout_offset[IR3_MAX_SO_BUFFERS];
+
+ /* Renderpasses are tricky, because we may need to flush differently if
+ * using sysmem vs. gmem and therefore we have to delay any flushing that
+    * happens before a renderpass. So we keep two copies of the flush
+ * state, one for intra-renderpass flushes (i.e. renderpass dependencies)
+ * and one for outside a renderpass.
+ */
+ struct tu_cache_state cache;
+ struct tu_cache_state renderpass_cache;
+
+ enum tu_cmd_ccu_state ccu_state;
const struct tu_render_pass *pass;
const struct tu_subpass *subpass;
const struct tu_framebuffer *framebuffer;
-
- struct tu_tiling_config tiling_config;
+ VkRect2D render_area;
struct tu_cs_entry tile_store_ib;
+
+ bool xfb_used;
+ bool has_tess;
+ bool has_subpass_predication;
+ bool predication_active;
};
struct tu_cmd_pool
{
+ struct vk_object_base base;
+
VkAllocationCallbacks alloc;
struct list_head cmd_buffers;
struct list_head free_cmd_buffers;
TU_CMD_BUFFER_STATUS_PENDING,
};
+#ifndef MSM_SUBMIT_BO_READ
+#define MSM_SUBMIT_BO_READ 0x0001
+#define MSM_SUBMIT_BO_WRITE 0x0002
+#define MSM_SUBMIT_BO_DUMP 0x0004
+
+struct drm_msm_gem_submit_bo {
+ uint32_t flags; /* in, mask of MSM_SUBMIT_BO_x */
+ uint32_t handle; /* in, GEM handle */
+ uint64_t presumed; /* in/out, presumed buffer address */
+};
+#endif
+
struct tu_bo_list
{
uint32_t count;
VkResult
tu_bo_list_merge(struct tu_bo_list *list, const struct tu_bo_list *other);
-/* This struct defines the layout of the scratch_bo */
-struct tu6_control
-{
- uint32_t seqno; /* seqno for async CP_EVENT_WRITE, etc */
- uint32_t _pad0;
- volatile uint32_t vsc_overflow;
- uint32_t _pad1;
- /* flag set from cmdstream when VSC overflow detected: */
- uint32_t vsc_scratch;
- uint32_t _pad2;
- uint32_t _pad3;
- uint32_t _pad4;
-
- /* scratch space for VPC_SO[i].FLUSH_BASE_LO/HI, start on 32 byte boundary. */
- struct {
- uint32_t offset;
- uint32_t pad[7];
- } flush_base[4];
-};
-
-#define ctrl_offset(member) offsetof(struct tu6_control, member)
-
struct tu_cmd_buffer
{
- VK_LOADER_DATA _loader_data;
+ struct vk_object_base base;
struct tu_device *device;
struct tu_cmd_state state;
struct tu_vertex_binding vertex_bindings[MAX_VBS];
+ uint32_t vertex_bindings_set;
uint32_t queue_family_index;
uint32_t push_constants[MAX_PUSH_CONSTANTS_SIZE / 4];
VkShaderStageFlags push_constant_stages;
struct tu_descriptor_set meta_push_descriptors;
- struct tu_descriptor_state descriptors[VK_PIPELINE_BIND_POINT_RANGE_SIZE];
+ struct tu_descriptor_state descriptors[MAX_BIND_POINTS];
struct tu_cmd_buffer_upload upload;
struct tu_cs draw_epilogue_cs;
struct tu_cs sub_cs;
- struct tu_bo scratch_bo;
- uint32_t scratch_seqno;
-
- struct tu_bo vsc_draw_strm;
- struct tu_bo vsc_prim_strm;
uint32_t vsc_draw_strm_pitch;
uint32_t vsc_prim_strm_pitch;
- bool use_vsc_data;
-
- bool wait_for_idle;
};
/* Temporary struct for tracking a register state to be written, used by
 * a6xx-pack.h and tu_cs_emit_regs()
 */
uint32_t bo_shift;
};
-unsigned
+
+void tu_emit_cache_flush_renderpass(struct tu_cmd_buffer *cmd_buffer,
+ struct tu_cs *cs);
+
+void tu_emit_cache_flush_ccu(struct tu_cmd_buffer *cmd_buffer,
+ struct tu_cs *cs,
+ enum tu_cmd_ccu_state ccu_state);
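+/* Usage sketch (illustrative): switch the CCU to gmem mode before starting
+ * a gmem (binning) renderpass:
+ *
+ *    tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_GMEM);
+ */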
+
+void
tu6_emit_event_write(struct tu_cmd_buffer *cmd,
struct tu_cs *cs,
- enum vgt_event_type event,
- bool need_seqno);
-
-bool
-tu_get_memory_fd(struct tu_device *device,
- struct tu_device_memory *memory,
- int *pFD);
+ enum vgt_event_type event);
static inline struct tu_descriptor_state *
tu_get_descriptors_state(struct tu_cmd_buffer *cmd_buffer,
return &cmd_buffer->descriptors[bind_point];
}
-/*
- * Takes x,y,z as exact numbers of invocations, instead of blocks.
- *
- * Limitations: Can't call normal dispatch functions without binding or
- * rebinding
- * the compute pipeline.
- */
-void
-tu_unaligned_dispatch(struct tu_cmd_buffer *cmd_buffer,
- uint32_t x,
- uint32_t y,
- uint32_t z);
-
struct tu_event
{
+ struct vk_object_base base;
struct tu_bo bo;
};
-struct tu_shader_module;
-
-#define TU_HASH_SHADER_IS_GEOM_COPY_SHADER (1 << 0)
-#define TU_HASH_SHADER_SISCHED (1 << 1)
-#define TU_HASH_SHADER_UNSAFE_MATH (1 << 2)
-void
-tu_hash_shaders(unsigned char *hash,
- const VkPipelineShaderStageCreateInfo **stages,
- const struct tu_pipeline_layout *layout,
- const struct tu_pipeline_key *key,
- uint32_t flags);
-
-static inline gl_shader_stage
-vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
-{
- assert(__builtin_popcount(vk_stage) == 1);
- return ffs(vk_stage) - 1;
-}
-
-static inline VkShaderStageFlagBits
-mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
-{
- return (1 << mesa_stage);
-}
-
-#define TU_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)
-
-#define tu_foreach_stage(stage, stage_bits) \
- for (gl_shader_stage stage, \
- __tmp = (gl_shader_stage)((stage_bits) &TU_STAGE_MASK); \
- stage = __builtin_ffs(__tmp) - 1, __tmp; __tmp &= ~(1 << (stage)))
-
struct tu_shader_module
{
+ struct vk_object_base base;
+
unsigned char sha1[20];
uint32_t code_size;
const uint32_t *code[0];
};
-struct tu_shader_compile_options
-{
- struct ir3_shader_key key;
-
- bool optimize;
- bool include_binning_pass;
-};
-
struct tu_push_constant_range
{
uint32_t lo;
struct tu_shader
{
- struct ir3_shader ir3_shader;
+ struct ir3_shader *ir3_shader;
struct tu_push_constant_range push_consts;
- unsigned attachment_idx[MAX_RTS];
-
- /* This may be true for vertex shaders. When true, variants[1] is the
- * binning variant and binning_binary is non-NULL.
- */
- bool has_binning_pass;
-
- void *binary;
- void *binning_binary;
-
- struct ir3_shader_variant variants[0];
+ uint8_t active_desc_sets;
};
struct tu_shader *
struct tu_shader *shader,
const VkAllocationCallbacks *alloc);
-void
-tu_shader_compile_options_init(
- struct tu_shader_compile_options *options,
- const VkGraphicsPipelineCreateInfo *pipeline_info);
-
-VkResult
-tu_shader_compile(struct tu_device *dev,
- struct tu_shader *shader,
- const struct tu_shader *next_stage,
- const struct tu_shader_compile_options *options,
- const VkAllocationCallbacks *alloc);
-
struct tu_program_descriptor_linkage
{
- struct ir3_ubo_analysis_state ubo_state;
struct ir3_const_state const_state;
uint32_t constlen;
struct tu_pipeline
{
- struct tu_cs cs;
+ struct vk_object_base base;
- struct tu_dynamic_state dynamic_state;
+ struct tu_cs cs;
struct tu_pipeline_layout *layout;
bool need_indirect_descriptor_sets;
VkShaderStageFlags active_stages;
+ uint32_t active_desc_sets;
+
+   /* mask of enabled dynamic states:
+    * if BIT(i) is set, the state is set dynamically and
+    * pipeline->dynamic_state[i] is *NOT* used
+    */
+ uint32_t dynamic_state_mask;
+ struct tu_draw_state dynamic_state[TU_DYNAMIC_STATE_COUNT];
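+   /* Check sketch (illustrative): the pipeline's baked copy of a state is
+    * used only when its bit is clear:
+    *
+    *    if (!(pipeline->dynamic_state_mask & BIT(id)))
+    *       state = pipeline->dynamic_state[id];
+    */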
- struct tu_streamout_state streamout;
+ /* gras_su_cntl without line width, used for dynamic line width state */
+ uint32_t gras_su_cntl;
+
+ /* draw states for the pipeline */
+ struct tu_draw_state load_state, rast_state, ds_state, blend_state;
struct
{
- struct tu_bo binary_bo;
- struct tu_cs_entry state_ib;
- struct tu_cs_entry binning_state_ib;
+ struct tu_draw_state state;
+ struct tu_draw_state binning_state;
struct tu_program_descriptor_linkage link[MESA_SHADER_STAGES];
- unsigned input_attachment_idx[MAX_RTS];
} program;
struct
{
- struct tu_cs_entry state_ib;
- } load_state;
-
- struct
- {
- uint8_t bindings[MAX_VERTEX_ATTRIBS];
- uint32_t count;
-
- uint8_t binning_bindings[MAX_VERTEX_ATTRIBS];
- uint32_t binning_count;
-
- struct tu_cs_entry state_ib;
- struct tu_cs_entry binning_state_ib;
+ struct tu_draw_state state;
+ struct tu_draw_state binning_state;
+ uint32_t bindings_used;
} vi;
struct
struct
{
- struct tu_cs_entry state_ib;
- } vp;
-
- struct
- {
- uint32_t gras_su_cntl;
- struct tu_cs_entry state_ib;
- } rast;
-
- struct
- {
- struct tu_cs_entry state_ib;
- } ds;
-
- struct
- {
- struct tu_cs_entry state_ib;
- } blend;
+ uint32_t patch_type;
+ uint32_t param_stride;
+ uint32_t hs_bo_regid;
+ uint32_t ds_bo_regid;
+ bool upper_left_domain_origin;
+ } tess;
struct
{
void
tu6_emit_sample_locations(struct tu_cs *cs, const VkSampleLocationsInfoEXT *samp_loc);
-void
-tu6_emit_gras_su_cntl(struct tu_cs *cs,
- uint32_t gras_su_cntl,
- float line_width);
-
void
tu6_emit_depth_bias(struct tu_cs *cs,
float constant_factor,
float clamp,
float slope_factor);
-void
-tu6_emit_stencil_compare_mask(struct tu_cs *cs,
- uint32_t front,
- uint32_t back);
+void tu6_emit_msaa(struct tu_cs *cs, VkSampleCountFlagBits samples);
-void
-tu6_emit_stencil_write_mask(struct tu_cs *cs, uint32_t front, uint32_t back);
+void tu6_emit_window_scissor(struct tu_cs *cs, uint32_t x1, uint32_t y1, uint32_t x2, uint32_t y2);
-void
-tu6_emit_stencil_reference(struct tu_cs *cs, uint32_t front, uint32_t back);
+void tu6_emit_window_offset(struct tu_cs *cs, uint32_t x1, uint32_t y1);
void
-tu6_emit_blend_constants(struct tu_cs *cs, const float constants[4]);
+tu6_emit_xs_config(struct tu_cs *cs,
+ gl_shader_stage stage,
+ const struct ir3_shader_variant *xs,
+ uint64_t binary_iova);
-void tu6_emit_msaa(struct tu_cs *cs, VkSampleCountFlagBits samples);
-
-void tu6_emit_window_scissor(struct tu_cs *cs, uint32_t x1, uint32_t y1, uint32_t x2, uint32_t y2);
+void
+tu6_emit_vpc(struct tu_cs *cs,
+ const struct ir3_shader_variant *vs,
+ const struct ir3_shader_variant *hs,
+ const struct ir3_shader_variant *ds,
+ const struct ir3_shader_variant *gs,
+ const struct ir3_shader_variant *fs,
+ uint32_t patch_control_points,
+ bool vshs_workgroup);
-void tu6_emit_window_offset(struct tu_cs *cs, uint32_t x1, uint32_t y1);
+void
+tu6_emit_fs_inputs(struct tu_cs *cs, const struct ir3_shader_variant *fs);
struct tu_image_view;
uint32_t a,
uint32_t gmem_a);
-struct tu_userdata_info *
-tu_lookup_user_sgpr(struct tu_pipeline *pipeline,
- gl_shader_stage stage,
- int idx);
-
-struct tu_shader_variant *
-tu_get_shader(struct tu_pipeline *pipeline, gl_shader_stage stage);
-
-struct tu_graphics_pipeline_create_info
-{
- bool use_rectlist;
- bool db_depth_clear;
- bool db_stencil_clear;
- bool db_depth_disable_expclear;
- bool db_stencil_disable_expclear;
- bool db_flush_depth_inplace;
- bool db_flush_stencil_inplace;
- bool db_resummarize;
- uint32_t custom_blend_mode;
-};
-
enum tu_supported_formats {
FMT_VERTEX = 1,
FMT_TEXTURE = 2,
return tu6_format_color(format, TILE6_LINEAR).fmt;
}
-enum a6xx_depth_format tu6_pipe2depth(VkFormat format);
-
struct tu_image
{
+ struct vk_object_base base;
+
VkImageType type;
/* The original VkFormat provided by the client. This may not match any
* of the actual surface formats.
uint32_t layer_count;
VkSampleCountFlagBits samples;
- struct fdl_layout layout;
+ struct fdl_layout layout[3];
+ uint32_t total_size;
unsigned queue_family_mask;
bool exclusive;
VkDeviceSize bo_offset;
};
-unsigned
-tu_image_queue_family_mask(const struct tu_image *image,
- uint32_t family,
- uint32_t queue_family);
-
static inline uint32_t
tu_get_layerCount(const struct tu_image *image,
const VkImageSubresourceRange *range)
: range->levelCount;
}
-enum a3xx_msaa_samples
-tu_msaa_samples(uint32_t samples);
-enum a6xx_tex_fetchsize
-tu6_fetchsize(VkFormat format);
-
struct tu_image_view
{
+ struct vk_object_base base;
+
struct tu_image *image; /**< VkImageViewCreateInfo::image */
uint64_t base_addr;
uint32_t RB_2D_DST_INFO;
uint32_t RB_BLIT_DST_INFO;
+
+ /* for d32s8 separate stencil */
+ uint64_t stencil_base_addr;
+ uint32_t stencil_layer_size;
+ uint32_t stencil_PITCH;
+};
+
+struct tu_sampler_ycbcr_conversion {
+ struct vk_object_base base;
+
+ VkFormat format;
+ VkSamplerYcbcrModelConversion ycbcr_model;
+ VkSamplerYcbcrRange ycbcr_range;
+ VkComponentMapping components;
+ VkChromaLocation chroma_offsets[2];
+ VkFilter chroma_filter;
};
struct tu_sampler {
+ struct vk_object_base base;
+
uint32_t descriptor[A6XX_TEX_SAMP_DWORDS];
+ struct tu_sampler_ycbcr_conversion *ycbcr_sampler;
};
void
void
tu_cs_image_flag_ref(struct tu_cs *cs, const struct tu_image_view *iview, uint32_t layer);
+void
+tu_cs_image_stencil_ref(struct tu_cs *cs, const struct tu_image_view *iview, uint32_t layer);
+
+#define tu_image_view_stencil(iview, x) \
+ ((iview->x & ~A6XX_##x##_COLOR_FORMAT__MASK) | A6XX_##x##_COLOR_FORMAT(FMT6_8_UINT))
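+/* Usage sketch (illustrative): reuse an image view's packed register value
+ * for the separate S8 plane of D32S8 by overriding only the format field:
+ *
+ *    tu_cs_emit(cs, tu_image_view_stencil(iview, RB_2D_DST_INFO));
+ */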
+
VkResult
tu_image_create(VkDevice _device,
const VkImageCreateInfo *pCreateInfo,
const VkAllocationCallbacks *alloc,
VkImage *pImage,
- uint64_t modifier);
+ uint64_t modifier,
+ const VkSubresourceLayout *plane_layouts);
VkResult
tu_image_from_gralloc(VkDevice device_h,
VkImage *out_image_h);
void
-tu_image_view_init(struct tu_image_view *view,
- const VkImageViewCreateInfo *pCreateInfo);
+tu_image_view_init(struct tu_image_view *iview,
+ const VkImageViewCreateInfo *pCreateInfo,
+ bool limited_z24s8);
struct tu_buffer_view
{
+ struct vk_object_base base;
+
uint32_t descriptor[A6XX_TEX_CONST_DWORDS];
struct tu_buffer *buffer;
struct tu_device *device,
const VkBufferViewCreateInfo *pCreateInfo);
-static inline struct VkExtent3D
-tu_sanitize_image_extent(const VkImageType imageType,
- const struct VkExtent3D imageExtent)
-{
- switch (imageType) {
- case VK_IMAGE_TYPE_1D:
- return (VkExtent3D) { imageExtent.width, 1, 1 };
- case VK_IMAGE_TYPE_2D:
- return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
- case VK_IMAGE_TYPE_3D:
- return imageExtent;
- default:
- unreachable("invalid image type");
- }
-}
-
-static inline struct VkOffset3D
-tu_sanitize_image_offset(const VkImageType imageType,
- const struct VkOffset3D imageOffset)
-{
- switch (imageType) {
- case VK_IMAGE_TYPE_1D:
- return (VkOffset3D) { imageOffset.x, 0, 0 };
- case VK_IMAGE_TYPE_2D:
- return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
- case VK_IMAGE_TYPE_3D:
- return imageOffset;
- default:
- unreachable("invalid image type");
- }
-}
-
struct tu_attachment_info
{
struct tu_image_view *attachment;
struct tu_framebuffer
{
+ struct vk_object_base base;
+
uint32_t width;
uint32_t height;
uint32_t layers;
+ /* size of the first tile */
+ VkExtent2D tile0;
+ /* number of tiles */
+ VkExtent2D tile_count;
+
+ /* size of the first VSC pipe */
+ VkExtent2D pipe0;
+ /* number of VSC pipes */
+ VkExtent2D pipe_count;
+
+ /* pipe register values */
+ uint32_t pipe_config[MAX_VSC_PIPES];
+ uint32_t pipe_sizes[MAX_VSC_PIPES];
+
uint32_t attachment_count;
struct tu_attachment_info attachments[0];
};
+void
+tu_framebuffer_tiling_config(struct tu_framebuffer *fb,
+ const struct tu_device *device,
+ const struct tu_render_pass *pass);
+
+struct tu_subpass_barrier {
+ VkPipelineStageFlags src_stage_mask;
+ VkAccessFlags src_access_mask;
+ VkAccessFlags dst_access_mask;
+ bool incoherent_ccu_color, incoherent_ccu_depth;
+};
+
struct tu_subpass_attachment
{
uint32_t attachment;
VkSampleCountFlagBits samples;
- /* pre-filled register values */
- uint32_t render_components;
uint32_t srgb_cntl;
+
+ struct tu_subpass_barrier start_barrier;
};
struct tu_render_pass_attachment
bool load;
bool store;
int32_t gmem_offset;
+ /* for D32S8 separate stencil: */
+ bool load_stencil;
+ bool store_stencil;
+ int32_t gmem_offset_stencil;
};
struct tu_render_pass
{
+ struct vk_object_base base;
+
uint32_t attachment_count;
uint32_t subpass_count;
uint32_t gmem_pixels;
uint32_t tile_align_w;
struct tu_subpass_attachment *subpass_attachments;
struct tu_render_pass_attachment *attachments;
+ struct tu_subpass_barrier end_barrier;
struct tu_subpass subpasses[0];
};
-VkResult
-tu_device_init_meta(struct tu_device *device);
-void
-tu_device_finish_meta(struct tu_device *device);
-
struct tu_query_pool
{
+ struct vk_object_base base;
+
VkQueryType type;
uint32_t stride;
uint64_t size;
struct tu_bo bo;
};
+enum tu_semaphore_kind
+{
+ TU_SEMAPHORE_NONE,
+ TU_SEMAPHORE_SYNCOBJ,
+};
+
+struct tu_semaphore_part
+{
+ enum tu_semaphore_kind kind;
+ union {
+ uint32_t syncobj;
+ };
+};
+
struct tu_semaphore
{
- uint32_t syncobj;
- uint32_t temp_syncobj;
+ struct vk_object_base base;
+
+ struct tu_semaphore_part permanent;
+ struct tu_semaphore_part temporary;
};
void
VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const void *pData);
-void
-tu_meta_push_descriptor_set(struct tu_cmd_buffer *cmd_buffer,
- VkPipelineBindPoint pipelineBindPoint,
- VkPipelineLayout _layout,
- uint32_t set,
- uint32_t descriptorWriteCount,
- const VkWriteDescriptorSet *pDescriptorWrites);
-
-int
-tu_drm_get_gpu_id(const struct tu_physical_device *dev, uint32_t *id);
-
-int
-tu_drm_get_gmem_size(const struct tu_physical_device *dev, uint32_t *size);
-
-int
-tu_drm_get_gmem_base(const struct tu_physical_device *dev, uint64_t *base);
+VkResult
+tu_physical_device_init(struct tu_physical_device *device,
+ struct tu_instance *instance);
+VkResult
+tu_enumerate_devices(struct tu_instance *instance);
int
tu_drm_submitqueue_new(const struct tu_device *dev,
void
tu_drm_submitqueue_close(const struct tu_device *dev, uint32_t queue_id);
-uint32_t
-tu_gem_new(const struct tu_device *dev, uint64_t size, uint32_t flags);
-uint32_t
-tu_gem_import_dmabuf(const struct tu_device *dev,
- int prime_fd,
- uint64_t size);
-int
-tu_gem_export_dmabuf(const struct tu_device *dev, uint32_t gem_handle);
-void
-tu_gem_close(const struct tu_device *dev, uint32_t gem_handle);
-uint64_t
-tu_gem_info_offset(const struct tu_device *dev, uint32_t gem_handle);
-uint64_t
-tu_gem_info_iova(const struct tu_device *dev, uint32_t gem_handle);
-
#define TU_DEFINE_HANDLE_CASTS(__tu_type, __VkType) \
\
static inline struct __tu_type *__tu_type##_from_handle(__VkType _handle) \
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_query_pool, VkQueryPool)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_render_pass, VkRenderPass)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_sampler, VkSampler)
+TU_DEFINE_NONDISP_HANDLE_CASTS(tu_sampler_ycbcr_conversion, VkSamplerYcbcrConversion)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_shader_module, VkShaderModule)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_semaphore, VkSemaphore)