turnip: Move tu_bo functions to tu_drm.c
diff --git a/src/freedreno/vulkan/tu_private.h b/src/freedreno/vulkan/tu_private.h
index 3a1bb331b9d288286d725a1fe22d1466a0a61d6c..40c2fb092a3f1cd9e36742c2565a920c4446d026 100644
--- a/src/freedreno/vulkan/tu_private.h
+++ b/src/freedreno/vulkan/tu_private.h
@@ -47,7 +47,9 @@
 #include "main/macros.h"
 #include "util/list.h"
 #include "util/macros.h"
+#include "util/u_atomic.h"
 #include "vk_alloc.h"
+#include "vk_object.h"
 #include "vk_debug_report.h"
 #include "wsi_common.h"
 
@@ -62,6 +64,7 @@
 
 #include "tu_descriptor_set.h"
 #include "tu_extensions.h"
+#include "tu_util.h"
 
 /* Pre-declarations needed for WSI entrypoints */
 struct wl_surface;
@@ -77,6 +80,8 @@ typedef uint32_t xcb_window_t;
 
 #include "tu_entrypoints.h"
 
+#include "vk_format.h"
+
 #define MAX_VBS 32
 #define MAX_VERTEX_ATTRIBS 32
 #define MAX_RTS 8
@@ -90,90 +95,23 @@ typedef uint32_t xcb_window_t;
 #define MAX_DYNAMIC_STORAGE_BUFFERS 8
 #define MAX_DYNAMIC_BUFFERS                                                  \
    (MAX_DYNAMIC_UNIFORM_BUFFERS + MAX_DYNAMIC_STORAGE_BUFFERS)
-#define MAX_SAMPLES_LOG2 4
-#define NUM_META_FS_KEYS 13
 #define TU_MAX_DRM_DEVICES 8
 #define MAX_VIEWS 8
+#define MAX_BIND_POINTS 2 /* compute + graphics */
 /* The Qualcomm driver exposes 0x20000058 */
 #define MAX_STORAGE_BUFFER_RANGE 0x20000000
-
-#define NUM_DEPTH_CLEAR_PIPELINES 3
-
-/*
- * This is the point we switch from using CP to compute shader
- * for certain buffer operations.
+/* We use ldc for uniform buffer loads, just like the Qualcomm driver, so
+ * expose the same maximum range.
+ * TODO: The SIZE bitfield is 15 bits and counts 4-dword units, so the
+ * actual maximum range might be higher.
  */
-#define TU_BUFFER_OPS_CS_THRESHOLD 4096
+#define MAX_UNIFORM_BUFFER_RANGE 0x10000
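
(To make the TODO concrete: a 15-bit SIZE field counting 4-dword (16-byte)
units could describe up to 2^15 * 16 bytes = 512 KiB per range, assuming the
field is interpreted that way, so the exposed 0x10000 = 64 KiB is
conservative.)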
 
 #define A6XX_TEX_CONST_DWORDS 16
 #define A6XX_TEX_SAMP_DWORDS 4
 
-enum tu_mem_heap
-{
-   TU_MEM_HEAP_VRAM,
-   TU_MEM_HEAP_VRAM_CPU_ACCESS,
-   TU_MEM_HEAP_GTT,
-   TU_MEM_HEAP_COUNT
-};
-
-enum tu_mem_type
-{
-   TU_MEM_TYPE_VRAM,
-   TU_MEM_TYPE_GTT_WRITE_COMBINE,
-   TU_MEM_TYPE_VRAM_CPU_ACCESS,
-   TU_MEM_TYPE_GTT_CACHED,
-   TU_MEM_TYPE_COUNT
-};
-
 #define tu_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
 
-static inline uint32_t
-align_u32(uint32_t v, uint32_t a)
-{
-   assert(a != 0 && a == (a & -a));
-   return (v + a - 1) & ~(a - 1);
-}
-
-static inline uint32_t
-align_u32_npot(uint32_t v, uint32_t a)
-{
-   return (v + a - 1) / a * a;
-}
-
-static inline uint64_t
-align_u64(uint64_t v, uint64_t a)
-{
-   assert(a != 0 && a == (a & -a));
-   return (v + a - 1) & ~(a - 1);
-}
-
-static inline int32_t
-align_i32(int32_t v, int32_t a)
-{
-   assert(a != 0 && a == (a & -a));
-   return (v + a - 1) & ~(a - 1);
-}
-
-/** Alignment must be a power of 2. */
-static inline bool
-tu_is_aligned(uintmax_t n, uintmax_t a)
-{
-   assert(a == (a & -a));
-   return (n & (a - 1)) == 0;
-}
-
-static inline uint32_t
-round_up_u32(uint32_t v, uint32_t a)
-{
-   return (v + a - 1) / a;
-}
-
-static inline uint64_t
-round_up_u64(uint64_t v, uint64_t a)
-{
-   return (v + a - 1) / a;
-}
-
 static inline uint32_t
 tu_minify(uint32_t n, uint32_t levels)
 {
@@ -182,29 +120,6 @@ tu_minify(uint32_t n, uint32_t levels)
    else
       return MAX2(n >> levels, 1);
 }
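
A quick worked example of tu_minify(), which computes mip-level dimensions
and clamps to 1 so deep mip tails never reach zero (values are hypothetical):

   uint32_t w0  = tu_minify(1000, 0);  /* 1000: level 0 is unchanged */
   uint32_t w3  = tu_minify(1000, 3);  /* 1000 >> 3 = 125 */
   uint32_t w12 = tu_minify(1000, 12); /* 1000 >> 12 == 0, clamped to 1 */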
-static inline float
-tu_clamp_f(float f, float min, float max)
-{
-   assert(min < max);
-
-   if (f > max)
-      return max;
-   else if (f < min)
-      return min;
-   else
-      return f;
-}
-
-static inline bool
-tu_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
-{
-   if (*inout_mask & clear_mask) {
-      *inout_mask &= ~clear_mask;
-      return true;
-   } else {
-      return false;
-   }
-}
 
 #define for_each_bit(b, dword)                                               \
    for (uint32_t __dword = (dword);                                          \
@@ -217,6 +132,7 @@ tu_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
    })
 
 #define COND(bool, val) ((bool) ? (val) : 0)
+#define BIT(bit) (1u << (bit))
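
A minimal sketch of how these helpers combine (mask, msaa_enabled, and
process() are hypothetical):

   uint32_t mask = BIT(0) | BIT(3) | BIT(5);
   uint32_t b;
   for_each_bit(b, mask)
      process(b);                             /* visits b = 0, 3, 5 */
   uint32_t reg = COND(msaa_enabled, BIT(7)); /* BIT(7) if true, else 0 */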
 
 /* Whenever we generate an error, pass it through this function. Useful for
  * debugging, where we can break on it. Only call at error site, not when
@@ -244,11 +160,7 @@ __tu_finishme(const char *file, int line, const char *format, ...)
 void
 tu_loge(const char *format, ...) tu_printflike(1, 2);
 void
-tu_loge_v(const char *format, va_list va);
-void
 tu_logi(const char *format, ...) tu_printflike(1, 2);
-void
-tu_logi_v(const char *format, va_list va);
 
 /**
  * Print a FINISHME message, including its source location.
@@ -262,24 +174,6 @@ tu_logi_v(const char *format, va_list va);
       }                                                                      \
    } while (0)
 
-/* A non-fatal assert.  Useful for debugging. */
-#ifdef DEBUG
-#define tu_assert(x)                                                         \
-   ({                                                                        \
-      if (unlikely(!(x)))                                                    \
-         fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x);      \
-   })
-#else
-#define tu_assert(x)
-#endif
-
-/* Suppress -Wunused in stub functions */
-#define tu_use_args(...) __tu_use_args(0, ##__VA_ARGS__)
-static inline void
-__tu_use_args(int ignore, ...)
-{
-}
-
 #define tu_stub()                                                            \
    do {                                                                      \
       tu_finishme("stub %s", __func__);                                      \
@@ -296,7 +190,7 @@ tu_lookup_entrypoint_checked(
 
 struct tu_physical_device
 {
-   VK_LOADER_DATA _loader_data;
+   struct vk_object_base base;
 
    struct tu_instance *instance;
 
@@ -313,8 +207,25 @@ struct tu_physical_device
 
    unsigned gpu_id;
    uint32_t gmem_size;
+   uint64_t gmem_base;
+   uint32_t ccu_offset_gmem;
+   uint32_t ccu_offset_bypass;
+   /* alignment for size of tiles */
    uint32_t tile_align_w;
-   uint32_t tile_align_h;
+#define TILE_ALIGN_H 16
+   /* gmem store/load granularity */
+#define GMEM_ALIGN_W 16
+#define GMEM_ALIGN_H 4
+
+   struct {
+      uint32_t PC_UNKNOWN_9805;
+      uint32_t SP_UNKNOWN_A0F8;
+   } magic;
+
+   int msm_major_version;
+   int msm_minor_version;
+
+   bool limited_z24s8;
 
    /* This is the drivers on-disk cache used as a fallback as opposed to
     * the pipeline cache defined by apps.
@@ -330,11 +241,14 @@ enum tu_debug_flags
    TU_DEBUG_NIR = 1 << 1,
    TU_DEBUG_IR3 = 1 << 2,
    TU_DEBUG_NOBIN = 1 << 3,
+   TU_DEBUG_SYSMEM = 1 << 4,
+   TU_DEBUG_FORCEBIN = 1 << 5,
+   TU_DEBUG_NOUBWC = 1 << 6,
 };
 
 struct tu_instance
 {
-   VK_LOADER_DATA _loader_data;
+   struct vk_object_base base;
 
    VkAllocationCallbacks alloc;
 
@@ -366,6 +280,8 @@ struct cache_entry;
 
 struct tu_pipeline_cache
 {
+   struct vk_object_base base;
+
    struct tu_device *device;
    pthread_mutex_t mutex;
 
@@ -382,39 +298,6 @@ struct tu_pipeline_key
 {
 };
 
-void
-tu_pipeline_cache_init(struct tu_pipeline_cache *cache,
-                       struct tu_device *device);
-void
-tu_pipeline_cache_finish(struct tu_pipeline_cache *cache);
-void
-tu_pipeline_cache_load(struct tu_pipeline_cache *cache,
-                       const void *data,
-                       size_t size);
-
-struct tu_shader_variant;
-
-bool
-tu_create_shader_variants_from_pipeline_cache(
-   struct tu_device *device,
-   struct tu_pipeline_cache *cache,
-   const unsigned char *sha1,
-   struct tu_shader_variant **variants);
-
-void
-tu_pipeline_cache_insert_shaders(struct tu_device *device,
-                                 struct tu_pipeline_cache *cache,
-                                 const unsigned char *sha1,
-                                 struct tu_shader_variant **variants,
-                                 const void *const *codes,
-                                 const unsigned *code_sizes);
-
-struct tu_meta_state
-{
-   VkAllocationCallbacks alloc;
-
-   struct tu_pipeline_cache cache;
-};
 
 /* queue types */
 #define TU_QUEUE_GENERAL 0
@@ -423,6 +306,7 @@ struct tu_meta_state
 
 struct tu_fence
 {
+   struct vk_object_base base;
    struct wsi_fence *fence_wsi;
    bool signaled;
    int fd;
@@ -443,7 +327,8 @@ tu_fence_wait_idle(struct tu_fence *fence);
 
 struct tu_queue
 {
-   VK_LOADER_DATA _loader_data;
+   struct vk_object_base base;
+
    struct tu_device *device;
    uint32_t queue_family_index;
    int queue_idx;
@@ -453,39 +338,100 @@ struct tu_queue
    struct tu_fence submit_fence;
 };
 
-struct tu_device
+struct tu_bo
 {
-   VK_LOADER_DATA _loader_data;
+   uint32_t gem_handle;
+   uint64_t size;
+   uint64_t iova;
+   void *map;
+};
 
-   VkAllocationCallbacks alloc;
+enum global_shader {
+   GLOBAL_SH_VS,
+   GLOBAL_SH_FS_BLIT,
+   GLOBAL_SH_FS_CLEAR0,
+   GLOBAL_SH_FS_CLEAR_MAX = GLOBAL_SH_FS_CLEAR0 + MAX_RTS,
+   GLOBAL_SH_COUNT,
+};
 
-   struct tu_instance *instance;
+/* This struct defines the layout of the global_bo */
+struct tu6_global
+{
+   /* 6 bcolor_entry entries, one for each VK_BORDER_COLOR */
+   uint8_t border_color[128 * 6];
+
+   /* clear/blit shaders, all <= 16 instrs (16 instr = 1 instrlen unit) */
+   instr_t shaders[GLOBAL_SH_COUNT][16];
 
-   struct tu_meta_state meta_state;
+   uint32_t seqno_dummy;          /* dummy seqno for CP_EVENT_WRITE */
+   uint32_t _pad0;
+   volatile uint32_t vsc_draw_overflow;
+   uint32_t _pad1;
+   volatile uint32_t vsc_prim_overflow;
+   uint32_t _pad2;
+   uint64_t predicate;
+
+   /* scratch space for VPC_SO[i].FLUSH_BASE_LO/HI; each entry starts on
+    * a 32-byte boundary.
+    */
+   struct {
+      uint32_t offset;
+      uint32_t pad[7];
+   } flush_base[4];
+};
+#define gb_offset(member) offsetof(struct tu6_global, member)
+#define global_iova(cmd, member) ((cmd)->device->global_bo.iova + gb_offset(member))
+
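gb_offset() yields a member's byte offset within struct tu6_global, and
global_iova() adds the global bo's GPU address. A minimal usage sketch
(cmd and cs are hypothetical locals; tu_cs_emit_qw() is assumed from
tu_cs.h):

   /* Point a CP packet at the dummy seqno slot in the global bo. */
   tu_cs_emit_qw(cs, global_iova(cmd, seqno_dummy));
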
+void tu_init_clear_blit_shaders(struct tu6_global *global);
+
+/* extra space in vsc draw/prim streams */
+#define VSC_PAD 0x40
+
+struct tu_device
+{
+   struct vk_device vk;
+   struct tu_instance *instance;
 
    struct tu_queue *queues[TU_MAX_QUEUE_FAMILIES];
    int queue_count[TU_MAX_QUEUE_FAMILIES];
 
    struct tu_physical_device *physical_device;
+   int _lost;
 
    struct ir3_compiler *compiler;
 
    /* Backup in-memory cache to be used if the app doesn't provide one */
    struct tu_pipeline_cache *mem_cache;
 
-   struct list_head shader_slabs;
-   mtx_t shader_slab_mutex;
+#define MIN_SCRATCH_BO_SIZE_LOG2 12 /* A page */
+
+   /* Currently the kernel driver uses a 32-bit GPU address space, but it
+    * should never grow beyond 48 bits.
+    */
+   struct {
+      struct tu_bo bo;
+      mtx_t construct_mtx;
+      bool initialized;
+   } scratch_bos[48 - MIN_SCRATCH_BO_SIZE_LOG2];
+
+   struct tu_bo global_bo;
 
    struct tu_device_extension_table enabled_extensions;
+
+   uint32_t vsc_draw_strm_pitch;
+   uint32_t vsc_prim_strm_pitch;
+   mtx_t vsc_pitch_mtx;
 };
 
-struct tu_bo
+VkResult _tu_device_set_lost(struct tu_device *device,
+                             const char *file, int line,
+                             const char *msg, ...) PRINTFLIKE(4, 5);
+#define tu_device_set_lost(dev, ...) \
+   _tu_device_set_lost(dev, __FILE__, __LINE__, __VA_ARGS__)
+
+static inline bool
+tu_device_is_lost(struct tu_device *device)
 {
-   uint32_t gem_handle;
-   uint64_t size;
-   uint64_t iova;
-   void *map;
-};
+   return unlikely(p_atomic_read(&device->_lost));
+}
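
A minimal sketch of the intended lost-device pattern (example_submit() and
kernel_err are hypothetical):

   static VkResult
   example_submit(struct tu_device *dev, int kernel_err)
   {
      if (tu_device_is_lost(dev))
         return VK_ERROR_DEVICE_LOST;   /* fail fast once the device is lost */
      if (kernel_err)
         return tu_device_set_lost(dev, "submit failed: %d", kernel_err);
      return VK_SUCCESS;
   }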
 
 VkResult
 tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size);
@@ -501,6 +447,15 @@ tu_bo_finish(struct tu_device *dev, struct tu_bo *bo);
 VkResult
 tu_bo_map(struct tu_device *dev, struct tu_bo *bo);
 
+/* Get a scratch bo for use inside a command buffer. This will always return
+ * the same bo given the same size or similar sizes, so only one scratch bo
+ * can be used at the same time. It's meant for short-lived things where we
+ * need to write to some piece of memory, read from it, and then immediately
+ * discard it.
+ */
+VkResult
+tu_get_scratch_bo(struct tu_device *dev, uint64_t size, struct tu_bo **bo);
+
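A sketch of the size-class indexing that the scratch_bos[] array in
tu_device implies (the actual computation inside tu_get_scratch_bo() may
differ; util_logbase2_ceil64() is assumed from util/u_math.h):

   /* Each slot holds one power-of-two-sized bo, starting at
    * 2^MIN_SCRATCH_BO_SIZE_LOG2 (one page); requests round up.
    */
   static unsigned
   example_scratch_index(uint64_t size)
   {
      return MAX2(util_logbase2_ceil64(size), MIN_SCRATCH_BO_SIZE_LOG2) -
             MIN_SCRATCH_BO_SIZE_LOG2;
   }
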
 struct tu_cs_entry
 {
    /* No ownership */
@@ -510,11 +465,50 @@ struct tu_cs_entry
    uint32_t offset;
 };
 
-struct ts_cs_memory {
+struct tu_cs_memory {
    uint32_t *map;
    uint64_t iova;
 };
 
+struct tu_draw_state {
+   uint64_t iova : 48;
+   uint32_t size : 16;
+};
+
+enum tu_dynamic_state
+{
+   /* re-use VK_DYNAMIC_STATE_ enums for non-extended dynamic states */
+   TU_DYNAMIC_STATE_SAMPLE_LOCATIONS = VK_DYNAMIC_STATE_STENCIL_REFERENCE + 1,
+   TU_DYNAMIC_STATE_COUNT,
+};
+
+enum tu_draw_state_group_id
+{
+   TU_DRAW_STATE_PROGRAM,
+   TU_DRAW_STATE_PROGRAM_BINNING,
+   TU_DRAW_STATE_TESS,
+   TU_DRAW_STATE_VB,
+   TU_DRAW_STATE_VI,
+   TU_DRAW_STATE_VI_BINNING,
+   TU_DRAW_STATE_RAST,
+   TU_DRAW_STATE_DS,
+   TU_DRAW_STATE_BLEND,
+   TU_DRAW_STATE_VS_CONST,
+   TU_DRAW_STATE_HS_CONST,
+   TU_DRAW_STATE_DS_CONST,
+   TU_DRAW_STATE_GS_CONST,
+   TU_DRAW_STATE_FS_CONST,
+   TU_DRAW_STATE_DESC_SETS,
+   TU_DRAW_STATE_DESC_SETS_LOAD,
+   TU_DRAW_STATE_VS_PARAMS,
+   TU_DRAW_STATE_INPUT_ATTACHMENTS_GMEM,
+   TU_DRAW_STATE_INPUT_ATTACHMENTS_SYSMEM,
+
+   /* dynamic state related draw states */
+   TU_DRAW_STATE_DYNAMIC,
+   TU_DRAW_STATE_COUNT = TU_DRAW_STATE_DYNAMIC + TU_DYNAMIC_STATE_COUNT,
+};
+
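Since the dynamic-state slots are laid out directly after the fixed ones,
the draw state slot for dynamic state i is TU_DRAW_STATE_DYNAMIC + i; a
sketch of how the command-buffer code would index these:

   uint32_t slot = TU_DRAW_STATE_DYNAMIC + VK_DYNAMIC_STATE_VIEWPORT;
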
 enum tu_cs_mode
 {
 
@@ -555,6 +549,7 @@ struct tu_cs
    uint32_t *reserved_end;
    uint32_t *end;
 
+   struct tu_device *device;
    enum tu_cs_mode mode;
    uint32_t next_bo_size;
 
@@ -565,10 +560,16 @@ struct tu_cs
    struct tu_bo **bos;
    uint32_t bo_count;
    uint32_t bo_capacity;
+
+   /* state for cond_exec_start/cond_exec_end */
+   uint32_t cond_flags;
+   uint32_t *cond_dwords;
 };
 
 struct tu_device_memory
 {
+   struct vk_object_base base;
+
    struct tu_bo bo;
    VkDeviceSize size;
 
@@ -589,14 +590,18 @@ struct tu_descriptor_range
 
 struct tu_descriptor_set
 {
+   struct vk_object_base base;
+
    const struct tu_descriptor_set_layout *layout;
+   struct tu_descriptor_pool *pool;
    uint32_t size;
 
    uint64_t va;
    uint32_t *mapped_ptr;
-   struct tu_descriptor_range *dynamic_descriptors;
 
-   struct tu_bo *descriptors[0];
+   uint32_t *dynamic_descriptors;
+
+   struct tu_bo *buffers[0];
 };
 
 struct tu_push_descriptor_set
@@ -614,6 +619,8 @@ struct tu_descriptor_pool_entry
 
 struct tu_descriptor_pool
 {
+   struct vk_object_base base;
+
    struct tu_bo bo;
    uint64_t current_offset;
    uint64_t size;
@@ -656,13 +663,16 @@ struct tu_descriptor_update_template_entry
 
 struct tu_descriptor_update_template
 {
+   struct vk_object_base base;
+
    uint32_t entry_count;
-   VkPipelineBindPoint bind_point;
    struct tu_descriptor_update_template_entry entry[0];
 };
 
 struct tu_buffer
 {
+   struct vk_object_base base;
+
    VkDeviceSize size;
 
    VkBufferUsageFlags usage;
@@ -678,97 +688,12 @@ tu_buffer_iova(struct tu_buffer *buffer)
    return buffer->bo->iova + buffer->bo_offset;
 }
 
-enum tu_dynamic_state_bits
-{
-   TU_DYNAMIC_VIEWPORT = 1 << 0,
-   TU_DYNAMIC_SCISSOR = 1 << 1,
-   TU_DYNAMIC_LINE_WIDTH = 1 << 2,
-   TU_DYNAMIC_DEPTH_BIAS = 1 << 3,
-   TU_DYNAMIC_BLEND_CONSTANTS = 1 << 4,
-   TU_DYNAMIC_DEPTH_BOUNDS = 1 << 5,
-   TU_DYNAMIC_STENCIL_COMPARE_MASK = 1 << 6,
-   TU_DYNAMIC_STENCIL_WRITE_MASK = 1 << 7,
-   TU_DYNAMIC_STENCIL_REFERENCE = 1 << 8,
-   TU_DYNAMIC_DISCARD_RECTANGLE = 1 << 9,
-   TU_DYNAMIC_ALL = (1 << 10) - 1,
-};
-
 struct tu_vertex_binding
 {
    struct tu_buffer *buffer;
    VkDeviceSize offset;
 };
 
-struct tu_viewport_state
-{
-   uint32_t count;
-   VkViewport viewports[MAX_VIEWPORTS];
-};
-
-struct tu_scissor_state
-{
-   uint32_t count;
-   VkRect2D scissors[MAX_SCISSORS];
-};
-
-struct tu_discard_rectangle_state
-{
-   uint32_t count;
-   VkRect2D rectangles[MAX_DISCARD_RECTANGLES];
-};
-
-struct tu_dynamic_state
-{
-   /**
-    * Bitmask of (1 << VK_DYNAMIC_STATE_*).
-    * Defines the set of saved dynamic state.
-    */
-   uint32_t mask;
-
-   struct tu_viewport_state viewport;
-
-   struct tu_scissor_state scissor;
-
-   float line_width;
-
-   struct
-   {
-      float bias;
-      float clamp;
-      float slope;
-   } depth_bias;
-
-   float blend_constants[4];
-
-   struct
-   {
-      float min;
-      float max;
-   } depth_bounds;
-
-   struct
-   {
-      uint32_t front;
-      uint32_t back;
-   } stencil_compare_mask;
-
-   struct
-   {
-      uint32_t front;
-      uint32_t back;
-   } stencil_write_mask;
-
-   struct
-   {
-      uint32_t front;
-      uint32_t back;
-   } stencil_reference;
-
-   struct tu_discard_rectangle_state discard_rectangle;
-};
-
-extern const struct tu_dynamic_state default_dynamic_state;
-
 const char *
 tu_get_debug_option_name(int id);
 
@@ -778,51 +703,166 @@ tu_get_perftest_option_name(int id);
 struct tu_descriptor_state
 {
    struct tu_descriptor_set *sets[MAX_SETS];
-   uint32_t valid;
-   struct tu_push_descriptor_set push_set;
-   bool push_dirty;
-   uint64_t dynamic_buffers[MAX_DYNAMIC_BUFFERS];
+   uint32_t dynamic_descriptors[MAX_DYNAMIC_BUFFERS * A6XX_TEX_CONST_DWORDS];
 };
 
-struct tu_tile
+enum tu_cmd_dirty_bits
 {
-   uint8_t pipe;
-   uint8_t slot;
-   VkOffset2D begin;
-   VkOffset2D end;
-};
+   TU_CMD_DIRTY_VERTEX_BUFFERS = 1 << 2,
+   TU_CMD_DIRTY_DESC_SETS_LOAD = 1 << 3,
+   TU_CMD_DIRTY_COMPUTE_DESC_SETS_LOAD = 1 << 4,
+   TU_CMD_DIRTY_SHADER_CONSTS = 1 << 5,
+   /* all draw states were disabled and need to be re-enabled: */
+   TU_CMD_DIRTY_DRAW_STATE = 1 << 7,
+};
+
+/* There are only three cache domains we have to care about: the CCU (color
+ * cache unit), which is used for color and depth/stencil attachments and
+ * copy/blit destinations and is conceptually split into a color half and a
+ * depth half, and the UCHE (universal cache), which is used for pretty much
+ * everything else. The CP and the host bypass both. We need to flush
+ * whenever data crosses one of these boundaries.
+ */
 
-struct tu_tiling_config
-{
-   VkRect2D render_area;
+enum tu_cmd_access_mask {
+   TU_ACCESS_UCHE_READ = 1 << 0,
+   TU_ACCESS_UCHE_WRITE = 1 << 1,
+   TU_ACCESS_CCU_COLOR_READ = 1 << 2,
+   TU_ACCESS_CCU_COLOR_WRITE = 1 << 3,
+   TU_ACCESS_CCU_DEPTH_READ = 1 << 4,
+   TU_ACCESS_CCU_DEPTH_WRITE = 1 << 5,
+
+   /* Experiments have shown that while it's safe to avoid flushing the CCU
+    * after each blit/renderpass, it's not safe to assume that subsequent
+    * lookups with a different attachment state will hit unflushed cache
+    * entries. That is, the CCU needs to be flushed and possibly invalidated
+    * when accessing memory with a different attachment state. Writing to an
+    * attachment under the following conditions after clearing using the
+    * normal 2d engine path is known to have issues:
+    *
+    * - It isn't the 0'th layer.
+    * - There is more than one attachment, and this isn't the 0'th attachment
+    *   (this seems to also depend on the cpp of the attachments).
+    *
+    * Our best guess is that the layer/MRT state is used when computing
+    * the location of a cache entry in CCU, to avoid conflicts. We assume that
+    * any access in a renderpass after or before an access by a transfer needs
+    * a flush/invalidate, and use the _INCOHERENT variants to represent access
+    * by a transfer.
+    */
+   TU_ACCESS_CCU_COLOR_INCOHERENT_READ = 1 << 6,
+   TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE = 1 << 7,
+   TU_ACCESS_CCU_DEPTH_INCOHERENT_READ = 1 << 8,
+   TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE = 1 << 9,
 
-   /* position and size of the first tile */
-   VkRect2D tile0;
-   /* number of tiles */
-   VkExtent2D tile_count;
+   /* Accesses by the host */
+   TU_ACCESS_HOST_READ = 1 << 10,
+   TU_ACCESS_HOST_WRITE = 1 << 11,
 
-   /* size of the first VSC pipe */
-   VkExtent2D pipe0;
-   /* number of VSC pipes */
-   VkExtent2D pipe_count;
+   /* Accesses by a GPU engine which bypasses any cache, e.g. writes via
+    * CP_EVENT_WRITE::BLIT and by the CP are SYSMEM_WRITE.
+    */
+   TU_ACCESS_SYSMEM_READ = 1 << 12,
+   TU_ACCESS_SYSMEM_WRITE = 1 << 13,
+
+   /* Set if a WFI is required. This can be required for:
+    * - 2D engine which (on some models) doesn't wait for flushes to complete
+    *   before starting
+    * - CP draw indirect opcodes, where we need to wait for any flushes to
+    *   complete but the CP implicitly waits for WFI's to complete and
+    *   therefore we only need a WFI after the flushes.
+    */
+   TU_ACCESS_WFI_READ = 1 << 14,
 
-   /* pipe register values */
-   uint32_t pipe_config[MAX_VSC_PIPES];
-   uint32_t pipe_sizes[MAX_VSC_PIPES];
-};
+   /* Set if a CP_WAIT_FOR_ME is required due to the data being read by the CP
+    * without it waiting for any WFI.
+    */
+   TU_ACCESS_WFM_READ = 1 << 15,
 
-enum tu_cmd_dirty_bits
-{
-   TU_CMD_DIRTY_PIPELINE = 1 << 0,
-   TU_CMD_DIRTY_COMPUTE_PIPELINE = 1 << 1,
-   TU_CMD_DIRTY_VERTEX_BUFFERS = 1 << 2,
-   TU_CMD_DIRTY_DESCRIPTOR_SETS = 1 << 3,
-   TU_CMD_DIRTY_PUSH_CONSTANTS = 1 << 4,
+   /* Memory writes from the CP start in-order with draws and event writes,
+    * but execute asynchronously and hence need a CP_WAIT_MEM_WRITES if read.
+    */
+   TU_ACCESS_CP_WRITE = 1 << 16,
+
+   TU_ACCESS_READ =
+      TU_ACCESS_UCHE_READ |
+      TU_ACCESS_CCU_COLOR_READ |
+      TU_ACCESS_CCU_DEPTH_READ |
+      TU_ACCESS_CCU_COLOR_INCOHERENT_READ |
+      TU_ACCESS_CCU_DEPTH_INCOHERENT_READ |
+      TU_ACCESS_HOST_READ |
+      TU_ACCESS_SYSMEM_READ |
+      TU_ACCESS_WFI_READ |
+      TU_ACCESS_WFM_READ,
+
+   TU_ACCESS_WRITE =
+      TU_ACCESS_UCHE_WRITE |
+      TU_ACCESS_CCU_COLOR_WRITE |
+      TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE |
+      TU_ACCESS_CCU_DEPTH_WRITE |
+      TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE |
+      TU_ACCESS_HOST_WRITE |
+      TU_ACCESS_SYSMEM_WRITE |
+      TU_ACCESS_CP_WRITE,
+
+   TU_ACCESS_ALL =
+      TU_ACCESS_READ |
+      TU_ACCESS_WRITE,
+};
+
+enum tu_cmd_flush_bits {
+   TU_CMD_FLAG_CCU_FLUSH_DEPTH = 1 << 0,
+   TU_CMD_FLAG_CCU_FLUSH_COLOR = 1 << 1,
+   TU_CMD_FLAG_CCU_INVALIDATE_DEPTH = 1 << 2,
+   TU_CMD_FLAG_CCU_INVALIDATE_COLOR = 1 << 3,
+   TU_CMD_FLAG_CACHE_FLUSH = 1 << 4,
+   TU_CMD_FLAG_CACHE_INVALIDATE = 1 << 5,
+   TU_CMD_FLAG_WAIT_MEM_WRITES = 1 << 6,
+   TU_CMD_FLAG_WAIT_FOR_IDLE = 1 << 7,
+   TU_CMD_FLAG_WAIT_FOR_ME = 1 << 8,
+
+   TU_CMD_FLAG_ALL_FLUSH =
+      TU_CMD_FLAG_CCU_FLUSH_DEPTH |
+      TU_CMD_FLAG_CCU_FLUSH_COLOR |
+      TU_CMD_FLAG_CACHE_FLUSH |
+      /* Treat the CP as a sort of "cache" which may need to be "flushed" via
+       * waiting for writes to land with CP_WAIT_MEM_WRITES.
+       */
+      TU_CMD_FLAG_WAIT_MEM_WRITES,
+
+   TU_CMD_FLAG_GPU_INVALIDATE =
+      TU_CMD_FLAG_CCU_INVALIDATE_DEPTH |
+      TU_CMD_FLAG_CCU_INVALIDATE_COLOR |
+      TU_CMD_FLAG_CACHE_INVALIDATE,
+
+   TU_CMD_FLAG_ALL_INVALIDATE =
+      TU_CMD_FLAG_GPU_INVALIDATE |
+      /* Treat the CP as a sort of "cache" which may need to be "invalidated"
+       * via waiting for UCHE/CCU flushes to land with WFI/WFM.
+       */
+      TU_CMD_FLAG_WAIT_FOR_IDLE |
+      TU_CMD_FLAG_WAIT_FOR_ME,
+};
+
+/* Changing the CCU from sysmem mode to gmem mode or vice-versa is pretty
+ * heavy, involving a CCU cache flush/invalidate and a WFI in order to change
+ * which part of the gmem is used by the CCU. Here we keep track of the
+ * current state of the CCU.
+ */
+enum tu_cmd_ccu_state {
+   TU_CMD_CCU_SYSMEM,
+   TU_CMD_CCU_GMEM,
+   TU_CMD_CCU_UNKNOWN,
+};
 
-   TU_CMD_DIRTY_DYNAMIC_LINE_WIDTH = 1 << 16,
-   TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK = 1 << 17,
-   TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK = 1 << 18,
-   TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE = 1 << 19,
+struct tu_cache_state {
+   /* Caches which must be made available (flushed) eventually if there are
+    * any users outside that cache domain, and caches which must be
+    * invalidated eventually if there are any reads.
+    */
+   enum tu_cmd_flush_bits pending_flush_bits;
+   /* Pending flushes */
+   enum tu_cmd_flush_bits flush_bits;
 };
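
A simplified sketch of how an access pair is translated into flush bits
(example_barrier() is hypothetical; the real logic in the command-buffer
code handles many more cases):

   static void
   example_barrier(struct tu_cache_state *cache,
                   enum tu_cmd_access_mask src_access,
                   enum tu_cmd_access_mask dst_access)
   {
      /* A CCU color write followed by a UCHE read must flush the color
       * CCU and invalidate the UCHE.
       */
      if ((src_access & TU_ACCESS_CCU_COLOR_WRITE) &&
          (dst_access & TU_ACCESS_UCHE_READ)) {
         cache->flush_bits |= TU_CMD_FLAG_CCU_FLUSH_COLOR |
                              TU_CMD_FLAG_CACHE_INVALIDATE;
      }
   }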
 
 struct tu_cmd_state
@@ -839,27 +879,59 @@ struct tu_cmd_state
       VkDeviceSize offsets[MAX_VBS];
    } vb;
 
-   struct tu_dynamic_state dynamic;
+   /* for dynamic states that can't be emitted directly */
+   uint32_t dynamic_stencil_mask;
+   uint32_t dynamic_stencil_wrmask;
+   uint32_t dynamic_stencil_ref;
+   uint32_t dynamic_gras_su_cntl;
+
+   /* saved states to re-emit in TU_CMD_DIRTY_DRAW_STATE case */
+   struct tu_draw_state dynamic_state[TU_DYNAMIC_STATE_COUNT];
+   struct tu_draw_state vertex_buffers;
+   struct tu_draw_state shader_const[MESA_SHADER_STAGES];
+   struct tu_draw_state desc_sets;
+
+   struct tu_draw_state vs_params;
 
    /* Index buffer */
-   struct tu_buffer *index_buffer;
-   uint64_t index_offset;
-   uint32_t index_type;
-   uint32_t max_index_count;
    uint64_t index_va;
+   uint32_t max_index_count;
+   uint8_t index_size;
+
+   /* Because the streamout base has to be 32-byte aligned, there is an
+    * extra offset to deal with when it is unaligned.
+    */
+   uint8_t streamout_offset[IR3_MAX_SO_BUFFERS];
+
+   /* Renderpasses are tricky, because we may need to flush differently if
+    * using sysmem vs. gmem and therefore we have to delay any flushing that
+    * happens before a renderpass. So we keep two copies of the flush
+    * state, one for intra-renderpass flushes (i.e. renderpass dependencies)
+    * and one for outside a renderpass.
+    */
+   struct tu_cache_state cache;
+   struct tu_cache_state renderpass_cache;
+
+   enum tu_cmd_ccu_state ccu_state;
 
    const struct tu_render_pass *pass;
    const struct tu_subpass *subpass;
    const struct tu_framebuffer *framebuffer;
+   VkRect2D render_area;
 
-   struct tu_tiling_config tiling_config;
-
-   struct tu_cs_entry tile_load_ib;
    struct tu_cs_entry tile_store_ib;
+
+   bool xfb_used;
+   bool has_tess;
+   bool has_subpass_predication;
+   bool predication_active;
 };
 
 struct tu_cmd_pool
 {
+   struct vk_object_base base;
+
    VkAllocationCallbacks alloc;
    struct list_head cmd_buffers;
    struct list_head free_cmd_buffers;
@@ -907,7 +979,7 @@ tu_bo_list_merge(struct tu_bo_list *list, const struct tu_bo_list *other);
 
 struct tu_cmd_buffer
 {
-   VK_LOADER_DATA _loader_data;
+   struct vk_object_base base;
 
    struct tu_device *device;
 
@@ -920,13 +992,14 @@ struct tu_cmd_buffer
 
    struct tu_cmd_state state;
    struct tu_vertex_binding vertex_bindings[MAX_VBS];
+   uint32_t vertex_bindings_set;
    uint32_t queue_family_index;
 
    uint32_t push_constants[MAX_PUSH_CONSTANTS_SIZE / 4];
    VkShaderStageFlags push_constant_stages;
    struct tu_descriptor_set meta_push_descriptors;
 
-   struct tu_descriptor_state descriptors[VK_PIPELINE_BIND_POINT_RANGE_SIZE];
+   struct tu_descriptor_state descriptors[MAX_BIND_POINTS];
 
    struct tu_cmd_buffer_upload upload;
 
@@ -938,21 +1011,8 @@ struct tu_cmd_buffer
    struct tu_cs draw_epilogue_cs;
    struct tu_cs sub_cs;
 
-   uint16_t marker_reg;
-   uint32_t marker_seqno;
-
-   struct tu_bo scratch_bo;
-   uint32_t scratch_seqno;
-#define VSC_OVERFLOW 0x8
-#define VSC_SCRATCH 0x10
-
-   struct tu_bo vsc_data;
-   struct tu_bo vsc_data2;
-   uint32_t vsc_data_pitch;
-   uint32_t vsc_data2_pitch;
-   bool use_vsc_data;
-
-   bool wait_for_idle;
+   uint32_t vsc_draw_strm_pitch;
+   uint32_t vsc_prim_strm_pitch;
 };
 
 /* Temporary struct for tracking a register state to be written, used by
@@ -968,16 +1028,18 @@ struct tu_reg_value {
    uint32_t bo_shift;
 };
 
-unsigned
+
+void tu_emit_cache_flush_renderpass(struct tu_cmd_buffer *cmd_buffer,
+                                    struct tu_cs *cs);
+
+void tu_emit_cache_flush_ccu(struct tu_cmd_buffer *cmd_buffer,
+                             struct tu_cs *cs,
+                             enum tu_cmd_ccu_state ccu_state);
+
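A usage sketch for the CCU-state helper (the sequence is illustrative; cmd
and cs are hypothetical locals):

   /* Switch the CCU to gmem mode before tiled rendering... */
   tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_GMEM);
   /* ...emit the tiled rendering IBs, then return to sysmem mode. */
   tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_SYSMEM);
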
+void
 tu6_emit_event_write(struct tu_cmd_buffer *cmd,
                      struct tu_cs *cs,
-                     enum vgt_event_type event,
-                     bool need_seqno);
-
-bool
-tu_get_memory_fd(struct tu_device *device,
-                 struct tu_device_memory *memory,
-                 int *pFD);
+                     enum vgt_event_type event);
 
 static inline struct tu_descriptor_state *
 tu_get_descriptors_state(struct tu_cmd_buffer *cmd_buffer,
@@ -986,102 +1048,34 @@ tu_get_descriptors_state(struct tu_cmd_buffer *cmd_buffer,
    return &cmd_buffer->descriptors[bind_point];
 }
 
-/*
- * Takes x,y,z as exact numbers of invocations, instead of blocks.
- *
- * Limitations: Can't call normal dispatch functions without binding or
- * rebinding
- *              the compute pipeline.
- */
-void
-tu_unaligned_dispatch(struct tu_cmd_buffer *cmd_buffer,
-                      uint32_t x,
-                      uint32_t y,
-                      uint32_t z);
-
 struct tu_event
 {
+   struct vk_object_base base;
    struct tu_bo bo;
 };
 
-struct tu_shader_module;
-
-#define TU_HASH_SHADER_IS_GEOM_COPY_SHADER (1 << 0)
-#define TU_HASH_SHADER_SISCHED (1 << 1)
-#define TU_HASH_SHADER_UNSAFE_MATH (1 << 2)
-void
-tu_hash_shaders(unsigned char *hash,
-                const VkPipelineShaderStageCreateInfo **stages,
-                const struct tu_pipeline_layout *layout,
-                const struct tu_pipeline_key *key,
-                uint32_t flags);
-
-static inline gl_shader_stage
-vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
-{
-   assert(__builtin_popcount(vk_stage) == 1);
-   return ffs(vk_stage) - 1;
-}
-
-static inline VkShaderStageFlagBits
-mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
-{
-   return (1 << mesa_stage);
-}
-
-#define TU_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)
-
-#define tu_foreach_stage(stage, stage_bits)                                  \
-   for (gl_shader_stage stage,                                               \
-        __tmp = (gl_shader_stage)((stage_bits) &TU_STAGE_MASK);              \
-        stage = __builtin_ffs(__tmp) - 1, __tmp; __tmp &= ~(1 << (stage)))
-
 struct tu_shader_module
 {
+   struct vk_object_base base;
+
    unsigned char sha1[20];
 
    uint32_t code_size;
    const uint32_t *code[0];
 };
 
-struct tu_shader_compile_options
-{
-   struct ir3_shader_key key;
-
-   bool optimize;
-   bool include_binning_pass;
-};
-
-struct tu_descriptor_map
+struct tu_push_constant_range
 {
-   /* TODO: avoid fixed size array/justify the size */
-   unsigned num; /* number of array entries */
-   unsigned num_desc; /* Number of descriptors (sum of array_size[]) */
-   int set[64];
-   int binding[64];
-   int value[64];
-   int array_size[64];
+   uint32_t lo;
+   uint32_t count;
 };
 
 struct tu_shader
 {
-   struct ir3_shader ir3_shader;
+   struct ir3_shader *ir3_shader;
 
-   struct tu_descriptor_map texture_map;
-   struct tu_descriptor_map sampler_map;
-   struct tu_descriptor_map ubo_map;
-   struct tu_descriptor_map ssbo_map;
-   struct tu_descriptor_map image_map;
-
-   /* This may be true for vertex shaders.  When true, variants[1] is the
-    * binning variant and binning_binary is non-NULL.
-    */
-   bool has_binning_pass;
-
-   void *binary;
-   void *binning_binary;
-
-   struct ir3_shader_variant variants[0];
+   struct tu_push_constant_range push_consts;
+   uint8_t active_desc_sets;
 };
 
 struct tu_shader *
@@ -1096,66 +1090,52 @@ tu_shader_destroy(struct tu_device *dev,
                   struct tu_shader *shader,
                   const VkAllocationCallbacks *alloc);
 
-void
-tu_shader_compile_options_init(
-   struct tu_shader_compile_options *options,
-   const VkGraphicsPipelineCreateInfo *pipeline_info);
-
-VkResult
-tu_shader_compile(struct tu_device *dev,
-                  struct tu_shader *shader,
-                  const struct tu_shader *next_stage,
-                  const struct tu_shader_compile_options *options,
-                  const VkAllocationCallbacks *alloc);
-
 struct tu_program_descriptor_linkage
 {
-   struct ir3_ubo_analysis_state ubo_state;
    struct ir3_const_state const_state;
 
    uint32_t constlen;
 
-   struct tu_descriptor_map texture_map;
-   struct tu_descriptor_map sampler_map;
-   struct tu_descriptor_map ubo_map;
-   struct tu_descriptor_map ssbo_map;
-   struct tu_descriptor_map image_map;
+   struct tu_push_constant_range push_consts;
 };
 
 struct tu_pipeline
 {
-   struct tu_cs cs;
+   struct vk_object_base base;
 
-   struct tu_dynamic_state dynamic_state;
+   struct tu_cs cs;
 
    struct tu_pipeline_layout *layout;
 
    bool need_indirect_descriptor_sets;
    VkShaderStageFlags active_stages;
+   uint32_t active_desc_sets;
+
+   /* mask of enabled dynamic states: if BIT(i) is set, state i is dynamic
+    * and pipeline->dynamic_state[i] is *NOT* used (the state is emitted by
+    * the command buffer instead)
+    */
+   uint32_t dynamic_state_mask;
+   struct tu_draw_state dynamic_state[TU_DYNAMIC_STATE_COUNT];
+
+   /* gras_su_cntl without line width, used for dynamic line width state */
+   uint32_t gras_su_cntl;
+
+   /* draw states for the pipeline */
+   struct tu_draw_state load_state, rast_state, ds_state, blend_state;
 
    struct
    {
-      struct tu_bo binary_bo;
-      struct tu_cs_entry state_ib;
-      struct tu_cs_entry binning_state_ib;
+      struct tu_draw_state state;
+      struct tu_draw_state binning_state;
 
       struct tu_program_descriptor_linkage link[MESA_SHADER_STAGES];
    } program;
 
    struct
    {
-      uint8_t bindings[MAX_VERTEX_ATTRIBS];
-      uint16_t strides[MAX_VERTEX_ATTRIBS];
-      uint16_t offsets[MAX_VERTEX_ATTRIBS];
-      uint32_t count;
-
-      uint8_t binning_bindings[MAX_VERTEX_ATTRIBS];
-      uint16_t binning_strides[MAX_VERTEX_ATTRIBS];
-      uint16_t binning_offsets[MAX_VERTEX_ATTRIBS];
-      uint32_t binning_count;
-
-      struct tu_cs_entry state_ib;
-      struct tu_cs_entry binning_state_ib;
+      struct tu_draw_state state;
+      struct tu_draw_state binning_state;
+      uint32_t bindings_used;
    } vi;
 
    struct
@@ -1166,24 +1146,12 @@ struct tu_pipeline
 
    struct
    {
-      struct tu_cs_entry state_ib;
-   } vp;
-
-   struct
-   {
-      uint32_t gras_su_cntl;
-      struct tu_cs_entry state_ib;
-   } rast;
-
-   struct
-   {
-      struct tu_cs_entry state_ib;
-   } ds;
-
-   struct
-   {
-      struct tu_cs_entry state_ib;
-   } blend;
+      uint32_t patch_type;
+      uint32_t param_stride;
+      uint32_t hs_bo_regid;
+      uint32_t ds_bo_regid;
+      bool upper_left_domain_origin;
+   } tess;
 
    struct
    {
@@ -1198,9 +1166,7 @@ void
 tu6_emit_scissor(struct tu_cs *cs, const VkRect2D *scissor);
 
 void
-tu6_emit_gras_su_cntl(struct tu_cs *cs,
-                      uint32_t gras_su_cntl,
-                      float line_width);
+tu6_emit_sample_locations(struct tu_cs *cs, const VkSampleLocationsInfoEXT *samp_loc);
 
 void
 tu6_emit_depth_bias(struct tu_cs *cs,
@@ -1208,76 +1174,99 @@ tu6_emit_depth_bias(struct tu_cs *cs,
                     float clamp,
                     float slope_factor);
 
-void
-tu6_emit_stencil_compare_mask(struct tu_cs *cs,
-                              uint32_t front,
-                              uint32_t back);
+void tu6_emit_msaa(struct tu_cs *cs, VkSampleCountFlagBits samples);
 
-void
-tu6_emit_stencil_write_mask(struct tu_cs *cs, uint32_t front, uint32_t back);
+void tu6_emit_window_scissor(struct tu_cs *cs, uint32_t x1, uint32_t y1, uint32_t x2, uint32_t y2);
+
+void tu6_emit_window_offset(struct tu_cs *cs, uint32_t x1, uint32_t y1);
 
 void
-tu6_emit_stencil_reference(struct tu_cs *cs, uint32_t front, uint32_t back);
+tu6_emit_xs_config(struct tu_cs *cs,
+                   gl_shader_stage stage,
+                   const struct ir3_shader_variant *xs,
+                   uint64_t binary_iova);
 
 void
-tu6_emit_blend_constants(struct tu_cs *cs, const float constants[4]);
+tu6_emit_vpc(struct tu_cs *cs,
+             const struct ir3_shader_variant *vs,
+             const struct ir3_shader_variant *hs,
+             const struct ir3_shader_variant *ds,
+             const struct ir3_shader_variant *gs,
+             const struct ir3_shader_variant *fs,
+             uint32_t patch_control_points,
+             bool vshs_workgroup);
 
-struct tu_userdata_info *
-tu_lookup_user_sgpr(struct tu_pipeline *pipeline,
-                    gl_shader_stage stage,
-                    int idx);
+void
+tu6_emit_fs_inputs(struct tu_cs *cs, const struct ir3_shader_variant *fs);
 
-struct tu_shader_variant *
-tu_get_shader(struct tu_pipeline *pipeline, gl_shader_stage stage);
+struct tu_image_view;
 
-struct tu_graphics_pipeline_create_info
-{
-   bool use_rectlist;
-   bool db_depth_clear;
-   bool db_stencil_clear;
-   bool db_depth_disable_expclear;
-   bool db_stencil_disable_expclear;
-   bool db_flush_depth_inplace;
-   bool db_flush_stencil_inplace;
-   bool db_resummarize;
-   uint32_t custom_blend_mode;
-};
+void
+tu_resolve_sysmem(struct tu_cmd_buffer *cmd,
+                  struct tu_cs *cs,
+                  struct tu_image_view *src,
+                  struct tu_image_view *dst,
+                  uint32_t layers,
+                  const VkRect2D *rect);
 
-struct tu_native_format
-{
-   int vtx;      /* VFMTn_xxx or -1 */
-   int tex;      /* TFMTn_xxx or -1 */
-   int rb;       /* RBn_xxx or -1 */
-   int swap;     /* enum a3xx_color_swap */
-   bool present; /* internal only; always true to external users */
-};
+void
+tu_clear_sysmem_attachment(struct tu_cmd_buffer *cmd,
+                           struct tu_cs *cs,
+                           uint32_t a,
+                           const VkRenderPassBeginInfo *info);
 
-const struct tu_native_format *
-tu6_get_native_format(VkFormat format);
+void
+tu_clear_gmem_attachment(struct tu_cmd_buffer *cmd,
+                         struct tu_cs *cs,
+                         uint32_t a,
+                         const VkRenderPassBeginInfo *info);
 
 void
-tu_pack_clear_value(const VkClearValue *val,
-                    VkFormat format,
-                    uint32_t buf[4]);
+tu_load_gmem_attachment(struct tu_cmd_buffer *cmd,
+                        struct tu_cs *cs,
+                        uint32_t a,
+                        bool force_load);
 
+/* exposed so that a load can be emitted without checking LOAD_OP */
 void
-tu_2d_clear_color(const VkClearColorValue *val, VkFormat format, uint32_t buf[4]);
+tu_emit_load_gmem_attachment(struct tu_cmd_buffer *cmd, struct tu_cs *cs, uint32_t a);
 
+/* note: gmem store can also resolve */
 void
-tu_2d_clear_zs(const VkClearDepthStencilValue *val, VkFormat format, uint32_t buf[4]);
+tu_store_gmem_attachment(struct tu_cmd_buffer *cmd,
+                         struct tu_cs *cs,
+                         uint32_t a,
+                         uint32_t gmem_a);
 
-enum a6xx_2d_ifmt tu6_rb_fmt_to_ifmt(enum a6xx_color_fmt fmt);
-enum a6xx_depth_format tu6_pipe2depth(VkFormat format);
+enum tu_supported_formats {
+   FMT_VERTEX = 1,
+   FMT_TEXTURE = 2,
+   FMT_COLOR = 4,
+};
 
-struct tu_image_level
+struct tu_native_format
 {
-   VkDeviceSize offset;
-   VkDeviceSize size;
-   uint32_t pitch;
+   enum a6xx_format fmt : 8;
+   enum a3xx_color_swap swap : 8;
+   enum a6xx_tile_mode tile_mode : 8;
+   enum tu_supported_formats supported : 8;
 };
 
+struct tu_native_format tu6_format_vtx(VkFormat format);
+struct tu_native_format tu6_format_color(VkFormat format, enum a6xx_tile_mode tile_mode);
+struct tu_native_format tu6_format_texture(VkFormat format, enum a6xx_tile_mode tile_mode);
+
+static inline enum a6xx_format
+tu6_base_format(VkFormat format)
+{
+   /* note: tu6_format_color doesn't care about tiling for .fmt field */
+   return tu6_format_color(format, TILE6_LINEAR).fmt;
+}
+
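A minimal sketch of the format-query helpers (the format chosen is
illustrative):

   struct tu_native_format nfmt =
      tu6_format_color(VK_FORMAT_R8G8B8A8_UNORM, TILE6_LINEAR);
   bool renderable = nfmt.supported & FMT_COLOR;
   enum a6xx_format fmt = tu6_base_format(VK_FORMAT_R8G8B8A8_UNORM);
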
 struct tu_image
 {
+   struct vk_object_base base;
+
    VkImageType type;
    /* The original VkFormat provided by the client.  This may not match any
     * of the actual surface formats.
@@ -1292,10 +1281,8 @@ struct tu_image
    uint32_t layer_count;
    VkSampleCountFlagBits samples;
 
-
-   uint32_t alignment;
-
-   struct fdl_layout layout;
+   struct fdl_layout layout[3];
+   uint32_t total_size;
 
    unsigned queue_family_mask;
    bool exclusive;
@@ -1309,11 +1296,6 @@ struct tu_image
    VkDeviceSize bo_offset;
 };
 
-unsigned
-tu_image_queue_family_mask(const struct tu_image *image,
-                           uint32_t family,
-                           uint32_t queue_family);
-
 static inline uint32_t
 tu_get_layerCount(const struct tu_image *image,
                   const VkImageSubresourceRange *range)
@@ -1332,107 +1314,90 @@ tu_get_levelCount(const struct tu_image *image,
              : range->levelCount;
 }
 
-static inline VkDeviceSize
-tu_layer_size(struct tu_image *image, int level)
+struct tu_image_view
 {
-   return fdl_layer_stride(&image->layout, level);
-}
+   struct vk_object_base base;
 
-static inline uint32_t
-tu_image_stride(struct tu_image *image, int level)
-{
-   return image->layout.slices[level].pitch * image->layout.cpp;
-}
-
-static inline uint64_t
-tu_image_base(struct tu_image *image, int level, int layer)
-{
-   return image->bo->iova + image->bo_offset +
-      fdl_surface_offset(&image->layout, level, layer);
-}
+   struct tu_image *image; /**< VkImageViewCreateInfo::image */
 
-#define tu_image_base_ref(image, level, layer)                          \
-   .bo = image->bo,                                                     \
-   .bo_offset = (image->bo_offset + fdl_surface_offset(&image->layout,  \
-                                                       level, layer))
+   uint64_t base_addr;
+   uint64_t ubwc_addr;
+   uint32_t layer_size;
+   uint32_t ubwc_layer_size;
 
-#define tu_image_view_base_ref(iview)                                   \
-   tu_image_base_ref(iview->image, iview->base_mip, iview->base_layer)
+   /* used to determine if fast gmem store path can be used */
+   VkExtent2D extent;
+   bool need_y2_align;
 
-static inline VkDeviceSize
-tu_image_ubwc_size(struct tu_image *image, int level)
-{
-   return image->layout.ubwc_layer_size;
-}
+   bool ubwc_enabled;
 
-static inline uint32_t
-tu_image_ubwc_pitch(struct tu_image *image, int level)
-{
-   return image->layout.ubwc_slices[level].pitch;
-}
+   uint32_t descriptor[A6XX_TEX_CONST_DWORDS];
 
-static inline uint64_t
-tu_image_ubwc_surface_offset(struct tu_image *image, int level, int layer)
-{
-   return image->layout.ubwc_slices[level].offset +
-      layer * tu_image_ubwc_size(image, level);
-}
+   /* Descriptor for use as a storage image as opposed to a sampled image.
+    * This has a few differences for cube maps (e.g. type).
+    */
+   uint32_t storage_descriptor[A6XX_TEX_CONST_DWORDS];
 
-static inline uint64_t
-tu_image_ubwc_base(struct tu_image *image, int level, int layer)
-{
-   return image->bo->iova + image->bo_offset +
-      tu_image_ubwc_surface_offset(image, level, layer);
-}
+   /* pre-filled register values */
+   uint32_t PITCH;
+   uint32_t FLAG_BUFFER_PITCH;
 
-#define tu_image_ubwc_base_ref(image, level, layer)                     \
-   .bo = image->bo,                                                     \
-   .bo_offset = (image->bo_offset + tu_image_ubwc_surface_offset(image, \
-                                                                 level, layer))
+   uint32_t RB_MRT_BUF_INFO;
+   uint32_t SP_FS_MRT_REG;
 
-#define tu_image_view_ubwc_base_ref(iview) \
-   tu_image_ubwc_base_ref(iview->image, iview->base_mip, iview->base_layer)
+   uint32_t SP_PS_2D_SRC_INFO;
+   uint32_t SP_PS_2D_SRC_SIZE;
 
-enum a6xx_tile_mode
-tu6_get_image_tile_mode(struct tu_image *image, int level);
-enum a3xx_msaa_samples
-tu_msaa_samples(uint32_t samples);
+   uint32_t RB_2D_DST_INFO;
 
-struct tu_image_view
-{
-   struct tu_image *image; /**< VkImageViewCreateInfo::image */
+   uint32_t RB_BLIT_DST_INFO;
 
-   VkImageViewType type;
-   VkImageAspectFlags aspect_mask;
-   VkFormat vk_format;
-   uint32_t base_layer;
-   uint32_t layer_count;
-   uint32_t base_mip;
-   uint32_t level_count;
-   VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */
+   /* for d32s8 separate stencil */
+   uint64_t stencil_base_addr;
+   uint32_t stencil_layer_size;
+   uint32_t stencil_PITCH;
+};
 
-   uint32_t descriptor[A6XX_TEX_CONST_DWORDS];
+struct tu_sampler_ycbcr_conversion {
+   struct vk_object_base base;
 
-   /* Descriptor for use as a storage image as opposed to a sampled image.
-    * This has a few differences for cube maps (e.g. type).
-    */
-   uint32_t storage_descriptor[A6XX_TEX_CONST_DWORDS];
+   VkFormat format;
+   VkSamplerYcbcrModelConversion ycbcr_model;
+   VkSamplerYcbcrRange ycbcr_range;
+   VkComponentMapping components;
+   VkChromaLocation chroma_offsets[2];
+   VkFilter chroma_filter;
 };
 
-struct tu_sampler
-{
-   uint32_t state[A6XX_TEX_SAMP_DWORDS];
+struct tu_sampler {
+   struct vk_object_base base;
 
-   bool needs_border;
-   VkBorderColor border;
+   uint32_t descriptor[A6XX_TEX_SAMP_DWORDS];
+   struct tu_sampler_ycbcr_conversion *ycbcr_sampler;
 };
 
+void
+tu_cs_image_ref(struct tu_cs *cs, const struct tu_image_view *iview, uint32_t layer);
+
+void
+tu_cs_image_ref_2d(struct tu_cs *cs, const struct tu_image_view *iview, uint32_t layer, bool src);
+
+void
+tu_cs_image_flag_ref(struct tu_cs *cs, const struct tu_image_view *iview, uint32_t layer);
+
+void
+tu_cs_image_stencil_ref(struct tu_cs *cs, const struct tu_image_view *iview, uint32_t layer);
+
+#define tu_image_view_stencil(iview, x) \
+   ((iview->x & ~A6XX_##x##_COLOR_FORMAT__MASK) | A6XX_##x##_COLOR_FORMAT(FMT6_8_UINT))
+
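For example, tu_image_view_stencil(iview, RB_2D_DST_INFO) expands to the
following, rewriting the pre-packed register value so the separate stencil
plane is treated as FMT6_8_UINT:

   ((iview->RB_2D_DST_INFO & ~A6XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK) |
    A6XX_RB_2D_DST_INFO_COLOR_FORMAT(FMT6_8_UINT))
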
 VkResult
 tu_image_create(VkDevice _device,
                 const VkImageCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *alloc,
                 VkImage *pImage,
-                uint64_t modifier);
+                uint64_t modifier,
+                const VkSubresourceLayout *plane_layouts);
 
 VkResult
 tu_image_from_gralloc(VkDevice device_h,
@@ -1442,12 +1407,14 @@ tu_image_from_gralloc(VkDevice device_h,
                       VkImage *out_image_h);
 
 void
-tu_image_view_init(struct tu_image_view *view,
-                   struct tu_device *device,
-                   const VkImageViewCreateInfo *pCreateInfo);
+tu_image_view_init(struct tu_image_view *iview,
+                   const VkImageViewCreateInfo *pCreateInfo,
+                   bool limited_z24s8);
 
 struct tu_buffer_view
 {
+   struct vk_object_base base;
+
    uint32_t descriptor[A6XX_TEX_CONST_DWORDS];
 
    struct tu_buffer *buffer;
@@ -1457,38 +1424,6 @@ tu_buffer_view_init(struct tu_buffer_view *view,
                     struct tu_device *device,
                     const VkBufferViewCreateInfo *pCreateInfo);
 
-static inline struct VkExtent3D
-tu_sanitize_image_extent(const VkImageType imageType,
-                         const struct VkExtent3D imageExtent)
-{
-   switch (imageType) {
-   case VK_IMAGE_TYPE_1D:
-      return (VkExtent3D) { imageExtent.width, 1, 1 };
-   case VK_IMAGE_TYPE_2D:
-      return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
-   case VK_IMAGE_TYPE_3D:
-      return imageExtent;
-   default:
-      unreachable("invalid image type");
-   }
-}
-
-static inline struct VkOffset3D
-tu_sanitize_image_offset(const VkImageType imageType,
-                         const struct VkOffset3D imageOffset)
-{
-   switch (imageType) {
-   case VK_IMAGE_TYPE_1D:
-      return (VkOffset3D) { imageOffset.x, 0, 0 };
-   case VK_IMAGE_TYPE_2D:
-      return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
-   case VK_IMAGE_TYPE_3D:
-      return imageOffset;
-   default:
-      unreachable("invalid image type");
-   }
-}
-
 struct tu_attachment_info
 {
    struct tu_image_view *attachment;
@@ -1496,14 +1431,42 @@ struct tu_attachment_info
 
 struct tu_framebuffer
 {
+   struct vk_object_base base;
+
    uint32_t width;
    uint32_t height;
    uint32_t layers;
 
+   /* size of the first tile */
+   VkExtent2D tile0;
+   /* number of tiles */
+   VkExtent2D tile_count;
+
+   /* size of the first VSC pipe */
+   VkExtent2D pipe0;
+   /* number of VSC pipes */
+   VkExtent2D pipe_count;
+
+   /* pipe register values */
+   uint32_t pipe_config[MAX_VSC_PIPES];
+   uint32_t pipe_sizes[MAX_VSC_PIPES];
+
    uint32_t attachment_count;
    struct tu_attachment_info attachments[0];
 };
 
+void
+tu_framebuffer_tiling_config(struct tu_framebuffer *fb,
+                             const struct tu_device *device,
+                             const struct tu_render_pass *pass);
+
+struct tu_subpass_barrier {
+   VkPipelineStageFlags src_stage_mask;
+   VkAccessFlags src_access_mask;
+   VkAccessFlags dst_access_mask;
+   bool incoherent_ccu_color, incoherent_ccu_depth;
+};
+
 struct tu_subpass_attachment
 {
    uint32_t attachment;
@@ -1519,36 +1482,45 @@ struct tu_subpass
    struct tu_subpass_attachment depth_stencil_attachment;
 
    VkSampleCountFlagBits samples;
+
+   uint32_t srgb_cntl;
+
+   struct tu_subpass_barrier start_barrier;
 };
 
 struct tu_render_pass_attachment
 {
    VkFormat format;
+   uint32_t samples;
    uint32_t cpp;
-   VkAttachmentLoadOp load_op;
-   VkAttachmentLoadOp stencil_load_op;
-   VkAttachmentStoreOp store_op;
-   VkAttachmentStoreOp stencil_store_op;
+   VkImageAspectFlags clear_mask;
+   bool load;
+   bool store;
    int32_t gmem_offset;
+   /* for D32S8 separate stencil: */
+   bool load_stencil;
+   bool store_stencil;
+   int32_t gmem_offset_stencil;
 };
 
 struct tu_render_pass
 {
+   struct vk_object_base base;
+
    uint32_t attachment_count;
    uint32_t subpass_count;
    uint32_t gmem_pixels;
+   uint32_t tile_align_w;
    struct tu_subpass_attachment *subpass_attachments;
    struct tu_render_pass_attachment *attachments;
+   struct tu_subpass_barrier end_barrier;
    struct tu_subpass subpasses[0];
 };
 
-VkResult
-tu_device_init_meta(struct tu_device *device);
-void
-tu_device_finish_meta(struct tu_device *device);
-
 struct tu_query_pool
 {
+   struct vk_object_base base;
+
    VkQueryType type;
    uint32_t stride;
    uint64_t size;
@@ -1556,10 +1528,26 @@ struct tu_query_pool
    struct tu_bo bo;
 };
 
+enum tu_semaphore_kind
+{
+   TU_SEMAPHORE_NONE,
+   TU_SEMAPHORE_SYNCOBJ,
+};
+
+struct tu_semaphore_part
+{
+   enum tu_semaphore_kind kind;
+   union {
+      uint32_t syncobj;
+   };
+};
+
 struct tu_semaphore
 {
-   uint32_t syncobj;
-   uint32_t temp_syncobj;
+   struct vk_object_base base;
+
+   struct tu_semaphore_part permanent;
+   struct tu_semaphore_part temporary;
 };
 
 void
@@ -1585,19 +1573,11 @@ tu_update_descriptor_set_with_template(
    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
    const void *pData);
 
-void
-tu_meta_push_descriptor_set(struct tu_cmd_buffer *cmd_buffer,
-                            VkPipelineBindPoint pipelineBindPoint,
-                            VkPipelineLayout _layout,
-                            uint32_t set,
-                            uint32_t descriptorWriteCount,
-                            const VkWriteDescriptorSet *pDescriptorWrites);
-
-int
-tu_drm_get_gpu_id(const struct tu_physical_device *dev, uint32_t *id);
-
-int
-tu_drm_get_gmem_size(const struct tu_physical_device *dev, uint32_t *size);
+VkResult
+tu_physical_device_init(struct tu_physical_device *device,
+                        struct tu_instance *instance);
+VkResult
+tu_enumerate_devices(struct tu_instance *instance);
 
 int
 tu_drm_submitqueue_new(const struct tu_device *dev,
@@ -1607,21 +1587,6 @@ tu_drm_submitqueue_new(const struct tu_device *dev,
 void
 tu_drm_submitqueue_close(const struct tu_device *dev, uint32_t queue_id);
 
-uint32_t
-tu_gem_new(const struct tu_device *dev, uint64_t size, uint32_t flags);
-uint32_t
-tu_gem_import_dmabuf(const struct tu_device *dev,
-                     int prime_fd,
-                     uint64_t size);
-int
-tu_gem_export_dmabuf(const struct tu_device *dev, uint32_t gem_handle);
-void
-tu_gem_close(const struct tu_device *dev, uint32_t gem_handle);
-uint64_t
-tu_gem_info_offset(const struct tu_device *dev, uint32_t gem_handle);
-uint64_t
-tu_gem_info_iova(const struct tu_device *dev, uint32_t gem_handle);
-
 #define TU_DEFINE_HANDLE_CASTS(__tu_type, __VkType)                          \
                                                                              \
    static inline struct __tu_type *__tu_type##_from_handle(__VkType _handle) \
@@ -1676,6 +1641,7 @@ TU_DEFINE_NONDISP_HANDLE_CASTS(tu_pipeline_layout, VkPipelineLayout)
 TU_DEFINE_NONDISP_HANDLE_CASTS(tu_query_pool, VkQueryPool)
 TU_DEFINE_NONDISP_HANDLE_CASTS(tu_render_pass, VkRenderPass)
 TU_DEFINE_NONDISP_HANDLE_CASTS(tu_sampler, VkSampler)
+TU_DEFINE_NONDISP_HANDLE_CASTS(tu_sampler_ycbcr_conversion, VkSamplerYcbcrConversion)
 TU_DEFINE_NONDISP_HANDLE_CASTS(tu_shader_module, VkShaderModule)
 TU_DEFINE_NONDISP_HANDLE_CASTS(tu_semaphore, VkSemaphore)