turnip: share code between 3D blit/clear path and tu_pipeline
diff --git a/src/freedreno/vulkan/tu_private.h b/src/freedreno/vulkan/tu_private.h
index 9fa8763179da371ab743b35a6ea910a9fba370e0..1e1fdc1c61ab8b98ea9aa2f0b89bdf26f0ec3f39 100644
--- a/src/freedreno/vulkan/tu_private.h
+++ b/src/freedreno/vulkan/tu_private.h
@@ -96,6 +96,7 @@ typedef uint32_t xcb_window_t;
 #define NUM_META_FS_KEYS 13
 #define TU_MAX_DRM_DEVICES 8
 #define MAX_VIEWS 8
+#define MAX_BIND_POINTS 2 /* compute + graphics */
 /* The Qualcomm driver exposes 0x20000058 */
 #define MAX_STORAGE_BUFFER_RANGE 0x20000000
 /* We use ldc for uniform buffer loads, just like the Qualcomm driver, so
@@ -324,11 +325,14 @@ struct tu_physical_device
    uint64_t gmem_base;
    uint32_t ccu_offset_gmem;
    uint32_t ccu_offset_bypass;
+   /* alignment for tile sizes */
+   uint32_t tile_align_w;
+#define TILE_ALIGN_H 16
+   /* gmem store/load granularity */
 #define GMEM_ALIGN_W 16
 #define GMEM_ALIGN_H 4
 
    struct {
-      uint32_t RB_UNKNOWN_8E04_blit;    /* for CP_BLIT's */
       uint32_t PC_UNKNOWN_9805;
       uint32_t SP_UNKNOWN_A0F8;
    } magic;
@@ -349,6 +353,7 @@ enum tu_debug_flags
    TU_DEBUG_NOBIN = 1 << 3,
    TU_DEBUG_SYSMEM = 1 << 4,
    TU_DEBUG_FORCEBIN = 1 << 5,
+   TU_DEBUG_NOUBWC = 1 << 6,
 };
 
 struct tu_instance
@@ -500,10 +505,21 @@ struct tu_device
    /* Backup in-memory cache to be used if the app doesn't provide one */
    struct tu_pipeline_cache *mem_cache;
 
-   struct tu_bo vsc_data;
-   struct tu_bo vsc_data2;
-   uint32_t vsc_data_pitch;
-   uint32_t vsc_data2_pitch;
+   struct tu_bo vsc_draw_strm;
+   struct tu_bo vsc_prim_strm;
+   uint32_t vsc_draw_strm_pitch;
+   uint32_t vsc_prim_strm_pitch;
+
+#define MIN_SCRATCH_BO_SIZE_LOG2 12 /* A page */
+
+   /* Currently the kernel driver uses a 32-bit GPU address space, but it
+    * should never grow beyond 48 bits, so sizing for 48 bits is future-proof.
+    */
+   struct {
+      struct tu_bo bo;
+      mtx_t construct_mtx;
+      bool initialized;
+   } scratch_bos[48 - MIN_SCRATCH_BO_SIZE_LOG2];
 
    struct tu_bo border_color;
 
@@ -527,6 +543,15 @@ tu_bo_finish(struct tu_device *dev, struct tu_bo *bo);
 VkResult
 tu_bo_map(struct tu_device *dev, struct tu_bo *bo);
 
+/* Get a scratch bo for use inside a command buffer. This will always return
+ * the same bo for the same or similar sizes, so only one scratch bo can be
+ * in use at a time. It's meant for short-lived uses where we need to write
+ * to some piece of memory, read it back, and then immediately discard the
+ * contents.
+ */
+VkResult
+tu_get_scratch_bo(struct tu_device *dev, uint64_t size, struct tu_bo **bo);
+
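
A minimal sketch of how this bucket scheme might work, assuming mesa's
existing helpers (tu_bo_init_new, util_logbase2_ceil64, MAX2, ARRAY_SIZE);
illustrative, not the verbatim implementation:

VkResult
tu_get_scratch_bo(struct tu_device *dev, uint64_t size, struct tu_bo **bo)
{
   /* Round the size up to a power of two, clamped to the minimum bucket. */
   unsigned size_log2 = MAX2(util_logbase2_ceil64(size),
                             MIN_SCRATCH_BO_SIZE_LOG2);
   unsigned index = size_log2 - MIN_SCRATCH_BO_SIZE_LOG2;

   assert(index < ARRAY_SIZE(dev->scratch_bos));

   /* Fast path: the bucket was already initialized by an earlier call. */
   if (dev->scratch_bos[index].initialized) {
      *bo = &dev->scratch_bos[index].bo;
      return VK_SUCCESS;
   }

   /* Slow path: allocate under the per-bucket mutex, rechecking in case
    * another thread won the race.
    */
   mtx_lock(&dev->scratch_bos[index].construct_mtx);
   if (!dev->scratch_bos[index].initialized) {
      VkResult result = tu_bo_init_new(dev, &dev->scratch_bos[index].bo,
                                       1ull << size_log2);
      if (result != VK_SUCCESS) {
         mtx_unlock(&dev->scratch_bos[index].construct_mtx);
         return result;
      }
      dev->scratch_bos[index].initialized = true;
   }
   mtx_unlock(&dev->scratch_bos[index].construct_mtx);

   *bo = &dev->scratch_bos[index].bo;
   return VK_SUCCESS;
}
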
 struct tu_cs_entry
 {
    /* No ownership */
@@ -690,7 +715,6 @@ struct tu_descriptor_update_template_entry
 struct tu_descriptor_update_template
 {
    uint32_t entry_count;
-   VkPipelineBindPoint bind_point;
    struct tu_descriptor_update_template_entry entry[0];
 };
 
@@ -723,7 +747,8 @@ enum tu_dynamic_state_bits
    TU_DYNAMIC_STENCIL_WRITE_MASK = 1 << 7,
    TU_DYNAMIC_STENCIL_REFERENCE = 1 << 8,
    TU_DYNAMIC_DISCARD_RECTANGLE = 1 << 9,
-   TU_DYNAMIC_ALL = (1 << 10) - 1,
+   TU_DYNAMIC_SAMPLE_LOCATIONS = 1 << 10,
+   TU_DYNAMIC_ALL = (1 << 11) - 1,
 };
 
 struct tu_vertex_binding
@@ -853,6 +878,7 @@ enum tu_cmd_dirty_bits
    TU_CMD_DIRTY_PIPELINE = 1 << 0,
    TU_CMD_DIRTY_COMPUTE_PIPELINE = 1 << 1,
    TU_CMD_DIRTY_VERTEX_BUFFERS = 1 << 2,
+
    TU_CMD_DIRTY_DESCRIPTOR_SETS = 1 << 3,
    TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS = 1 << 4,
    TU_CMD_DIRTY_PUSH_CONSTANTS = 1 << 5,
@@ -875,6 +901,116 @@ struct tu_streamout_state {
    uint32_t vpc_so_buf_cntl;
 };
 
+/* There are only three cache domains we have to care about: the CCU (color
+ * cache unit), which is used for color and depth/stencil attachments and
+ * copy/blit destinations and is split conceptually into a color domain and
+ * a depth domain; and the UCHE (universal cache), which is used for pretty
+ * much everything else, except for the CP (which is uncached) and the host.
+ * We need to flush whenever data crosses between these domains.
+ */
+
+enum tu_cmd_access_mask {
+   TU_ACCESS_UCHE_READ = 1 << 0,
+   TU_ACCESS_UCHE_WRITE = 1 << 1,
+   TU_ACCESS_CCU_COLOR_READ = 1 << 2,
+   TU_ACCESS_CCU_COLOR_WRITE = 1 << 3,
+   TU_ACCESS_CCU_DEPTH_READ = 1 << 4,
+   TU_ACCESS_CCU_DEPTH_WRITE = 1 << 5,
+
+   /* Experiments have shown that while it's safe to avoid flushing the CCU
+    * after each blit/renderpass, it's not safe to assume that subsequent
+    * lookups with a different attachment state will hit unflushed cache
+    * entries. That is, the CCU needs to be flushed and possibly invalidated
+    * when accessing memory with a different attachment state. Writing to an
+    * attachment under the following conditions after clearing using the
+    * normal 2d engine path is known to have issues:
+    *
+    * - It isn't the 0'th layer.
+    * - There are more than one attachment, and this isn't the 0'th attachment
+    *   (this seems to also depend on the cpp of the attachments).
+    *
+    * Our best guess is that the layer/MRT state is used when computing
+    * the location of a cache entry in CCU, to avoid conflicts. We assume that
+    * any access in a renderpass after or before an access by a transfer needs
+    * a flush/invalidate, and use the _INCOHERENT variants to represent access
+    * by a transfer.
+    */
+   TU_ACCESS_CCU_COLOR_INCOHERENT_READ = 1 << 6,
+   TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE = 1 << 7,
+   TU_ACCESS_CCU_DEPTH_INCOHERENT_READ = 1 << 8,
+   TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE = 1 << 9,
+
+   TU_ACCESS_SYSMEM_READ = 1 << 10,
+   TU_ACCESS_SYSMEM_WRITE = 1 << 11,
+
+   /* Set if a WFI is required due to data being read by the CP or the 2D
+    * engine.
+    */
+   TU_ACCESS_WFI_READ = 1 << 12,
+
+   TU_ACCESS_READ =
+      TU_ACCESS_UCHE_READ |
+      TU_ACCESS_CCU_COLOR_READ |
+      TU_ACCESS_CCU_DEPTH_READ |
+      TU_ACCESS_CCU_COLOR_INCOHERENT_READ |
+      TU_ACCESS_CCU_DEPTH_INCOHERENT_READ |
+      TU_ACCESS_SYSMEM_READ,
+
+   TU_ACCESS_WRITE =
+      TU_ACCESS_UCHE_WRITE |
+      TU_ACCESS_CCU_COLOR_WRITE |
+      TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE |
+      TU_ACCESS_CCU_DEPTH_WRITE |
+      TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE |
+      TU_ACCESS_SYSMEM_WRITE,
+
+   TU_ACCESS_ALL =
+      TU_ACCESS_READ |
+      TU_ACCESS_WRITE,
+};
+
+enum tu_cmd_flush_bits {
+   TU_CMD_FLAG_CCU_FLUSH_DEPTH = 1 << 0,
+   TU_CMD_FLAG_CCU_FLUSH_COLOR = 1 << 1,
+   TU_CMD_FLAG_CCU_INVALIDATE_DEPTH = 1 << 2,
+   TU_CMD_FLAG_CCU_INVALIDATE_COLOR = 1 << 3,
+   TU_CMD_FLAG_CACHE_FLUSH = 1 << 4,
+   TU_CMD_FLAG_CACHE_INVALIDATE = 1 << 5,
+
+   TU_CMD_FLAG_ALL_FLUSH =
+      TU_CMD_FLAG_CCU_FLUSH_DEPTH |
+      TU_CMD_FLAG_CCU_FLUSH_COLOR |
+      TU_CMD_FLAG_CACHE_FLUSH,
+
+   TU_CMD_FLAG_ALL_INVALIDATE =
+      TU_CMD_FLAG_CCU_INVALIDATE_DEPTH |
+      TU_CMD_FLAG_CCU_INVALIDATE_COLOR |
+      TU_CMD_FLAG_CACHE_INVALIDATE,
+
+   TU_CMD_FLAG_WFI = 1 << 6,
+};
+
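
To make the relationship between the two enums concrete, here is a hedged
sketch of how a barrier might fold source and destination access masks into
flush bits; the helper name and the exact rules are illustrative:

/* Illustrative only: given what was written before the barrier (src) and
 * what will be accessed after it (dst), accumulate the required flushes.
 */
static enum tu_cmd_flush_bits
tu_flush_bits_for_access(enum tu_cmd_access_mask src,
                         enum tu_cmd_access_mask dst)
{
   enum tu_cmd_flush_bits flush = 0;

   /* Make CCU writes available to the other domains. */
   if (src & (TU_ACCESS_CCU_COLOR_WRITE | TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE))
      flush |= TU_CMD_FLAG_CCU_FLUSH_COLOR;
   if (src & (TU_ACCESS_CCU_DEPTH_WRITE | TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE))
      flush |= TU_CMD_FLAG_CCU_FLUSH_DEPTH;
   if (src & TU_ACCESS_UCHE_WRITE)
      flush |= TU_CMD_FLAG_CACHE_FLUSH;

   /* Invalidate the caches the destination will read through. */
   if (dst & (TU_ACCESS_CCU_COLOR_READ | TU_ACCESS_CCU_COLOR_INCOHERENT_READ))
      flush |= TU_CMD_FLAG_CCU_INVALIDATE_COLOR;
   if (dst & (TU_ACCESS_CCU_DEPTH_READ | TU_ACCESS_CCU_DEPTH_INCOHERENT_READ))
      flush |= TU_CMD_FLAG_CCU_INVALIDATE_DEPTH;
   if (dst & TU_ACCESS_UCHE_READ)
      flush |= TU_CMD_FLAG_CACHE_INVALIDATE;

   /* Reads by the CP or the 2D engine additionally need a WFI. */
   if (dst & TU_ACCESS_WFI_READ)
      flush |= TU_CMD_FLAG_WFI;

   /* TU_ACCESS_SYSMEM_* (host/CP visibility) handling omitted for brevity. */
   return flush;
}
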
+/* Changing the CCU from sysmem mode to gmem mode or vice-versa is pretty
+ * heavy, involving a CCU cache flush/invalidate and a WFI in order to
+ * change which part of the gmem is used by the CCU. Here we keep track of
+ * the current state of the CCU.
+ */
+enum tu_cmd_ccu_state {
+   TU_CMD_CCU_SYSMEM,
+   TU_CMD_CCU_GMEM,
+   TU_CMD_CCU_UNKNOWN,
+};
+
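
A sketch of the transition guarded by this state; tu_emit_cache_flush is a
hypothetical stand-in for whatever emits the accumulated flush_bits, and the
RB_CCU_CNTL packing is schematic rather than the exact bitfield layout:

void
tu_emit_cache_flush_ccu(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
                        enum tu_cmd_ccu_state ccu_state)
{
   assert(ccu_state != TU_CMD_CCU_UNKNOWN);

   /* Skip the expensive transition if we're already in the right mode. */
   if (ccu_state == cmd->state.ccu_state)
      return;

   /* Flush and invalidate both CCU domains, and wait for idle, before
    * repointing the CCU's gmem carveout.
    */
   cmd->state.cache.flush_bits |= TU_CMD_FLAG_ALL_FLUSH |
                                  TU_CMD_FLAG_ALL_INVALIDATE |
                                  TU_CMD_FLAG_WFI;
   tu_emit_cache_flush(cmd, cs); /* hypothetical helper */

   tu_cs_emit_regs(cs, A6XX_RB_CCU_CNTL(
      .offset = ccu_state == TU_CMD_CCU_GMEM
                   ? cmd->device->physical_device->ccu_offset_gmem
                   : cmd->device->physical_device->ccu_offset_bypass,
      .gmem = ccu_state == TU_CMD_CCU_GMEM));

   cmd->state.ccu_state = ccu_state;
}
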
+struct tu_cache_state {
+   /* Caches which must be made available (flushed) eventually if there are
+    * any users outside that cache domain, and caches which must be
+    * invalidated eventually if there are any reads.
+    */
+   enum tu_cmd_flush_bits pending_flush_bits;
+   /* Flushes to emit at the next flush point */
+   enum tu_cmd_flush_bits flush_bits;
+};
+
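
For instance (illustrative, not the driver's exact helper), a write defers
its flush into pending_flush_bits, and a later access that depends on the
data promotes the relevant bits so they are emitted at the next flush point:

static void
tu_cache_flush_for_read(struct tu_cache_state *cache,
                        enum tu_cmd_flush_bits needed)
{
   cache->flush_bits |= cache->pending_flush_bits & needed;
   cache->pending_flush_bits &= ~needed;
}
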
 struct tu_cmd_state
 {
    uint32_t dirty;
@@ -909,6 +1045,17 @@ struct tu_cmd_state
    uint32_t max_index_count;
    uint64_t index_va;
 
+   /* Renderpasses are tricky: we may need to flush differently depending on
+    * whether we use sysmem or gmem, so any flushing that happens before a
+    * renderpass has to be delayed until we know which path it takes. We
+    * therefore keep two copies of the flush state: one for intra-renderpass
+    * flushes (i.e. renderpass dependencies) and one for outside a renderpass.
+    */
+   struct tu_cache_state cache;
+   struct tu_cache_state renderpass_cache;
+
+   enum tu_cmd_ccu_state ccu_state;
+
    const struct tu_render_pass *pass;
    const struct tu_subpass *subpass;
    const struct tu_framebuffer *framebuffer;
@@ -968,7 +1115,7 @@ tu_bo_list_merge(struct tu_bo_list *list, const struct tu_bo_list *other);
 /* This struct defines the layout of the scratch_bo */
 struct tu6_control
 {
-   uint32_t seqno;          /* seqno for async CP_EVENT_WRITE, etc */
+   uint32_t seqno_dummy;          /* dummy seqno for CP_EVENT_WRITE */
    uint32_t _pad0;
    volatile uint32_t vsc_overflow;
    uint32_t _pad1;
@@ -1002,13 +1149,14 @@ struct tu_cmd_buffer
 
    struct tu_cmd_state state;
    struct tu_vertex_binding vertex_bindings[MAX_VBS];
+   uint32_t vertex_bindings_set;
    uint32_t queue_family_index;
 
    uint32_t push_constants[MAX_PUSH_CONSTANTS_SIZE / 4];
    VkShaderStageFlags push_constant_stages;
    struct tu_descriptor_set meta_push_descriptors;
 
-   struct tu_descriptor_state descriptors[VK_PIPELINE_BIND_POINT_RANGE_SIZE];
+   struct tu_descriptor_state descriptors[MAX_BIND_POINTS];
 
    struct tu_cmd_buffer_upload upload;
 
@@ -1021,15 +1169,12 @@ struct tu_cmd_buffer
    struct tu_cs sub_cs;
 
    struct tu_bo scratch_bo;
-   uint32_t scratch_seqno;
 
-   struct tu_bo vsc_data;
-   struct tu_bo vsc_data2;
-   uint32_t vsc_data_pitch;
-   uint32_t vsc_data2_pitch;
+   struct tu_bo vsc_draw_strm;
+   struct tu_bo vsc_prim_strm;
+   uint32_t vsc_draw_strm_pitch;
+   uint32_t vsc_prim_strm_pitch;
    bool use_vsc_data;
-
-   bool wait_for_idle;
 };
 
 /* Temporary struct for tracking a register state to be written, used by
@@ -1045,11 +1190,14 @@ struct tu_reg_value {
    uint32_t bo_shift;
 };
 
-unsigned
+void tu_emit_cache_flush_ccu(struct tu_cmd_buffer *cmd_buffer,
+                             struct tu_cs *cs,
+                             enum tu_cmd_ccu_state ccu_state);
+
+void
 tu6_emit_event_write(struct tu_cmd_buffer *cmd,
                      struct tu_cs *cs,
-                     enum vgt_event_type event,
-                     bool need_seqno);
+                     enum vgt_event_type event);
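
Callers now simply name the event; for example (event names come from the
shared adreno a6xx definitions, and the pairing here is illustrative):

/* make CCU color writes visible, then invalidate UCHE before reading */
tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);
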
 
 bool
 tu_get_memory_fd(struct tu_device *device,
@@ -1141,6 +1289,7 @@ struct tu_shader
 
    struct tu_push_constant_range push_consts;
    unsigned attachment_idx[MAX_RTS];
+   uint8_t active_desc_sets;
 
    /* This may be true for vertex shaders.  When true, variants[1] is the
     * binning variant and binning_binary is non-NULL.
@@ -1197,6 +1346,7 @@ struct tu_pipeline
 
    bool need_indirect_descriptor_sets;
    VkShaderStageFlags active_stages;
+   uint32_t active_desc_sets;
 
    struct tu_streamout_state streamout;
 
@@ -1217,14 +1367,9 @@ struct tu_pipeline
 
    struct
    {
-      uint8_t bindings[MAX_VERTEX_ATTRIBS];
-      uint32_t count;
-
-      uint8_t binning_bindings[MAX_VERTEX_ATTRIBS];
-      uint32_t binning_count;
-
       struct tu_cs_entry state_ib;
       struct tu_cs_entry binning_state_ib;
+      uint32_t bindings_used;
    } vi;
 
    struct
@@ -1266,6 +1411,9 @@ tu6_emit_viewport(struct tu_cs *cs, const VkViewport *viewport);
 void
 tu6_emit_scissor(struct tu_cs *cs, const VkRect2D *scissor);
 
+void
+tu6_emit_sample_locations(struct tu_cs *cs, const VkSampleLocationsInfoEXT *samp_loc);
+
 void
 tu6_emit_gras_su_cntl(struct tu_cs *cs,
                       uint32_t gras_su_cntl,
@@ -1297,6 +1445,22 @@ void tu6_emit_window_scissor(struct tu_cs *cs, uint32_t x1, uint32_t y1, uint32_
 
 void tu6_emit_window_offset(struct tu_cs *cs, uint32_t x1, uint32_t y1);
 
+void
+tu6_emit_xs_config(struct tu_cs *cs,
+                   gl_shader_stage stage,
+                   const struct ir3_shader_variant *xs,
+                   uint64_t binary_iova);
+
+void
+tu6_emit_vpc(struct tu_cs *cs,
+             const struct ir3_shader_variant *vs,
+             const struct ir3_shader_variant *gs,
+             const struct ir3_shader_variant *fs,
+             struct tu_streamout_state *tf);
+
+void
+tu6_emit_fs_inputs(struct tu_cs *cs, const struct ir3_shader_variant *fs);
+
 struct tu_image_view;
 
 void
@@ -1320,7 +1484,10 @@ tu_clear_gmem_attachment(struct tu_cmd_buffer *cmd,
                          const VkRenderPassBeginInfo *info);
 
 void
-tu_load_gmem_attachment(struct tu_cmd_buffer *cmd, struct tu_cs *cs, uint32_t a);
+tu_load_gmem_attachment(struct tu_cmd_buffer *cmd,
+                        struct tu_cs *cs,
+                        uint32_t a,
+                        bool force_load);
 
 /* exposed so callers can emit a load without checking LOAD_OP */
 void
@@ -1434,116 +1601,25 @@ tu_get_levelCount(const struct tu_image *image,
              : range->levelCount;
 }
 
-static inline VkDeviceSize
-tu_layer_size(struct tu_image *image, int level)
-{
-   return fdl_layer_stride(&image->layout, level);
-}
-
-static inline uint32_t
-tu_image_stride(struct tu_image *image, int level)
-{
-   return image->layout.slices[level].pitch * image->layout.cpp;
-}
-
-/* to get the right pitch for compressed formats */
-static inline uint32_t
-tu_image_pitch(struct tu_image *image, int level)
-{
-   uint32_t stride = tu_image_stride(image, level);
-   return stride / vk_format_get_blockwidth(image->vk_format);
-}
-
-static inline uint64_t
-tu_image_base(struct tu_image *image, int level, int layer)
-{
-   return image->bo->iova + image->bo_offset +
-      fdl_surface_offset(&image->layout, level, layer);
-}
-
-#define tu_image_base_ref(image, level, layer)                          \
-   .bo = image->bo,                                                     \
-   .bo_offset = (image->bo_offset + fdl_surface_offset(&image->layout,  \
-                                                       level, layer))
-
-#define tu_image_view_base_ref(iview)                                   \
-   tu_image_base_ref(iview->image, iview->base_mip, iview->base_layer)
-
-static inline VkDeviceSize
-tu_image_ubwc_size(struct tu_image *image, int level)
-{
-   return image->layout.ubwc_layer_size;
-}
-
-static inline uint32_t
-tu_image_ubwc_pitch(struct tu_image *image, int level)
-{
-   return image->layout.ubwc_slices[level].pitch;
-}
-
-static inline uint64_t
-tu_image_ubwc_surface_offset(struct tu_image *image, int level, int layer)
-{
-   return image->layout.ubwc_slices[level].offset +
-      layer * tu_image_ubwc_size(image, level);
-}
-
-static inline uint64_t
-tu_image_ubwc_base(struct tu_image *image, int level, int layer)
-{
-   return image->bo->iova + image->bo_offset +
-      tu_image_ubwc_surface_offset(image, level, layer);
-}
-
-#define tu_image_ubwc_base_ref(image, level, layer)                     \
-   .bo = image->bo,                                                     \
-   .bo_offset = (image->bo_offset + tu_image_ubwc_surface_offset(image, \
-                                                                 level, layer))
-
-#define tu_image_view_ubwc_base_ref(iview) \
-   tu_image_ubwc_base_ref(iview->image, iview->base_mip, iview->base_layer)
-
-#define tu_image_view_ubwc_pitches(iview)                                \
-   .pitch = tu_image_ubwc_pitch(iview->image, iview->base_mip),          \
-   .array_pitch = tu_image_ubwc_size(iview->image, iview->base_mip) >> 2
-
-enum a6xx_tile_mode
-tu6_get_image_tile_mode(struct tu_image *image, int level);
 enum a3xx_msaa_samples
 tu_msaa_samples(uint32_t samples);
 enum a6xx_tex_fetchsize
 tu6_fetchsize(VkFormat format);
 
-static inline struct tu_native_format
-tu6_format_image(struct tu_image *image, VkFormat format, uint32_t level)
-{
-   struct tu_native_format fmt =
-      tu6_format_color(format, image->layout.tile_mode);
-   fmt.tile_mode = tu6_get_image_tile_mode(image, level);
-   return fmt;
-}
-
-static inline struct tu_native_format
-tu6_format_image_src(struct tu_image *image, VkFormat format, uint32_t level)
-{
-   struct tu_native_format fmt =
-      tu6_format_texture(format, image->layout.tile_mode);
-   fmt.tile_mode = tu6_get_image_tile_mode(image, level);
-   return fmt;
-}
-
 struct tu_image_view
 {
    struct tu_image *image; /**< VkImageViewCreateInfo::image */
 
-   VkImageViewType type;
-   VkImageAspectFlags aspect_mask;
-   VkFormat vk_format;
-   uint32_t base_layer;
-   uint32_t layer_count;
-   uint32_t base_mip;
-   uint32_t level_count;
-   VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */
+   uint64_t base_addr;
+   uint64_t ubwc_addr;
+   uint32_t layer_size;
+   uint32_t ubwc_layer_size;
+
+   /* used to determine whether the fast gmem store path can be used */
+   VkExtent2D extent;
+   bool need_y2_align;
+
+   bool ubwc_enabled;
 
    uint32_t descriptor[A6XX_TEX_CONST_DWORDS];
 
@@ -1551,18 +1627,52 @@ struct tu_image_view
     * This has a few differences for cube maps (e.g. type).
     */
    uint32_t storage_descriptor[A6XX_TEX_CONST_DWORDS];
+
+   /* pre-filled register values */
+   uint32_t PITCH;
+   uint32_t FLAG_BUFFER_PITCH;
+
+   uint32_t RB_MRT_BUF_INFO;
+   uint32_t SP_FS_MRT_REG;
+
+   uint32_t SP_PS_2D_SRC_INFO;
+   uint32_t SP_PS_2D_SRC_SIZE;
+
+   uint32_t RB_2D_DST_INFO;
+
+   uint32_t RB_BLIT_DST_INFO;
+};
+
+struct tu_sampler_ycbcr_conversion {
+   VkFormat format;
+   VkSamplerYcbcrModelConversion ycbcr_model;
+   VkSamplerYcbcrRange ycbcr_range;
+   VkComponentMapping components;
+   VkChromaLocation chroma_offsets[2];
+   VkFilter chroma_filter;
 };
 
 struct tu_sampler {
    uint32_t descriptor[A6XX_TEX_SAMP_DWORDS];
+   struct tu_sampler_ycbcr_conversion *ycbcr_sampler;
 };
 
+void
+tu_cs_image_ref(struct tu_cs *cs, const struct tu_image_view *iview, uint32_t layer);
+
+void
+tu_cs_image_ref_2d(struct tu_cs *cs, const struct tu_image_view *iview, uint32_t layer, bool src);
+
+void
+tu_cs_image_flag_ref(struct tu_cs *cs, const struct tu_image_view *iview, uint32_t layer);
+
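
Together with the pre-filled register values in tu_image_view, these helpers
let attachment state be emitted without recomputing the layout. Roughly, for
one color attachment (register names per the generated a6xx headers; the
packet shape is a sketch, and att stands for the corresponding
tu_render_pass_attachment):

tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_BUF_INFO(i), 6);
tu_cs_emit(cs, iview->RB_MRT_BUF_INFO); /* pre-filled at view creation */
tu_cs_image_ref(cs, iview, 0);          /* PITCH, layer size, base address */
tu_cs_emit(cs, att->gmem_offset);       /* for the gmem rendering path */
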
 VkResult
 tu_image_create(VkDevice _device,
                 const VkImageCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *alloc,
                 VkImage *pImage,
-                uint64_t modifier);
+                uint64_t modifier,
+                const VkSubresourceLayout *plane_layouts);
 
 VkResult
 tu_image_from_gralloc(VkDevice device_h,
@@ -1573,7 +1683,6 @@ tu_image_from_gralloc(VkDevice device_h,
 
 void
 tu_image_view_init(struct tu_image_view *view,
-                   struct tu_device *device,
                    const VkImageViewCreateInfo *pCreateInfo);
 
 struct tu_buffer_view
@@ -1634,9 +1743,17 @@ struct tu_framebuffer
    struct tu_attachment_info attachments[0];
 };
 
+struct tu_subpass_barrier {
+   VkPipelineStageFlags src_stage_mask;
+   VkAccessFlags src_access_mask;
+   VkAccessFlags dst_access_mask;
+   bool incoherent_ccu_color, incoherent_ccu_depth;
+};
+
 struct tu_subpass_attachment
 {
    uint32_t attachment;
+   VkImageLayout layout;
 };
 
 struct tu_subpass
@@ -1649,6 +1766,11 @@ struct tu_subpass
    struct tu_subpass_attachment depth_stencil_attachment;
 
    VkSampleCountFlagBits samples;
+   bool has_external_src, has_external_dst;
+
+   uint32_t srgb_cntl;
+
+   struct tu_subpass_barrier start_barrier;
 };
 
 struct tu_render_pass_attachment
@@ -1656,10 +1778,10 @@ struct tu_render_pass_attachment
    VkFormat format;
    uint32_t samples;
    uint32_t cpp;
-   VkAttachmentLoadOp load_op;
-   VkAttachmentLoadOp stencil_load_op;
-   VkAttachmentStoreOp store_op;
-   VkAttachmentStoreOp stencil_store_op;
+   VkImageAspectFlags clear_mask;
+   bool load;
+   bool store;
+   VkImageLayout initial_layout, final_layout;
    int32_t gmem_offset;
 };
 
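
The old VkAttachmentLoadOp/StoreOp fields collapse into these booleans and
the clear aspect mask at render-pass creation time. A sketch of the intended
mapping, assuming the driver's vk_format_aspects helper (stencil ops for
packed depth/stencil formats omitted; att is the attachment being filled in):

att->load = load_op == VK_ATTACHMENT_LOAD_OP_LOAD;
att->store = store_op == VK_ATTACHMENT_STORE_OP_STORE;
att->clear_mask = load_op == VK_ATTACHMENT_LOAD_OP_CLEAR
                     ? vk_format_aspects(att->format) : 0;
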
@@ -1668,8 +1790,10 @@ struct tu_render_pass
    uint32_t attachment_count;
    uint32_t subpass_count;
    uint32_t gmem_pixels;
+   uint32_t tile_align_w;
    struct tu_subpass_attachment *subpass_attachments;
    struct tu_render_pass_attachment *attachments;
+   struct tu_subpass_barrier end_barrier;
    struct tu_subpass subpasses[0];
 };
 
@@ -1810,6 +1934,7 @@ TU_DEFINE_NONDISP_HANDLE_CASTS(tu_pipeline_layout, VkPipelineLayout)
 TU_DEFINE_NONDISP_HANDLE_CASTS(tu_query_pool, VkQueryPool)
 TU_DEFINE_NONDISP_HANDLE_CASTS(tu_render_pass, VkRenderPass)
 TU_DEFINE_NONDISP_HANDLE_CASTS(tu_sampler, VkSampler)
+TU_DEFINE_NONDISP_HANDLE_CASTS(tu_sampler_ycbcr_conversion, VkSamplerYcbcrConversion)
 TU_DEFINE_NONDISP_HANDLE_CASTS(tu_shader_module, VkShaderModule)
 TU_DEFINE_NONDISP_HANDLE_CASTS(tu_semaphore, VkSemaphore)