#include <valgrind.h>
#define VG(x) x
#else
-#define VG(x)
+#define VG(x) ((void)0)
#endif
#include "c11/threads.h"
-#include "compiler/shader_enums.h"
#include "main/macros.h"
#include "util/list.h"
#include "util/macros.h"
#include "vk_alloc.h"
#include "vk_debug_report.h"
+#include "wsi_common.h"
+
+#include "drm-uapi/msm_drm.h"
+#include "ir3/ir3_compiler.h"
+#include "ir3/ir3_shader.h"
+
+#include "adreno_common.xml.h"
+#include "adreno_pm4.xml.h"
+#include "a6xx.xml.h"
+#include "fdl/freedreno_layout.h"
-#include "drm/msm_drm.h"
#include "tu_descriptor_set.h"
#include "tu_extensions.h"
#define MAX_VERTEX_ATTRIBS 32
#define MAX_RTS 8
#define MAX_VSC_PIPES 32
-#define MAX_VIEWPORTS 16
+#define MAX_VIEWPORTS 1
#define MAX_SCISSORS 16
#define MAX_DISCARD_RECTANGLES 4
#define MAX_PUSH_CONSTANTS_SIZE 128
#define NUM_META_FS_KEYS 13
#define TU_MAX_DRM_DEVICES 8
#define MAX_VIEWS 8
+/* The Qualcomm driver exposes 0x20000058 */
+#define MAX_STORAGE_BUFFER_RANGE 0x20000000
+/* TODO: this isn't a hardware limit, but with a high number of attachments
+ * we are missing the logic to avoid having them all in GMEM at the same time
+ */
+#define MAX_ATTACHMENTS 64
#define NUM_DEPTH_CLEAR_PIPELINES 3
/*
 * This is the point we switch from using CP to compute shader
 * for certain buffer operations.
 */
#define TU_BUFFER_OPS_CS_THRESHOLD 4096
+#define A6XX_TEX_CONST_DWORDS 16
+#define A6XX_TEX_SAMP_DWORDS 4
+
enum tu_mem_heap
{
TU_MEM_HEAP_VRAM,
memcpy((dest), (src), (count) * sizeof(*(src))); \
})
+#define COND(bool, val) ((bool) ? (val) : 0)
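+/* e.g. gras_su_cntl |= COND(cull_front, A6XX_GRAS_SU_CNTL_CULL_FRONT);
+ * (illustrative; any single-bit register field works the same way)
+ */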
+
/* Whenever we generate an error, pass it through this function. Useful for
* debugging, where we can break on it. Only call at error site, not when
* propagating errors. Might be useful to plug in a stack trace here.
uint8_t device_uuid[VK_UUID_SIZE];
uint8_t cache_uuid[VK_UUID_SIZE];
+ struct wsi_device wsi_device;
+
int local_fd;
int master_fd;
enum tu_debug_flags
{
TU_DEBUG_STARTUP = 1 << 0,
+ TU_DEBUG_NIR = 1 << 1,
+ TU_DEBUG_IR3 = 1 << 2,
+ TU_DEBUG_NOBIN = 1 << 3,
};
struct tu_instance
struct tu_instance_extension_table enabled_extensions;
};
+VkResult
+tu_wsi_init(struct tu_physical_device *physical_device);
+void
+tu_wsi_finish(struct tu_physical_device *physical_device);
+
bool
tu_instance_extension_supported(const char *name);
uint32_t
#define TU_MAX_QUEUE_FAMILIES 1
+struct tu_fence
+{
+ struct wsi_fence *fence_wsi;
+ bool signaled;
+ int fd;
+};
+
+void
+tu_fence_init(struct tu_fence *fence, bool signaled);
+void
+tu_fence_finish(struct tu_fence *fence);
+void
+tu_fence_update_fd(struct tu_fence *fence, int fd);
+void
+tu_fence_copy(struct tu_fence *fence, const struct tu_fence *src);
+void
+tu_fence_signal(struct tu_fence *fence);
+void
+tu_fence_wait_idle(struct tu_fence *fence);
+
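+/* Expected lifecycle of a tu_fence (sketch; the fd is assumed to come from
+ * a kernel submit):
+ *
+ *    struct tu_fence fence;
+ *    tu_fence_init(&fence, false);    // starts unsignaled
+ *    tu_fence_update_fd(&fence, fd);  // adopt the submit's fence fd
+ *    tu_fence_wait_idle(&fence);      // block until the fd signals
+ *    tu_fence_finish(&fence);         // close the fd, release WSI state
+ */
+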
struct tu_queue
{
VK_LOADER_DATA _loader_data;
VkDeviceQueueCreateFlags flags;
uint32_t msm_queue_id;
- int submit_fence_fd;
+ struct tu_fence submit_fence;
};
struct tu_device
struct tu_physical_device *physical_device;
+ struct ir3_compiler *compiler;
+
/* Backup in-memory cache to be used if the app doesn't provide one */
struct tu_pipeline_cache *mem_cache;
VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size);
+VkResult
+tu_bo_init_dmabuf(struct tu_device *dev,
+ struct tu_bo *bo,
+ uint64_t size,
+ int fd);
+int
+tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo);
void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo);
VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo);
+struct tu_cs_entry
+{
+ /* No ownership */
+ const struct tu_bo *bo;
+
+ uint32_t size;
+ uint32_t offset;
+};
+
+struct ts_cs_memory
+{
+ uint32_t *map;
+ uint64_t iova;
+};
+
+enum tu_cs_mode
+{
+ /*
+ * A command stream in TU_CS_MODE_GROW mode grows automatically whenever it
+ * is full. tu_cs_begin must be called before command packet emission and
+ * tu_cs_end must be called after.
+ *
+ * This mode may create multiple entries internally. The entries must be
+ * submitted together.
+ */
+ TU_CS_MODE_GROW,
+
+ /*
+ * A command stream in TU_CS_MODE_EXTERNAL mode wraps an external,
+ * fixed-size buffer. tu_cs_begin and tu_cs_end are optional and have no
+ * effect on it.
+ *
+ * This mode does not create any entry or any BO.
+ */
+ TU_CS_MODE_EXTERNAL,
+
+ /*
+ * A command stream in TU_CS_MODE_SUB_STREAM mode does not support direct
+ * command packet emission. tu_cs_begin_sub_stream must be called to get a
+ * sub-stream to emit command packets to. When done with the sub-stream,
+ * tu_cs_end_sub_stream must be called.
+ *
+ * This mode does not create any entry internally.
+ */
+ TU_CS_MODE_SUB_STREAM,
+};
+
+struct tu_cs
+{
+ uint32_t *start;
+ uint32_t *cur;
+ uint32_t *reserved_end;
+ uint32_t *end;
+
+ enum tu_cs_mode mode;
+ uint32_t next_bo_size;
+
+ struct tu_cs_entry *entries;
+ uint32_t entry_count;
+ uint32_t entry_capacity;
+
+ struct tu_bo **bos;
+ uint32_t bo_count;
+ uint32_t bo_capacity;
+};
+
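+/* Sketch of how the modes are driven, assuming the tu_cs_* helpers from
+ * tu_cs.h (signatures approximate):
+ *
+ *    // TU_CS_MODE_GROW: begin, reserve, emit, end, then submit the entries
+ *    tu_cs_begin(cs);
+ *    tu_cs_reserve_space(dev, cs, 4);
+ *    tu_cs_emit_pkt7(cs, CP_NOP, 0);
+ *    tu_cs_end(cs);
+ *
+ *    // TU_CS_MODE_SUB_STREAM: emit to a sub-stream, get back an IB entry
+ *    struct tu_cs sub;
+ *    tu_cs_begin_sub_stream(dev, cs, size, &sub);
+ *    ...emit into &sub...
+ *    struct tu_cs_entry ib = tu_cs_end_sub_stream(cs, &sub);
+ */
+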
struct tu_device_memory
{
struct tu_bo bo;
uint64_t va;
uint32_t *mapped_ptr;
struct tu_descriptor_range *dynamic_descriptors;
+
+ struct tu_bo *descriptors[0];
};
struct tu_push_descriptor_set
struct tu_descriptor_pool
{
- uint8_t *mapped_ptr;
+ struct tu_bo bo;
uint64_t current_offset;
uint64_t size;
VkBufferUsageFlags usage;
VkBufferCreateFlags flags;
+
+ struct tu_bo *bo;
+ VkDeviceSize bo_offset;
};
+static inline uint64_t
+tu_buffer_iova(struct tu_buffer *buffer)
+{
+ return buffer->bo->iova + buffer->bo_offset;
+}
+
enum tu_dynamic_state_bits
{
TU_DYNAMIC_VIEWPORT = 1 << 0,
const char *
tu_get_perftest_option_name(int id);
-/**
- * Attachment state when recording a renderpass instance.
- *
- * The clear value is valid only if there exists a pending clear.
- */
-struct tu_attachment_state
-{
- VkImageAspectFlags pending_clear_aspects;
- uint32_t cleared_views;
- VkClearValue clear_value;
- VkImageLayout current_layout;
-};
-
struct tu_descriptor_state
{
struct tu_descriptor_set *sets[MAX_SETS];
uint32_t valid;
struct tu_push_descriptor_set push_set;
bool push_dirty;
- uint32_t dynamic_buffers[4 * MAX_DYNAMIC_BUFFERS];
+ uint64_t dynamic_buffers[MAX_DYNAMIC_BUFFERS];
};
struct tu_tile
struct tu_tiling_config
{
VkRect2D render_area;
- uint32_t buffer_cpp[MAX_RTS + 2];
+ uint32_t buffer_cpp[MAX_ATTACHMENTS];
uint32_t buffer_count;
/* position and size of the first tile */
/* number of tiles */
VkExtent2D tile_count;
- uint32_t gmem_offsets[MAX_RTS + 2];
+ uint32_t gmem_offsets[MAX_ATTACHMENTS];
/* size of the first VSC pipe */
VkExtent2D pipe0;
uint32_t pipe_sizes[MAX_VSC_PIPES];
};
+enum tu_cmd_dirty_bits
+{
+ TU_CMD_DIRTY_PIPELINE = 1 << 0,
+ TU_CMD_DIRTY_COMPUTE_PIPELINE = 1 << 1,
+ TU_CMD_DIRTY_VERTEX_BUFFERS = 1 << 2,
+ TU_CMD_DIRTY_DESCRIPTOR_SETS = 1 << 3,
+ TU_CMD_DIRTY_PUSH_CONSTANTS = 1 << 4,
+
+ TU_CMD_DIRTY_DYNAMIC_LINE_WIDTH = 1 << 16,
+ TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK = 1 << 17,
+ TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK = 1 << 18,
+ TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE = 1 << 19,
+};
+
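+/* The draw path is expected to test and clear these bits, e.g. (sketch;
+ * the real flush logic lives in tu_cmd_buffer.c):
+ *
+ *    if (cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_LINE_WIDTH)
+ *       tu6_emit_gras_su_cntl(cs, pipeline->rast.gras_su_cntl,
+ *                             cmd->state.dynamic.line_width);
+ *    cmd->state.dirty = 0;
+ */
+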
struct tu_cmd_state
{
- /* Vertex descriptors */
- uint64_t vb_va;
- unsigned vb_size;
+ uint32_t dirty;
+
+ struct tu_pipeline *pipeline;
+ struct tu_pipeline *compute_pipeline;
+
+ /* Vertex buffers */
+ struct
+ {
+ struct tu_buffer *buffers[MAX_VBS];
+ VkDeviceSize offsets[MAX_VBS];
+ } vb;
struct tu_dynamic_state dynamic;
const struct tu_render_pass *pass;
const struct tu_subpass *subpass;
const struct tu_framebuffer *framebuffer;
- struct tu_attachment_state *attachments;
struct tu_tiling_config tiling_config;
+
+ struct tu_cs_entry tile_load_ib;
+ struct tu_cs_entry tile_store_ib;
};
struct tu_cmd_pool
VkResult
tu_bo_list_merge(struct tu_bo_list *list, const struct tu_bo_list *other);
-struct tu_cs_entry
-{
- /* No ownership */
- const struct tu_bo *bo;
-
- uint32_t size;
- uint64_t offset;
-};
-
-struct tu_cs
-{
- uint32_t *start;
- uint32_t *cur;
- uint32_t *reserved_end;
- uint32_t *end;
-
- uint32_t next_bo_size;
-
- struct tu_cs_entry *entries;
- uint32_t entry_count;
- uint32_t entry_capacity;
-
- struct tu_bo **bos;
- uint32_t bo_count;
- uint32_t bo_capacity;
-};
-
struct tu_cmd_buffer
{
VK_LOADER_DATA _loader_data;
struct tu_vertex_binding vertex_bindings[MAX_VBS];
uint32_t queue_family_index;
- uint8_t push_constants[MAX_PUSH_CONSTANTS_SIZE];
+ uint32_t push_constants[MAX_PUSH_CONSTANTS_SIZE / 4];
VkShaderStageFlags push_constant_stages;
struct tu_descriptor_set meta_push_descriptors;
struct tu_bo_list bo_list;
struct tu_cs cs;
+ struct tu_cs draw_cs;
+ struct tu_cs draw_state;
+ struct tu_cs tile_cs;
uint16_t marker_reg;
uint32_t marker_seqno;
struct tu_bo scratch_bo;
uint32_t scratch_seqno;
+#define VSC_OVERFLOW 0x8
+#define VSC_SCRATCH 0x10
+
+ struct tu_bo vsc_data;
+ struct tu_bo vsc_data2;
+ uint32_t vsc_data_pitch;
+ uint32_t vsc_data2_pitch;
+ bool use_vsc_data;
- /* current cs; command packets are always emitted to it */
- struct tu_cs *cur_cs;
+ bool wait_for_idle;
};
+unsigned
+tu6_emit_event_write(struct tu_cmd_buffer *cmd,
+ struct tu_cs *cs,
+ enum vgt_event_type event,
+ bool need_seqno);
+
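+/* e.g. tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS, true) emits the event
+ * and, because need_seqno is set, returns a fresh seqno that the GPU writes
+ * back to the scratch BO (sketch of intended use; CACHE_FLUSH_TS comes from
+ * the adreno_pm4 headers).
+ */
+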
bool
tu_get_memory_fd(struct tu_device *device,
struct tu_device_memory *memory,
int *pFD);
+static inline struct tu_descriptor_state *
+tu_get_descriptors_state(struct tu_cmd_buffer *cmd_buffer,
+ VkPipelineBindPoint bind_point)
+{
+ return &cmd_buffer->descriptors[bind_point];
+}
+
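+/* e.g. tu_get_descriptors_state(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS);
+ * graphics and compute each keep their own tu_descriptor_state, indexed
+ * by bind point.
+ */
+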
/*
* Takes x,y,z as exact numbers of invocations, instead of blocks.
*
struct tu_event
{
- uint64_t *map;
+ struct tu_bo bo;
};
struct tu_shader_module;
struct tu_shader_module
{
- struct nir_shader *nir;
unsigned char sha1[20];
- uint32_t size;
- char data[0];
+
+ uint32_t code_size;
+ const uint32_t code[0];
+};
+
+struct tu_shader_compile_options
+{
+ struct ir3_shader_key key;
+
+ bool optimize;
+ bool include_binning_pass;
+};
+
+struct tu_descriptor_map
+{
+ /* TODO: avoid fixed-size arrays, or justify the size */
+ unsigned num;
+ int set[64];
+ int binding[64];
+ int value[64];
+};
+
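+/* Sketch of the intended lookup: the map is a flat (set, binding) -> value
+ * table, so a linear scan suffices. tu_descriptor_map_find is hypothetical,
+ * for illustration only:
+ *
+ *    static inline int
+ *    tu_descriptor_map_find(const struct tu_descriptor_map *map,
+ *                           int set, int binding)
+ *    {
+ *       for (unsigned i = 0; i < map->num; i++) {
+ *          if (map->set[i] == set && map->binding[i] == binding)
+ *             return i;
+ *       }
+ *       return -1; // not referenced by the shader
+ *    }
+ */
+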
+struct tu_shader
+{
+ struct ir3_shader ir3_shader;
+
+ struct tu_descriptor_map texture_map;
+ struct tu_descriptor_map sampler_map;
+ struct tu_descriptor_map ubo_map;
+ struct tu_descriptor_map ssbo_map;
+
+ /* This may be true for vertex shaders. When true, variants[1] is the
+ * binning variant and binning_binary is non-NULL.
+ */
+ bool has_binning_pass;
+
+ void *binary;
+ void *binning_binary;
+
+ struct ir3_shader_variant variants[0];
+};
+
+struct tu_shader *
+tu_shader_create(struct tu_device *dev,
+ gl_shader_stage stage,
+ const VkPipelineShaderStageCreateInfo *stage_info,
+ const VkAllocationCallbacks *alloc);
+
+void
+tu_shader_destroy(struct tu_device *dev,
+ struct tu_shader *shader,
+ const VkAllocationCallbacks *alloc);
+
+void
+tu_shader_compile_options_init(
+ struct tu_shader_compile_options *options,
+ const VkGraphicsPipelineCreateInfo *pipeline_info);
+
+VkResult
+tu_shader_compile(struct tu_device *dev,
+ struct tu_shader *shader,
+ const struct tu_shader *next_stage,
+ const struct tu_shader_compile_options *options,
+ const VkAllocationCallbacks *alloc);
+
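+/* Expected call sequence when building a pipeline (sketch; error handling
+ * omitted, stage_info/pipeline_info come from the create-info structs):
+ *
+ *    struct tu_shader *shader =
+ *       tu_shader_create(dev, MESA_SHADER_VERTEX, stage_info, alloc);
+ *
+ *    struct tu_shader_compile_options options;
+ *    tu_shader_compile_options_init(&options, pipeline_info);
+ *
+ *    result = tu_shader_compile(dev, shader, next_stage, &options, alloc);
+ *    ...
+ *    tu_shader_destroy(dev, shader, alloc);
+ */
+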
+struct tu_program_descriptor_linkage
+{
+ struct ir3_ubo_analysis_state ubo_state;
+ struct ir3_const_state const_state;
+
+ uint32_t constlen;
+
+ struct tu_descriptor_map texture_map;
+ struct tu_descriptor_map sampler_map;
+ struct tu_descriptor_map ubo_map;
+ struct tu_descriptor_map ssbo_map;
+ struct ir3_ibo_mapping image_mapping;
};
struct tu_pipeline
{
- struct tu_device *device;
+ struct tu_cs cs;
+
struct tu_dynamic_state dynamic_state;
struct tu_pipeline_layout *layout;
bool need_indirect_descriptor_sets;
VkShaderStageFlags active_stages;
+
+ struct
+ {
+ struct tu_bo binary_bo;
+ struct tu_cs_entry state_ib;
+ struct tu_cs_entry binning_state_ib;
+
+ struct tu_program_descriptor_linkage link[MESA_SHADER_STAGES];
+ } program;
+
+ struct
+ {
+ uint8_t bindings[MAX_VERTEX_ATTRIBS];
+ uint16_t strides[MAX_VERTEX_ATTRIBS];
+ uint16_t offsets[MAX_VERTEX_ATTRIBS];
+ uint32_t count;
+
+ uint8_t binning_bindings[MAX_VERTEX_ATTRIBS];
+ uint16_t binning_strides[MAX_VERTEX_ATTRIBS];
+ uint16_t binning_offsets[MAX_VERTEX_ATTRIBS];
+ uint32_t binning_count;
+
+ struct tu_cs_entry state_ib;
+ struct tu_cs_entry binning_state_ib;
+ } vi;
+
+ struct
+ {
+ enum pc_di_primtype primtype;
+ bool primitive_restart;
+ } ia;
+
+ struct
+ {
+ struct tu_cs_entry state_ib;
+ } vp;
+
+ struct
+ {
+ uint32_t gras_su_cntl;
+ struct tu_cs_entry state_ib;
+ } rast;
+
+ struct
+ {
+ struct tu_cs_entry state_ib;
+ } ds;
+
+ struct
+ {
+ struct tu_cs_entry state_ib;
+ } blend;
+
+ struct
+ {
+ uint32_t local_size[3];
+ } compute;
};
+void
+tu6_emit_viewport(struct tu_cs *cs, const VkViewport *viewport);
+
+void
+tu6_emit_scissor(struct tu_cs *cs, const VkRect2D *scissor);
+
+void
+tu6_emit_gras_su_cntl(struct tu_cs *cs,
+ uint32_t gras_su_cntl,
+ float line_width);
+
+void
+tu6_emit_depth_bias(struct tu_cs *cs,
+ float constant_factor,
+ float clamp,
+ float slope_factor);
+
+void
+tu6_emit_stencil_compare_mask(struct tu_cs *cs,
+ uint32_t front,
+ uint32_t back);
+
+void
+tu6_emit_stencil_write_mask(struct tu_cs *cs, uint32_t front, uint32_t back);
+
+void
+tu6_emit_stencil_reference(struct tu_cs *cs, uint32_t front, uint32_t back);
+
+void
+tu6_emit_blend_constants(struct tu_cs *cs, const float constants[4]);
+
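+/* These helpers pair with the TU_CMD_DIRTY_DYNAMIC_* bits: vkCmdSet*()
+ * stores the values and marks the bit, and the next draw re-emits, e.g.
+ * (sketch; field names follow the radv-derived tu_dynamic_state):
+ *
+ *    tu6_emit_stencil_reference(cs, dynamic->stencil_reference.front,
+ *                               dynamic->stencil_reference.back);
+ */
+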
struct tu_userdata_info *
tu_lookup_user_sgpr(struct tu_pipeline *pipeline,
gl_shader_stage stage,
uint32_t custom_blend_mode;
};
-VkResult
-tu_graphics_pipeline_create(
- VkDevice device,
- VkPipelineCache cache,
- const VkGraphicsPipelineCreateInfo *pCreateInfo,
- const struct tu_graphics_pipeline_create_info *extra,
- const VkAllocationCallbacks *alloc,
- VkPipeline *pPipeline);
-
-struct vk_format_description;
-uint32_t
-tu_translate_buffer_dataformat(const struct vk_format_description *desc,
- int first_non_void);
-uint32_t
-tu_translate_buffer_numformat(const struct vk_format_description *desc,
- int first_non_void);
-uint32_t
-tu_translate_colorformat(VkFormat format);
-uint32_t
-tu_translate_color_numformat(VkFormat format,
- const struct vk_format_description *desc,
- int first_non_void);
-uint32_t
-tu_colorformat_endian_swap(uint32_t colorformat);
-unsigned
-tu_translate_colorswap(VkFormat format, bool do_endian_swap);
-uint32_t
-tu_translate_dbformat(VkFormat format);
-uint32_t
-tu_translate_tex_dataformat(VkFormat format,
- const struct vk_format_description *desc,
- int first_non_void);
-uint32_t
-tu_translate_tex_numformat(VkFormat format,
- const struct vk_format_description *desc,
- int first_non_void);
-bool
-tu_format_pack_clear_color(VkFormat format,
- uint32_t clear_vals[2],
- VkClearColorValue *value);
-bool
-tu_is_colorbuffer_format_supported(VkFormat format, bool *blendable);
-bool
-tu_dcc_formats_compatible(VkFormat format1, VkFormat format2);
+struct tu_native_format
+{
+ int vtx; /* VFMTn_xxx or -1 */
+ int tex; /* TFMTn_xxx or -1 */
+ int rb; /* RBn_xxx or -1 */
+ int swap; /* enum a3xx_color_swap */
+ bool present; /* internal use; tu6_get_native_format() never returns non-present entries */
+};
+
+const struct tu_native_format *
+tu6_get_native_format(VkFormat format);
+
+void
+tu_pack_clear_value(const VkClearValue *val,
+ VkFormat format,
+ uint32_t buf[4]);
+
+void
+tu_2d_clear_color(const VkClearColorValue *val, VkFormat format, uint32_t buf[4]);
+
+void
+tu_2d_clear_zs(const VkClearDepthStencilValue *val, VkFormat format, uint32_t buf[4]);
+
+enum a6xx_2d_ifmt
+tu6_rb_fmt_to_ifmt(enum a6xx_color_fmt fmt);
+enum a6xx_depth_format
+tu6_pipe2depth(VkFormat format);
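+
+/* Typical use (sketch): consult the table, then bail out when the format
+ * does not support the requested use:
+ *
+ *    const struct tu_native_format *fmt = tu6_get_native_format(vk_format);
+ *    if (!fmt || fmt->rb < 0)
+ *       return false; // not renderable
+ *    // otherwise program fmt->rb / fmt->swap into the RB registers
+ */
+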
struct tu_image_level
{
VkExtent3D extent;
uint32_t level_count;
uint32_t layer_count;
- VkDeviceSize size;
+ VkSampleCountFlagBits samples;
+
uint32_t alignment;
- /* memory layout */
- VkDeviceSize layer_size;
- struct tu_image_level levels[15];
- unsigned tile_mode;
+ struct fdl_layout layout;
unsigned queue_family_mask;
bool exclusive;
VkDeviceMemory owned_memory;
/* Set when bound */
- const struct tu_bo *bo;
+ struct tu_bo *bo;
VkDeviceSize bo_offset;
};
: range->levelCount;
}
+static inline VkDeviceSize
+tu_layer_size(struct tu_image *image, int level)
+{
+ return fdl_layer_stride(&image->layout, level);
+}
+
+static inline uint32_t
+tu_image_stride(struct tu_image *image, int level)
+{
+ return image->layout.slices[level].pitch * image->layout.cpp;
+}
+
+static inline uint64_t
+tu_image_base(struct tu_image *image, int level, int layer)
+{
+ return image->bo->iova + image->bo_offset +
+ fdl_surface_offset(&image->layout, level, layer);
+}
+
+static inline VkDeviceSize
+tu_image_ubwc_size(struct tu_image *image, int level)
+{
+ /* note: UBWC metadata size is per layer, so level is unused here */
+ return image->layout.ubwc_size;
+}
+
+static inline uint32_t
+tu_image_ubwc_pitch(struct tu_image *image, int level)
+{
+ return image->layout.ubwc_slices[level].pitch;
+}
+
+static inline uint64_t
+tu_image_ubwc_base(struct tu_image *image, int level, int layer)
+{
+ return image->bo->iova + image->bo_offset +
+ image->layout.ubwc_slices[level].offset +
+ layer * tu_image_ubwc_size(image, level);
+}
+
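+/* Example (sketch): gathering what a color attachment needs for a given
+ * mip level and array layer; assumes the image has been bound (bo is set):
+ *
+ *    uint64_t base       = tu_image_base(image, level, layer);
+ *    uint32_t pitch      = tu_image_stride(image, level);
+ *    uint64_t flag_base  = tu_image_ubwc_base(image, level, layer);
+ *    uint32_t flag_pitch = tu_image_ubwc_pitch(image, level);
+ */
+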
+enum a6xx_tile_mode
+tu6_get_image_tile_mode(struct tu_image *image, int level);
+enum a3xx_msaa_samples
+tu_msaa_samples(uint32_t samples);
+
struct tu_image_view
{
struct tu_image *image; /**< VkImageViewCreateInfo::image */
uint32_t level_count;
VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */
- uint32_t descriptor[16];
+ uint32_t descriptor[A6XX_TEX_CONST_DWORDS];
/* Descriptor for use as a storage image as opposed to a sampled image.
* This has a few differences for cube maps (e.g. type).
*/
- uint32_t storage_descriptor[16];
+ uint32_t storage_descriptor[A6XX_TEX_CONST_DWORDS];
};
struct tu_sampler
{
-};
-struct tu_image_create_info
-{
- const VkImageCreateInfo *vk_info;
- bool scanout;
- bool no_metadata_planes;
+ uint32_t state[A6XX_TEX_SAMP_DWORDS];
+ bool needs_border;
+ VkBorderColor border;
};
VkResult
tu_image_create(VkDevice _device,
- const struct tu_image_create_info *info,
+ const VkImageCreateInfo *pCreateInfo,
const VkAllocationCallbacks *alloc,
- VkImage *pImage);
+ VkImage *pImage,
+ uint64_t modifier);
VkResult
tu_image_from_gralloc(VkDevice device_h,
struct tu_attachment_info attachments[0];
};
-struct tu_subpass_barrier
-{
- VkPipelineStageFlags src_stage_mask;
- VkAccessFlags src_access_mask;
- VkAccessFlags dst_access_mask;
-};
-
-void
-tu_subpass_barrier(struct tu_cmd_buffer *cmd_buffer,
- const struct tu_subpass_barrier *barrier);
-
struct tu_subpass_attachment
{
uint32_t attachment;
- VkImageLayout layout;
};
struct tu_subpass
struct tu_subpass_attachment *resolve_attachments;
struct tu_subpass_attachment depth_stencil_attachment;
- /** Subpass has at least one resolve attachment */
- bool has_resolve;
-
- struct tu_subpass_barrier start_barrier;
-
- uint32_t view_mask;
- VkSampleCountFlagBits max_sample_count;
+ VkSampleCountFlagBits samples;
};
struct tu_render_pass_attachment
{
VkFormat format;
- uint32_t samples;
+ uint32_t cpp;
VkAttachmentLoadOp load_op;
VkAttachmentLoadOp stencil_load_op;
- VkImageLayout initial_layout;
- VkImageLayout final_layout;
- uint32_t view_mask;
+ VkAttachmentStoreOp store_op;
+ VkAttachmentStoreOp stencil_store_op;
+ bool needs_gmem;
};
struct tu_render_pass
uint32_t subpass_count;
struct tu_subpass_attachment *subpass_attachments;
struct tu_render_pass_attachment *attachments;
- struct tu_subpass_barrier end_barrier;
struct tu_subpass subpasses[0];
};
struct tu_device *device,
struct tu_cmd_buffer *cmd_buffer,
struct tu_descriptor_set *set,
- VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+ VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const void *pData);
void
uint32_t descriptorWriteCount,
const VkWriteDescriptorSet *pDescriptorWrites);
-struct tu_fence
-{
- uint32_t syncobj;
- uint32_t temp_syncobj;
-};
-
int
tu_drm_get_gpu_id(const struct tu_physical_device *dev, uint32_t *id);
uint32_t
tu_gem_new(const struct tu_device *dev, uint64_t size, uint32_t flags);
+uint32_t
+tu_gem_import_dmabuf(const struct tu_device *dev,
+ int prime_fd,
+ uint64_t size);
+int
+tu_gem_export_dmabuf(const struct tu_device *dev, uint32_t gem_handle);
void
tu_gem_close(const struct tu_device *dev, uint32_t gem_handle);
uint64_t
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_set_layout,
VkDescriptorSetLayout)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_update_template,
- VkDescriptorUpdateTemplateKHR)
+ VkDescriptorUpdateTemplate)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_device_memory, VkDeviceMemory)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_fence, VkFence)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_event, VkEvent)