* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
*/
#ifndef TU_PRIVATE_H
#include <valgrind.h>
#define VG(x) x
#else
-#define VG(x)
+#define VG(x) ((void)0)
#endif
#include "c11/threads.h"
-#include "compiler/shader_enums.h"
#include "main/macros.h"
#include "util/list.h"
#include "util/macros.h"
#include "vk_alloc.h"
#include "vk_debug_report.h"
+#include "wsi_common.h"
+
+#include "drm-uapi/msm_drm.h"
+#include "ir3/ir3_compiler.h"
+#include "ir3/ir3_shader.h"
+
+#include "adreno_common.xml.h"
+#include "adreno_pm4.xml.h"
+#include "a6xx.xml.h"
+#include "fdl/freedreno_layout.h"
#include "tu_descriptor_set.h"
#include "tu_extensions.h"
#include <vulkan/vulkan.h>
#include <vulkan/vulkan_intel.h>
-#include "drm/freedreno_ringbuffer.h"
-
#include "tu_entrypoints.h"
#define MAX_VBS 32
#define MAX_VERTEX_ATTRIBS 32
#define MAX_RTS 8
-#define MAX_VIEWPORTS 16
+#define MAX_VSC_PIPES 32
+#define MAX_VIEWPORTS 1
#define MAX_SCISSORS 16
#define MAX_DISCARD_RECTANGLES 4
#define MAX_PUSH_CONSTANTS_SIZE 128
#define MAX_PUSH_DESCRIPTORS 32
#define MAX_DYNAMIC_UNIFORM_BUFFERS 16
#define MAX_DYNAMIC_STORAGE_BUFFERS 8
-#define MAX_DYNAMIC_BUFFERS \
+#define MAX_DYNAMIC_BUFFERS \
(MAX_DYNAMIC_UNIFORM_BUFFERS + MAX_DYNAMIC_STORAGE_BUFFERS)
#define MAX_SAMPLES_LOG2 4
#define NUM_META_FS_KEYS 13
#define TU_MAX_DRM_DEVICES 8
#define MAX_VIEWS 8
+/* The Qualcomm driver exposes 0x20000058 */
+#define MAX_STORAGE_BUFFER_RANGE 0x20000000
+/* TODO: this isn't a hardware limit, but with a high number of attachments
+ * we are missing the logic to avoid having them all in GMEM at the same time.
+ */
+#define MAX_ATTACHMENTS 64
#define NUM_DEPTH_CLEAR_PIPELINES 3
*/
#define TU_BUFFER_OPS_CS_THRESHOLD 4096
+#define A6XX_TEX_CONST_DWORDS 16
+#define A6XX_TEX_SAMP_DWORDS 4
+
enum tu_mem_heap
{
TU_MEM_HEAP_VRAM,
}
}
-#define for_each_bit(b, dword) \
- for (uint32_t __dword = (dword); (b) = __builtin_ffs(__dword) - 1, __dword; \
- __dword &= ~(1 << (b)))
+#define for_each_bit(b, dword) \
+ for (uint32_t __dword = (dword); \
+ (b) = __builtin_ffs(__dword) - 1, __dword; __dword &= ~(1 << (b)))
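+
+/* e.g. (illustrative) walking the set bits of a dirty mask:
+ *    uint32_t bit;
+ *    for_each_bit(bit, cmd->state.dirty)
+ *       handle_dirty_bit(bit);   (handle_dirty_bit is hypothetical)
+ */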
-#define typed_memcpy(dest, src, count) \
- ({ \
- STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
- memcpy((dest), (src), (count) * sizeof(*(src))); \
+#define typed_memcpy(dest, src, count) \
+ ({ \
+ STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
+ memcpy((dest), (src), (count) * sizeof(*(src))); \
})
+#define COND(bool, val) ((bool) ? (val) : 0)
+
/* Whenever we generate an error, pass it through this function. Useful for
* debugging, where we can break on it. Only call at error site, not when
* propagating errors. Might be useful to plug in a stack trace here.
const char *format,
...);
-#define vk_error(instance, error) \
+#define vk_error(instance, error) \
__vk_errorf(instance, error, __FILE__, __LINE__, NULL);
-#define vk_errorf(instance, error, format, ...) \
+#define vk_errorf(instance, error, format, ...) \
__vk_errorf(instance, error, __FILE__, __LINE__, format, ##__VA_ARGS__);
void
__tu_finishme(const char *file, int line, const char *format, ...)
- tu_printflike(3, 4);
+ tu_printflike(3, 4);
void
tu_loge(const char *format, ...) tu_printflike(1, 2);
void
/**
* Print a FINISHME message, including its source location.
*/
-#define tu_finishme(format, ...) \
- do { \
- static bool reported = false; \
- if (!reported) { \
- __tu_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__); \
- reported = true; \
- } \
+#define tu_finishme(format, ...) \
+ do { \
+ static bool reported = false; \
+ if (!reported) { \
+ __tu_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__); \
+ reported = true; \
+ } \
} while (0)
/* A non-fatal assert. Useful for debugging. */
#ifdef DEBUG
-#define tu_assert(x) \
- ({ \
- if (unlikely(!(x))) \
- fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x); \
+#define tu_assert(x) \
+ ({ \
+ if (unlikely(!(x))) \
+ fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x); \
})
#else
#define tu_assert(x)
/* Suppress -Wunused in stub functions */
#define tu_use_args(...) __tu_use_args(0, ##__VA_ARGS__)
-static inline void __tu_use_args(int ignore, ...) {}
+static inline void
+__tu_use_args(int ignore, ...)
+{
+}
-#define tu_stub() \
- do { \
- tu_finishme("stub %s", __func__); \
+#define tu_stub() \
+ do { \
+ tu_finishme("stub %s", __func__); \
} while (0)
void *
uint8_t device_uuid[VK_UUID_SIZE];
uint8_t cache_uuid[VK_UUID_SIZE];
+ struct wsi_device wsi_device;
+
int local_fd;
int master_fd;
- struct fd_device *drm_device;
unsigned gpu_id;
uint32_t gmem_size;
+ uint32_t tile_align_w;
+ uint32_t tile_align_h;
/* This is the drivers on-disk cache used as a fallback as opposed to
* the pipeline cache defined by apps.
enum tu_debug_flags
{
TU_DEBUG_STARTUP = 1 << 0,
+ TU_DEBUG_NIR = 1 << 1,
+ TU_DEBUG_IR3 = 1 << 2,
+ TU_DEBUG_NOBIN = 1 << 3,
};
struct tu_instance
struct tu_instance_extension_table enabled_extensions;
};
+VkResult
+tu_wsi_init(struct tu_physical_device *physical_device);
+void
+tu_wsi_finish(struct tu_physical_device *physical_device);
+
bool
tu_instance_extension_supported(const char *name);
uint32_t
#define TU_MAX_QUEUE_FAMILIES 1
+struct tu_fence
+{
+ struct wsi_fence *fence_wsi;
+ bool signaled;
+ int fd;
+};
+
+void
+tu_fence_init(struct tu_fence *fence, bool signaled);
+void
+tu_fence_finish(struct tu_fence *fence);
+void
+tu_fence_update_fd(struct tu_fence *fence, int fd);
+void
+tu_fence_copy(struct tu_fence *fence, const struct tu_fence *src);
+void
+tu_fence_signal(struct tu_fence *fence);
+void
+tu_fence_wait_idle(struct tu_fence *fence);
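+
+/* Illustrative lifecycle (a sketch; fd would come from a queue submit):
+ *    tu_fence_init(&fence, false);
+ *    tu_fence_update_fd(&fence, fd);
+ *    tu_fence_wait_idle(&fence);
+ *    tu_fence_finish(&fence);
+ */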
+
struct tu_queue
{
VK_LOADER_DATA _loader_data;
uint32_t queue_family_index;
int queue_idx;
VkDeviceQueueCreateFlags flags;
-};
-struct tu_bo_list
-{
- unsigned capacity;
- pthread_mutex_t mutex;
+ uint32_t msm_queue_id;
+ struct tu_fence submit_fence;
};
struct tu_device
VkAllocationCallbacks alloc;
struct tu_instance *instance;
- struct radeon_winsys *ws;
struct tu_meta_state meta_state;
struct tu_physical_device *physical_device;
+ struct ir3_compiler *compiler;
+
/* Backup in-memory cache to be used if the app doesn't provide one */
struct tu_pipeline_cache *mem_cache;
mtx_t shader_slab_mutex;
struct tu_device_extension_table enabled_extensions;
-
- /* Whether the driver uses a global BO list. */
- bool use_global_bo_list;
-
- struct tu_bo_list bo_list;
};
struct tu_bo
{
uint32_t gem_handle;
uint64_t size;
- uint64_t offset;
uint64_t iova;
void *map;
};
VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size);
+VkResult
+tu_bo_init_dmabuf(struct tu_device *dev,
+ struct tu_bo *bo,
+ uint64_t size,
+ int fd);
+int
+tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo);
void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo);
VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo);
+struct tu_cs_entry
+{
+ /* No ownership */
+ const struct tu_bo *bo;
+
+ uint32_t size;
+ uint32_t offset;
+};
+
+struct ts_cs_memory {
+ uint32_t *map;
+ uint64_t iova;
+};
+
+enum tu_cs_mode
+{
+
+ /*
+ * A command stream in TU_CS_MODE_GROW mode grows automatically whenever it
+ * is full. tu_cs_begin must be called before command packet emission and
+ * tu_cs_end must be called after.
+ *
+ * This mode may create multiple entries internally. The entries must be
+ * submitted together.
+ */
+ TU_CS_MODE_GROW,
+
+ /*
+ * A command stream in TU_CS_MODE_EXTERNAL mode wraps an external,
+ * fixed-size buffer. tu_cs_begin and tu_cs_end are optional and have no
+ * effect on it.
+ *
+ * This mode does not create any entry or any BO.
+ */
+ TU_CS_MODE_EXTERNAL,
+
+ /*
+ * A command stream in TU_CS_MODE_SUB_STREAM mode does not support direct
+ * command packet emission. tu_cs_begin_sub_stream must be called to get a
+ * sub-stream to emit comamnd packets to. When done with the sub-stream,
+    * sub-stream to emit command packets to. When done with the sub-stream,
+ *
+ * This mode does not create any entry internally.
+ */
+ TU_CS_MODE_SUB_STREAM,
+};
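+
+/* Illustrative TU_CS_MODE_GROW usage, per the comments above (tu_cs_begin
+ * and tu_cs_end are declared elsewhere):
+ *    tu_cs_begin(cs);
+ *    ...emit command packets; the stream grows as needed...
+ *    tu_cs_end(cs);
+ * The resulting entries must be submitted together.
+ */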
+
+struct tu_cs
+{
+ uint32_t *start;
+ uint32_t *cur;
+ uint32_t *reserved_end;
+ uint32_t *end;
+
+ enum tu_cs_mode mode;
+ uint32_t next_bo_size;
+
+ struct tu_cs_entry *entries;
+ uint32_t entry_count;
+ uint32_t entry_capacity;
+
+ struct tu_bo **bos;
+ uint32_t bo_count;
+ uint32_t bo_capacity;
+};
+
struct tu_device_memory
{
struct tu_bo bo;
const struct tu_descriptor_set_layout *layout;
uint32_t size;
- struct radeon_winsys_bo *bo;
uint64_t va;
uint32_t *mapped_ptr;
struct tu_descriptor_range *dynamic_descriptors;
+
+ struct tu_bo *descriptors[0];
};
struct tu_push_descriptor_set
struct tu_descriptor_pool
{
- struct radeon_winsys_bo *bo;
- uint8_t *mapped_ptr;
+ struct tu_bo bo;
uint64_t current_offset;
uint64_t size;
VkBufferUsageFlags usage;
VkBufferCreateFlags flags;
+
+ struct tu_bo *bo;
+ VkDeviceSize bo_offset;
};
+static inline uint64_t
+tu_buffer_iova(struct tu_buffer *buffer)
+{
+ return buffer->bo->iova + buffer->bo_offset;
+}
+
enum tu_dynamic_state_bits
{
TU_DYNAMIC_VIEWPORT = 1 << 0,
const char *
tu_get_perftest_option_name(int id);
-/**
- * Attachment state when recording a renderpass instance.
- *
- * The clear value is valid only if there exists a pending clear.
- */
-struct tu_attachment_state
-{
- VkImageAspectFlags pending_clear_aspects;
- uint32_t cleared_views;
- VkClearValue clear_value;
- VkImageLayout current_layout;
-};
-
struct tu_descriptor_state
{
struct tu_descriptor_set *sets[MAX_SETS];
uint32_t valid;
struct tu_push_descriptor_set push_set;
bool push_dirty;
- uint32_t dynamic_buffers[4 * MAX_DYNAMIC_BUFFERS];
+ uint64_t dynamic_buffers[MAX_DYNAMIC_BUFFERS];
+};
+
+struct tu_tile
+{
+ uint8_t pipe;
+ uint8_t slot;
+ VkOffset2D begin;
+ VkOffset2D end;
+};
+
+struct tu_tiling_config
+{
+ VkRect2D render_area;
+ uint32_t buffer_cpp[MAX_ATTACHMENTS];
+ uint32_t buffer_count;
+
+ /* position and size of the first tile */
+ VkRect2D tile0;
+ /* number of tiles */
+ VkExtent2D tile_count;
+
+ uint32_t gmem_offsets[MAX_ATTACHMENTS];
+
+ /* size of the first VSC pipe */
+ VkExtent2D pipe0;
+ /* number of VSC pipes */
+ VkExtent2D pipe_count;
+
+ /* pipe register values */
+ uint32_t pipe_config[MAX_VSC_PIPES];
+ uint32_t pipe_sizes[MAX_VSC_PIPES];
+};
+
+enum tu_cmd_dirty_bits
+{
+ TU_CMD_DIRTY_PIPELINE = 1 << 0,
+ TU_CMD_DIRTY_COMPUTE_PIPELINE = 1 << 1,
+ TU_CMD_DIRTY_VERTEX_BUFFERS = 1 << 2,
+ TU_CMD_DIRTY_DESCRIPTOR_SETS = 1 << 3,
+ TU_CMD_DIRTY_PUSH_CONSTANTS = 1 << 4,
+
+ TU_CMD_DIRTY_DYNAMIC_LINE_WIDTH = 1 << 16,
+ TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK = 1 << 17,
+ TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK = 1 << 18,
+ TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE = 1 << 19,
};
struct tu_cmd_state
{
- /* Vertex descriptors */
- uint64_t vb_va;
- unsigned vb_size;
+ uint32_t dirty;
+
+ struct tu_pipeline *pipeline;
+ struct tu_pipeline *compute_pipeline;
+
+ /* Vertex buffers */
+ struct
+ {
+ struct tu_buffer *buffers[MAX_VBS];
+ VkDeviceSize offsets[MAX_VBS];
+ } vb;
struct tu_dynamic_state dynamic;
uint32_t index_type;
uint32_t max_index_count;
uint64_t index_va;
+
+ const struct tu_render_pass *pass;
+ const struct tu_subpass *subpass;
+ const struct tu_framebuffer *framebuffer;
+
+ struct tu_tiling_config tiling_config;
+
+ struct tu_cs_entry tile_load_ib;
+ struct tu_cs_entry tile_store_ib;
};
struct tu_cmd_pool
uint8_t *map;
unsigned offset;
uint64_t size;
- struct radeon_winsys_bo *upload_bo;
struct list_head list;
};
TU_CMD_BUFFER_STATUS_PENDING,
};
+struct tu_bo_list
+{
+ uint32_t count;
+ uint32_t capacity;
+ struct drm_msm_gem_submit_bo *bo_infos;
+};
+
+#define TU_BO_LIST_FAILED (~0)
+
+void
+tu_bo_list_init(struct tu_bo_list *list);
+void
+tu_bo_list_destroy(struct tu_bo_list *list);
+void
+tu_bo_list_reset(struct tu_bo_list *list);
+uint32_t
+tu_bo_list_add(struct tu_bo_list *list,
+ const struct tu_bo *bo,
+ uint32_t flags);
+VkResult
+tu_bo_list_merge(struct tu_bo_list *list, const struct tu_bo_list *other);
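+
+/* e.g. (illustrative) tracking a BO for submission; the flags are the
+ * MSM_SUBMIT_BO_* values from msm_drm.h:
+ *    uint32_t idx = tu_bo_list_add(&cmd->bo_list, bo, MSM_SUBMIT_BO_READ);
+ *    if (idx == TU_BO_LIST_FAILED)
+ *       ...allocation failed...
+ */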
+
struct tu_cmd_buffer
{
VK_LOADER_DATA _loader_data;
VkCommandBufferUsageFlags usage_flags;
VkCommandBufferLevel level;
enum tu_cmd_buffer_status status;
- struct radeon_cmdbuf *cs;
+
struct tu_cmd_state state;
struct tu_vertex_binding vertex_bindings[MAX_VBS];
uint32_t queue_family_index;
- uint8_t push_constants[MAX_PUSH_CONSTANTS_SIZE];
+ uint32_t push_constants[MAX_PUSH_CONSTANTS_SIZE / 4];
VkShaderStageFlags push_constant_stages;
struct tu_descriptor_set meta_push_descriptors;
struct tu_cmd_buffer_upload upload;
- uint32_t scratch_size_needed;
- uint32_t compute_scratch_size_needed;
- uint32_t esgs_ring_size_needed;
- uint32_t gsvs_ring_size_needed;
- bool tess_rings_needed;
- bool sample_positions_needed;
-
VkResult record_result;
- uint32_t gfx9_fence_offset;
- struct radeon_winsys_bo *gfx9_fence_bo;
- uint32_t gfx9_fence_idx;
- uint64_t gfx9_eop_bug_va;
-
- /**
- * Whether a query pool has been resetted and we have to flush caches.
- */
- bool pending_reset_query;
+ struct tu_bo_list bo_list;
+ struct tu_cs cs;
+ struct tu_cs draw_cs;
+ struct tu_cs draw_state;
+ struct tu_cs tile_cs;
+
+ uint16_t marker_reg;
+ uint32_t marker_seqno;
+
+ struct tu_bo scratch_bo;
+ uint32_t scratch_seqno;
+#define VSC_OVERFLOW 0x8
+#define VSC_SCRATCH 0x10
+
+ struct tu_bo vsc_data;
+ struct tu_bo vsc_data2;
+ uint32_t vsc_data_pitch;
+ uint32_t vsc_data2_pitch;
+ bool use_vsc_data;
+
+ bool wait_for_idle;
};
+unsigned
+tu6_emit_event_write(struct tu_cmd_buffer *cmd,
+ struct tu_cs *cs,
+ enum vgt_event_type event,
+ bool need_seqno);
+
bool
tu_get_memory_fd(struct tu_device *device,
struct tu_device_memory *memory,
int *pFD);
+static inline struct tu_descriptor_state *
+tu_get_descriptors_state(struct tu_cmd_buffer *cmd_buffer,
+ VkPipelineBindPoint bind_point)
+{
+ return &cmd_buffer->descriptors[bind_point];
+}
+
/*
* Takes x,y,z as exact numbers of invocations, instead of blocks.
*
struct tu_event
{
- struct radeon_winsys_bo *bo;
- uint64_t *map;
+ struct tu_bo bo;
};
struct tu_shader_module;
#define TU_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)
-#define tu_foreach_stage(stage, stage_bits) \
- for (gl_shader_stage stage, \
- __tmp = (gl_shader_stage)((stage_bits)&TU_STAGE_MASK); \
- stage = __builtin_ffs(__tmp) - 1, __tmp; \
- __tmp &= ~(1 << (stage)))
+#define tu_foreach_stage(stage, stage_bits) \
+ for (gl_shader_stage stage, \
+ __tmp = (gl_shader_stage)((stage_bits) &TU_STAGE_MASK); \
+ stage = __builtin_ffs(__tmp) - 1, __tmp; __tmp &= ~(1 << (stage)))
struct tu_shader_module
{
- struct nir_shader *nir;
unsigned char sha1[20];
- uint32_t size;
- char data[0];
+
+ uint32_t code_size;
+ const uint32_t *code[0];
+};
+
+struct tu_shader_compile_options
+{
+ struct ir3_shader_key key;
+
+ bool optimize;
+ bool include_binning_pass;
+};
+
+struct tu_descriptor_map
+{
+   /* TODO: avoid the fixed-size arrays, or justify their size */
+ unsigned num;
+ int set[64];
+ int binding[64];
+ int value[64];
+};
+
+struct tu_shader
+{
+ struct ir3_shader ir3_shader;
+
+ struct tu_descriptor_map texture_map;
+ struct tu_descriptor_map sampler_map;
+ struct tu_descriptor_map ubo_map;
+ struct tu_descriptor_map ssbo_map;
+
+ /* This may be true for vertex shaders. When true, variants[1] is the
+ * binning variant and binning_binary is non-NULL.
+ */
+ bool has_binning_pass;
+
+ void *binary;
+ void *binning_binary;
+
+ struct ir3_shader_variant variants[0];
+};
+
+struct tu_shader *
+tu_shader_create(struct tu_device *dev,
+ gl_shader_stage stage,
+ const VkPipelineShaderStageCreateInfo *stage_info,
+ const VkAllocationCallbacks *alloc);
+
+void
+tu_shader_destroy(struct tu_device *dev,
+ struct tu_shader *shader,
+ const VkAllocationCallbacks *alloc);
+
+void
+tu_shader_compile_options_init(
+ struct tu_shader_compile_options *options,
+ const VkGraphicsPipelineCreateInfo *pipeline_info);
+
+VkResult
+tu_shader_compile(struct tu_device *dev,
+ struct tu_shader *shader,
+ const struct tu_shader *next_stage,
+ const struct tu_shader_compile_options *options,
+ const VkAllocationCallbacks *alloc);
+
+struct tu_program_descriptor_linkage
+{
+ struct ir3_ubo_analysis_state ubo_state;
+ struct ir3_const_state const_state;
+
+ uint32_t constlen;
+
+ struct tu_descriptor_map texture_map;
+ struct tu_descriptor_map sampler_map;
+ struct tu_descriptor_map ubo_map;
+ struct tu_descriptor_map ssbo_map;
+ struct ir3_ibo_mapping image_mapping;
};
struct tu_pipeline
{
- struct tu_device *device;
+ struct tu_cs cs;
+
struct tu_dynamic_state dynamic_state;
struct tu_pipeline_layout *layout;
bool need_indirect_descriptor_sets;
VkShaderStageFlags active_stages;
+
+ struct
+ {
+ struct tu_bo binary_bo;
+ struct tu_cs_entry state_ib;
+ struct tu_cs_entry binning_state_ib;
+
+ struct tu_program_descriptor_linkage link[MESA_SHADER_STAGES];
+ } program;
+
+ struct
+ {
+ uint8_t bindings[MAX_VERTEX_ATTRIBS];
+ uint16_t strides[MAX_VERTEX_ATTRIBS];
+ uint16_t offsets[MAX_VERTEX_ATTRIBS];
+ uint32_t count;
+
+ uint8_t binning_bindings[MAX_VERTEX_ATTRIBS];
+ uint16_t binning_strides[MAX_VERTEX_ATTRIBS];
+ uint16_t binning_offsets[MAX_VERTEX_ATTRIBS];
+ uint32_t binning_count;
+
+ struct tu_cs_entry state_ib;
+ struct tu_cs_entry binning_state_ib;
+ } vi;
+
+ struct
+ {
+ enum pc_di_primtype primtype;
+ bool primitive_restart;
+ } ia;
+
+ struct
+ {
+ struct tu_cs_entry state_ib;
+ } vp;
+
+ struct
+ {
+ uint32_t gras_su_cntl;
+ struct tu_cs_entry state_ib;
+ } rast;
+
+ struct
+ {
+ struct tu_cs_entry state_ib;
+ } ds;
+
+ struct
+ {
+ struct tu_cs_entry state_ib;
+ } blend;
+
+ struct
+ {
+ uint32_t local_size[3];
+ } compute;
};
+void
+tu6_emit_viewport(struct tu_cs *cs, const VkViewport *viewport);
+
+void
+tu6_emit_scissor(struct tu_cs *cs, const VkRect2D *scissor);
+
+void
+tu6_emit_gras_su_cntl(struct tu_cs *cs,
+ uint32_t gras_su_cntl,
+ float line_width);
+
+void
+tu6_emit_depth_bias(struct tu_cs *cs,
+ float constant_factor,
+ float clamp,
+ float slope_factor);
+
+void
+tu6_emit_stencil_compare_mask(struct tu_cs *cs,
+ uint32_t front,
+ uint32_t back);
+
+void
+tu6_emit_stencil_write_mask(struct tu_cs *cs, uint32_t front, uint32_t back);
+
+void
+tu6_emit_stencil_reference(struct tu_cs *cs, uint32_t front, uint32_t back);
+
+void
+tu6_emit_blend_constants(struct tu_cs *cs, const float constants[4]);
+
struct tu_userdata_info *
tu_lookup_user_sgpr(struct tu_pipeline *pipeline,
gl_shader_stage stage,
uint32_t custom_blend_mode;
};
-VkResult
-tu_graphics_pipeline_create(
- VkDevice device,
- VkPipelineCache cache,
- const VkGraphicsPipelineCreateInfo *pCreateInfo,
- const struct tu_graphics_pipeline_create_info *extra,
- const VkAllocationCallbacks *alloc,
- VkPipeline *pPipeline);
-
-struct vk_format_description;
-uint32_t
-tu_translate_buffer_dataformat(const struct vk_format_description *desc,
- int first_non_void);
-uint32_t
-tu_translate_buffer_numformat(const struct vk_format_description *desc,
- int first_non_void);
-uint32_t
-tu_translate_colorformat(VkFormat format);
-uint32_t
-tu_translate_color_numformat(VkFormat format,
- const struct vk_format_description *desc,
- int first_non_void);
-uint32_t
-tu_colorformat_endian_swap(uint32_t colorformat);
-unsigned
-tu_translate_colorswap(VkFormat format, bool do_endian_swap);
-uint32_t
-tu_translate_dbformat(VkFormat format);
-uint32_t
-tu_translate_tex_dataformat(VkFormat format,
- const struct vk_format_description *desc,
- int first_non_void);
-uint32_t
-tu_translate_tex_numformat(VkFormat format,
- const struct vk_format_description *desc,
- int first_non_void);
-bool
-tu_format_pack_clear_color(VkFormat format,
- uint32_t clear_vals[2],
- VkClearColorValue *value);
-bool
-tu_is_colorbuffer_format_supported(VkFormat format, bool *blendable);
-bool
-tu_dcc_formats_compatible(VkFormat format1, VkFormat format2);
+struct tu_native_format
+{
+ int vtx; /* VFMTn_xxx or -1 */
+ int tex; /* TFMTn_xxx or -1 */
+ int rb; /* RBn_xxx or -1 */
+ int swap; /* enum a3xx_color_swap */
+   bool present; /* internal only; always true for formats seen by callers */
+};
+const struct tu_native_format *
+tu6_get_native_format(VkFormat format);
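+
+/* e.g. (illustrative): a format is usable for sampling only when tex is not
+ * -1, and as a render target only when rb is not -1:
+ *    const struct tu_native_format *fmt = tu6_get_native_format(vk_format);
+ *    if (fmt && fmt->tex >= 0)
+ *       ...emit the texture descriptor with fmt->tex and fmt->swap...
+ */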
+
+void
+tu_pack_clear_value(const VkClearValue *val,
+ VkFormat format,
+ uint32_t buf[4]);
-struct tu_image_level {
- VkDeviceSize offset;
- VkDeviceSize size;
- uint32_t pitch;
+void
+tu_2d_clear_color(const VkClearColorValue *val, VkFormat format, uint32_t buf[4]);
+
+void
+tu_2d_clear_zs(const VkClearDepthStencilValue *val, VkFormat format, uint32_t buf[4]);
+
+enum a6xx_2d_ifmt tu6_rb_fmt_to_ifmt(enum a6xx_color_fmt fmt);
+enum a6xx_depth_format tu6_pipe2depth(VkFormat format);
+
+struct tu_image_level
+{
+ VkDeviceSize offset;
+ VkDeviceSize size;
+ uint32_t pitch;
};
struct tu_image
VkImageTiling tiling; /** VkImageCreateInfo::tiling */
VkImageCreateFlags flags; /** VkImageCreateInfo::flags */
VkExtent3D extent;
+ uint32_t level_count;
+ uint32_t layer_count;
+ VkSampleCountFlagBits samples;
+
- VkDeviceSize size;
uint32_t alignment;
- /* memory layout */
- VkDeviceSize layer_size;
- struct tu_image_level levels[15];
- unsigned tile_mode;
+ struct fdl_layout layout;
unsigned queue_family_mask;
bool exclusive;
/* For VK_ANDROID_native_buffer, the WSI image owns the memory, */
VkDeviceMemory owned_memory;
+
+ /* Set when bound */
+ struct tu_bo *bo;
+ VkDeviceSize bo_offset;
};
unsigned
tu_get_layerCount(const struct tu_image *image,
const VkImageSubresourceRange *range)
{
- abort();
+ return range->layerCount == VK_REMAINING_ARRAY_LAYERS
+ ? image->layer_count - range->baseArrayLayer
+ : range->layerCount;
}
static inline uint32_t
tu_get_levelCount(const struct tu_image *image,
const VkImageSubresourceRange *range)
{
- abort();
+ return range->levelCount == VK_REMAINING_MIP_LEVELS
+ ? image->level_count - range->baseMipLevel
+ : range->levelCount;
+}
+
+static inline VkDeviceSize
+tu_layer_size(struct tu_image *image, int level)
+{
+ return fdl_layer_stride(&image->layout, level);
+}
+
+static inline uint32_t
+tu_image_stride(struct tu_image *image, int level)
+{
+ return image->layout.slices[level].pitch * image->layout.cpp;
+}
+
+static inline uint64_t
+tu_image_base(struct tu_image *image, int level, int layer)
+{
+ return image->bo->iova + image->bo_offset +
+ fdl_surface_offset(&image->layout, level, layer);
}
+static inline VkDeviceSize
+tu_image_ubwc_size(struct tu_image *image, int level)
+{
+ return image->layout.ubwc_size;
+}
+
+static inline uint32_t
+tu_image_ubwc_pitch(struct tu_image *image, int level)
+{
+ return image->layout.ubwc_slices[level].pitch;
+}
+
+static inline uint64_t
+tu_image_ubwc_base(struct tu_image *image, int level, int layer)
+{
+ return image->bo->iova + image->bo_offset +
+ image->layout.ubwc_slices[level].offset +
+ layer * tu_image_ubwc_size(image, level);
+}
+
+enum a6xx_tile_mode
+tu6_get_image_tile_mode(struct tu_image *image, int level);
+enum a3xx_msaa_samples
+tu_msaa_samples(uint32_t samples);
+
struct tu_image_view
{
struct tu_image *image; /**< VkImageViewCreateInfo::image */
uint32_t level_count;
VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */
- uint32_t descriptor[16];
+ uint32_t descriptor[A6XX_TEX_CONST_DWORDS];
/* Descriptor for use as a storage image as opposed to a sampled image.
* This has a few differences for cube maps (e.g. type).
*/
- uint32_t storage_descriptor[16];
+ uint32_t storage_descriptor[A6XX_TEX_CONST_DWORDS];
};
struct tu_sampler
{
-};
+ uint32_t state[A6XX_TEX_SAMP_DWORDS];
-struct tu_image_create_info
-{
- const VkImageCreateInfo *vk_info;
- bool scanout;
- bool no_metadata_planes;
+ bool needs_border;
+ VkBorderColor border;
};
VkResult
tu_image_create(VkDevice _device,
- const struct tu_image_create_info *info,
+ const VkImageCreateInfo *pCreateInfo,
const VkAllocationCallbacks *alloc,
- VkImage *pImage);
+ VkImage *pImage,
+ uint64_t modifier);
VkResult
tu_image_from_gralloc(VkDevice device_h,
struct tu_buffer_view
{
- struct radeon_winsys_bo *bo;
VkFormat vk_format;
uint64_t range; /**< VkBufferViewCreateInfo::range */
uint32_t state[4];
const struct VkExtent3D imageExtent)
{
switch (imageType) {
- case VK_IMAGE_TYPE_1D:
- return (VkExtent3D){ imageExtent.width, 1, 1 };
- case VK_IMAGE_TYPE_2D:
- return (VkExtent3D){ imageExtent.width, imageExtent.height, 1 };
- case VK_IMAGE_TYPE_3D:
- return imageExtent;
- default:
- unreachable("invalid image type");
+ case VK_IMAGE_TYPE_1D:
+ return (VkExtent3D) { imageExtent.width, 1, 1 };
+ case VK_IMAGE_TYPE_2D:
+ return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
+ case VK_IMAGE_TYPE_3D:
+ return imageExtent;
+ default:
+ unreachable("invalid image type");
}
}
const struct VkOffset3D imageOffset)
{
switch (imageType) {
- case VK_IMAGE_TYPE_1D:
- return (VkOffset3D){ imageOffset.x, 0, 0 };
- case VK_IMAGE_TYPE_2D:
- return (VkOffset3D){ imageOffset.x, imageOffset.y, 0 };
- case VK_IMAGE_TYPE_3D:
- return imageOffset;
- default:
- unreachable("invalid image type");
+ case VK_IMAGE_TYPE_1D:
+ return (VkOffset3D) { imageOffset.x, 0, 0 };
+ case VK_IMAGE_TYPE_2D:
+ return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
+ case VK_IMAGE_TYPE_3D:
+ return imageOffset;
+ default:
+ unreachable("invalid image type");
}
}
struct tu_attachment_info attachments[0];
};
-struct tu_subpass_barrier
-{
- VkPipelineStageFlags src_stage_mask;
- VkAccessFlags src_access_mask;
- VkAccessFlags dst_access_mask;
-};
-
-void
-tu_subpass_barrier(struct tu_cmd_buffer *cmd_buffer,
- const struct tu_subpass_barrier *barrier);
-
struct tu_subpass_attachment
{
uint32_t attachment;
- VkImageLayout layout;
};
struct tu_subpass
struct tu_subpass_attachment *resolve_attachments;
struct tu_subpass_attachment depth_stencil_attachment;
- /** Subpass has at least one resolve attachment */
- bool has_resolve;
-
- struct tu_subpass_barrier start_barrier;
-
- uint32_t view_mask;
- VkSampleCountFlagBits max_sample_count;
+ VkSampleCountFlagBits samples;
};
struct tu_render_pass_attachment
{
VkFormat format;
- uint32_t samples;
+ uint32_t cpp;
VkAttachmentLoadOp load_op;
VkAttachmentLoadOp stencil_load_op;
- VkImageLayout initial_layout;
- VkImageLayout final_layout;
- uint32_t view_mask;
+ VkAttachmentStoreOp store_op;
+ VkAttachmentStoreOp stencil_store_op;
+ bool needs_gmem;
};
struct tu_render_pass
uint32_t subpass_count;
struct tu_subpass_attachment *subpass_attachments;
struct tu_render_pass_attachment *attachments;
- struct tu_subpass_barrier end_barrier;
struct tu_subpass subpasses[0];
};
struct tu_query_pool
{
- struct radeon_winsys_bo *bo;
uint32_t stride;
uint32_t availability_offset;
uint64_t size;
struct tu_semaphore
{
- /* use a winsys sem for non-exportable */
- struct radeon_winsys_sem *sem;
uint32_t syncobj;
uint32_t temp_syncobj;
};
struct tu_device *device,
struct tu_cmd_buffer *cmd_buffer,
struct tu_descriptor_set *set,
- VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+ VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const void *pData);
void
uint32_t descriptorWriteCount,
const VkWriteDescriptorSet *pDescriptorWrites);
-struct tu_fence
-{
- struct radeon_winsys_fence *fence;
- bool submitted;
- bool signalled;
+int
+tu_drm_get_gpu_id(const struct tu_physical_device *dev, uint32_t *id);
- uint32_t syncobj;
- uint32_t temp_syncobj;
-};
+int
+tu_drm_get_gmem_size(const struct tu_physical_device *dev, uint32_t *size);
-/* tu_nir_to_llvm.c */
-struct tu_shader_variant_info;
-struct tu_nir_compiler_options;
+int
+tu_drm_submitqueue_new(const struct tu_device *dev,
+ int priority,
+ uint32_t *queue_id);
-struct radeon_winsys_sem;
+void
+tu_drm_submitqueue_close(const struct tu_device *dev, uint32_t queue_id);
uint32_t
-tu_gem_new(struct tu_device *dev, uint64_t size, uint32_t flags);
+tu_gem_new(const struct tu_device *dev, uint64_t size, uint32_t flags);
+uint32_t
+tu_gem_import_dmabuf(const struct tu_device *dev,
+ int prime_fd,
+ uint64_t size);
+int
+tu_gem_export_dmabuf(const struct tu_device *dev, uint32_t gem_handle);
void
-tu_gem_close(struct tu_device *dev, uint32_t gem_handle);
+tu_gem_close(const struct tu_device *dev, uint32_t gem_handle);
uint64_t
-tu_gem_info_offset(struct tu_device *dev, uint32_t gem_handle);
+tu_gem_info_offset(const struct tu_device *dev, uint32_t gem_handle);
uint64_t
-tu_gem_info_iova(struct tu_device *dev, uint32_t gem_handle);
-int
-tu_drm_query_param(struct tu_physical_device *dev, uint32_t param, uint64_t *value);
+tu_gem_info_iova(const struct tu_device *dev, uint32_t gem_handle);
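+
+/* Illustrative GEM handle lifecycle (a sketch of how these primitives
+ * compose; the tu_bo helpers above are expected to build on them):
+ *    uint32_t handle = tu_gem_new(dev, size, flags);
+ *    uint64_t iova = tu_gem_info_iova(dev, handle);
+ *    ...map and use the BO...
+ *    tu_gem_close(dev, handle);
+ */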
#define TU_DEFINE_HANDLE_CASTS(__tu_type, __VkType) \
- \
+ \
static inline struct __tu_type *__tu_type##_from_handle(__VkType _handle) \
- { \
- return (struct __tu_type *)_handle; \
- } \
- \
+ { \
+ return (struct __tu_type *) _handle; \
+ } \
+ \
static inline __VkType __tu_type##_to_handle(struct __tu_type *_obj) \
- { \
- return (__VkType)_obj; \
+ { \
+ return (__VkType) _obj; \
}
#define TU_DEFINE_NONDISP_HANDLE_CASTS(__tu_type, __VkType) \
- \
+ \
static inline struct __tu_type *__tu_type##_from_handle(__VkType _handle) \
- { \
- return (struct __tu_type *)(uintptr_t)_handle; \
- } \
- \
+ { \
+ return (struct __tu_type *) (uintptr_t) _handle; \
+ } \
+ \
static inline __VkType __tu_type##_to_handle(struct __tu_type *_obj) \
- { \
- return (__VkType)(uintptr_t)_obj; \
+ { \
+ return (__VkType)(uintptr_t) _obj; \
}
#define TU_FROM_HANDLE(__tu_type, __name, __handle) \
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_pool, VkDescriptorPool)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_set, VkDescriptorSet)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_set_layout,
- VkDescriptorSetLayout)
+ VkDescriptorSetLayout)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_update_template,
- VkDescriptorUpdateTemplateKHR)
+ VkDescriptorUpdateTemplate)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_device_memory, VkDeviceMemory)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_fence, VkFence)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_event, VkEvent)