#include "util/macros.h"
#include "util/u_atomic.h"
#include "vk_alloc.h"
+#include "vk_object.h"
#include "vk_debug_report.h"
#include "wsi_common.h"
-#include "drm-uapi/msm_drm.h"
#include "ir3/ir3_compiler.h"
#include "ir3/ir3_shader.h"
#define tu_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
-static inline uint32_t
-tu_minify(uint32_t n, uint32_t levels)
-{
- if (unlikely(n == 0))
- return 0;
- else
- return MAX2(n >> levels, 1);
-}
-
#define for_each_bit(b, dword) \
for (uint32_t __dword = (dword); \
        (b) = __builtin_ffs(__dword) - 1, __dword; __dword &= ~(1u << (b)))
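
/* Illustrative sketch (not part of the patch): for_each_bit() assigns each
 * set-bit index of 'dword' to 'b' in turn, lowest first; this helper is
 * hypothetical, for demonstration only.
 */
static inline uint32_t
tu_example_count_bits(uint32_t mask)
{
   uint32_t b, count = 0;
   for_each_bit(b, mask)
      count++; /* one iteration per set bit */
   return count;
}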
-#define typed_memcpy(dest, src, count) \
- ({ \
- STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
- memcpy((dest), (src), (count) * sizeof(*(src))); \
- })
-
#define COND(bool, val) ((bool) ? (val) : 0)
#define BIT(bit) (1u << (bit))
struct tu_physical_device
{
- VK_LOADER_DATA _loader_data;
+ struct vk_object_base base;
struct tu_instance *instance;
int msm_major_version;
int msm_minor_version;
+ bool limited_z24s8;
+
 /* This is the driver's on-disk cache, used as a fallback as opposed to
* the pipeline cache defined by apps.
*/
struct tu_instance
{
- VK_LOADER_DATA _loader_data;
+ struct vk_object_base base;
VkAllocationCallbacks alloc;
struct tu_pipeline_cache
{
+ struct vk_object_base base;
+
struct tu_device *device;
pthread_mutex_t mutex;
struct tu_fence
{
+ struct vk_object_base base;
struct wsi_fence *fence_wsi;
bool signaled;
int fd;
struct tu_queue
{
- VK_LOADER_DATA _loader_data;
+ struct vk_object_base base;
+
struct tu_device *device;
uint32_t queue_family_index;
int queue_idx;
enum global_shader {
GLOBAL_SH_VS,
- GLOBAL_SH_VS_LAYER,
- GLOBAL_SH_GS_LAYER,
GLOBAL_SH_FS_BLIT,
GLOBAL_SH_FS_CLEAR0,
GLOBAL_SH_FS_CLEAR_MAX = GLOBAL_SH_FS_CLEAR0 + MAX_RTS,
GLOBAL_SH_COUNT,
};
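
/* Illustrative sketch (not part of the patch): the per-RT clear shader is
 * selected by offsetting from GLOBAL_SH_FS_CLEAR0; this helper is
 * hypothetical.
 */
static inline enum global_shader
tu_example_clear_shader(uint32_t rt)
{
   /* one clear shader per render target, valid for rt < MAX_RTS */
   return (enum global_shader)(GLOBAL_SH_FS_CLEAR0 + rt);
}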
+#define TU_BORDER_COLOR_COUNT 4096
+#define TU_BORDER_COLOR_BUILTIN 6
+
/* This struct defines the layout of the global_bo */
struct tu6_global
{
- /* 6 bcolor_entry entries, one for each VK_BORDER_COLOR */
- uint8_t border_color[128 * 6];
-
/* clear/blit shaders, all <= 16 instrs (16 instr = 1 instrlen unit) */
instr_t shaders[GLOBAL_SH_COUNT][16];
volatile uint32_t vsc_draw_overflow;
uint32_t _pad1;
volatile uint32_t vsc_prim_overflow;
- uint32_t _pad2[3];
+ uint32_t _pad2;
+ uint64_t predicate;
   /* scratch space for VPC_SO[i].FLUSH_BASE_LO/HI, starts on a 32-byte boundary. */
struct {
uint32_t offset;
uint32_t pad[7];
} flush_base[4];
+
+   /* note: a larger global bo will be used for customBorderColors */
+ struct bcolor_entry bcolor_builtin[TU_BORDER_COLOR_BUILTIN], bcolor[];
};
#define gb_offset(member) offsetof(struct tu6_global, member)
#define global_iova(cmd, member) ((cmd)->device->global_bo.iova + gb_offset(member))
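
/* Illustrative sketch (not part of the patch): gb_offset()/global_iova()
 * resolve a tu6_global member to a buffer offset or a GPU address; this
 * helper is hypothetical.
 */
static inline uint32_t
tu_example_bcolor_offset(uint32_t idx)
{
   /* byte offset of built-in border color entry 'idx' within the global bo */
   return gb_offset(bcolor_builtin) + idx * sizeof(struct bcolor_entry);
}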
struct tu_device
{
- VK_LOADER_DATA _loader_data;
-
- VkAllocationCallbacks alloc;
-
+ struct vk_device vk;
struct tu_instance *instance;
struct tu_queue *queues[TU_MAX_QUEUE_FAMILIES];
uint32_t vsc_draw_strm_pitch;
uint32_t vsc_prim_strm_pitch;
- mtx_t vsc_pitch_mtx;
+ BITSET_DECLARE(custom_border_color, TU_BORDER_COLOR_COUNT);
+ mtx_t mutex;
};
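
/* Illustrative sketch (not part of the patch): a custom border color slot
 * could be handed out by scanning the bitset under dev->mutex; BITSET_TEST()
 * and BITSET_SET() are util/bitset.h helpers, and this function is
 * hypothetical.
 */
static inline int
tu_example_alloc_border_color(struct tu_device *dev)
{
   int slot = -1;
   mtx_lock(&dev->mutex);
   for (int i = 0; i < TU_BORDER_COLOR_COUNT; i++) {
      if (!BITSET_TEST(dev->custom_border_color, i)) {
         BITSET_SET(dev->custom_border_color, i);
         slot = i;
         break;
      }
   }
   mtx_unlock(&dev->mutex);
   return slot;
}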
VkResult _tu_device_set_lost(struct tu_device *device,
struct tu_device_memory
{
+ struct vk_object_base base;
+
struct tu_bo bo;
VkDeviceSize size;
struct tu_descriptor_set
{
+ struct vk_object_base base;
+
const struct tu_descriptor_set_layout *layout;
struct tu_descriptor_pool *pool;
uint32_t size;
struct tu_descriptor_pool
{
+ struct vk_object_base base;
+
struct tu_bo bo;
uint64_t current_offset;
uint64_t size;
struct tu_descriptor_update_template
{
+ struct vk_object_base base;
+
uint32_t entry_count;
struct tu_descriptor_update_template_entry entry[0];
};
struct tu_buffer
{
+ struct vk_object_base base;
+
VkDeviceSize size;
VkBufferUsageFlags usage;
enum tu_cmd_dirty_bits
{
- TU_CMD_DIRTY_COMPUTE_PIPELINE = 1 << 1,
TU_CMD_DIRTY_VERTEX_BUFFERS = 1 << 2,
- TU_CMD_DIRTY_DESCRIPTOR_SETS = 1 << 3,
- TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS = 1 << 4,
+ TU_CMD_DIRTY_DESC_SETS_LOAD = 1 << 3,
+ TU_CMD_DIRTY_COMPUTE_DESC_SETS_LOAD = 1 << 4,
TU_CMD_DIRTY_SHADER_CONSTS = 1 << 5,
/* all draw states were disabled and need to be re-enabled: */
TU_CMD_DIRTY_DRAW_STATE = 1 << 7,
TU_ACCESS_CCU_DEPTH_INCOHERENT_READ = 1 << 8,
TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE = 1 << 9,
- TU_ACCESS_SYSMEM_READ = 1 << 10,
- TU_ACCESS_SYSMEM_WRITE = 1 << 11,
+ /* Accesses by the host */
+ TU_ACCESS_HOST_READ = 1 << 10,
+ TU_ACCESS_HOST_WRITE = 1 << 11,
- /* Set if a WFI is required due to data being read by the CP or the 2D
- * engine.
+   /* Accesses by a GPU engine which bypasses any cache, e.g. writes via
+    * CP_EVENT_WRITE::BLIT and the CP are SYSMEM_WRITE.
*/
- TU_ACCESS_WFI_READ = 1 << 12,
+ TU_ACCESS_SYSMEM_READ = 1 << 12,
+ TU_ACCESS_SYSMEM_WRITE = 1 << 13,
+
+   /* Set if a WFI is required. This can be needed for:
+    * - the 2D engine, which (on some models) doesn't wait for flushes to
+    *   complete before starting
+    * - CP draw indirect opcodes, where we need to wait for any flushes to
+    *   complete, but the CP implicitly waits for WFIs to complete and
+    *   therefore we only need a WFI after the flushes.
+    */
+ TU_ACCESS_WFI_READ = 1 << 14,
+
+ /* Set if a CP_WAIT_FOR_ME is required due to the data being read by the CP
+ * without it waiting for any WFI.
+ */
+ TU_ACCESS_WFM_READ = 1 << 15,
+
+ /* Memory writes from the CP start in-order with draws and event writes,
+ * but execute asynchronously and hence need a CP_WAIT_MEM_WRITES if read.
+ */
+ TU_ACCESS_CP_WRITE = 1 << 16,
TU_ACCESS_READ =
TU_ACCESS_UCHE_READ |
TU_ACCESS_CCU_DEPTH_READ |
TU_ACCESS_CCU_COLOR_INCOHERENT_READ |
TU_ACCESS_CCU_DEPTH_INCOHERENT_READ |
- TU_ACCESS_SYSMEM_READ,
+ TU_ACCESS_HOST_READ |
+ TU_ACCESS_SYSMEM_READ |
+ TU_ACCESS_WFI_READ |
+ TU_ACCESS_WFM_READ,
TU_ACCESS_WRITE =
TU_ACCESS_UCHE_WRITE |
TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE |
TU_ACCESS_CCU_DEPTH_WRITE |
TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE |
- TU_ACCESS_SYSMEM_WRITE,
+ TU_ACCESS_HOST_WRITE |
+ TU_ACCESS_SYSMEM_WRITE |
+ TU_ACCESS_CP_WRITE,
TU_ACCESS_ALL =
TU_ACCESS_READ |
TU_CMD_FLAG_CCU_INVALIDATE_COLOR = 1 << 3,
TU_CMD_FLAG_CACHE_FLUSH = 1 << 4,
TU_CMD_FLAG_CACHE_INVALIDATE = 1 << 5,
+ TU_CMD_FLAG_WAIT_MEM_WRITES = 1 << 6,
+ TU_CMD_FLAG_WAIT_FOR_IDLE = 1 << 7,
+ TU_CMD_FLAG_WAIT_FOR_ME = 1 << 8,
TU_CMD_FLAG_ALL_FLUSH =
TU_CMD_FLAG_CCU_FLUSH_DEPTH |
TU_CMD_FLAG_CCU_FLUSH_COLOR |
- TU_CMD_FLAG_CACHE_FLUSH,
+ TU_CMD_FLAG_CACHE_FLUSH |
+ /* Treat the CP as a sort of "cache" which may need to be "flushed" via
+    * waiting for writes to land with CP_WAIT_MEM_WRITES.
+ */
+ TU_CMD_FLAG_WAIT_MEM_WRITES,
- TU_CMD_FLAG_ALL_INVALIDATE =
+ TU_CMD_FLAG_GPU_INVALIDATE =
TU_CMD_FLAG_CCU_INVALIDATE_DEPTH |
TU_CMD_FLAG_CCU_INVALIDATE_COLOR |
TU_CMD_FLAG_CACHE_INVALIDATE,
- TU_CMD_FLAG_WFI = 1 << 6,
+ TU_CMD_FLAG_ALL_INVALIDATE =
+ TU_CMD_FLAG_GPU_INVALIDATE |
+ /* Treat the CP as a sort of "cache" which may need to be "invalidated"
+ * via waiting for UCHE/CCU flushes to land with WFI/WFM.
+ */
+ TU_CMD_FLAG_WAIT_FOR_IDLE |
+ TU_CMD_FLAG_WAIT_FOR_ME,
};
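
/* Illustrative sketch (not part of the patch): a full barrier requests both
 * groups, while a GPU-only invalidate (skipping the CP-related WFI/WFM) uses
 * TU_CMD_FLAG_GPU_INVALIDATE alone; this helper is hypothetical.
 */
static inline uint32_t
tu_example_full_barrier_bits(void)
{
   return TU_CMD_FLAG_ALL_FLUSH | TU_CMD_FLAG_ALL_INVALIDATE;
}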
/* Changing the CCU from sysmem mode to gmem mode or vice-versa is pretty
/* saved states to re-emit in TU_CMD_DIRTY_DRAW_STATE case */
struct tu_draw_state dynamic_state[TU_DYNAMIC_STATE_COUNT];
- struct tu_cs_entry vertex_buffers_ib;
- struct tu_cs_entry shader_const_ib[MESA_SHADER_STAGES];
- struct tu_cs_entry desc_sets_ib, desc_sets_load_ib;
- struct tu_cs_entry ia_gmem_ib, ia_sysmem_ib;
+ struct tu_draw_state vertex_buffers;
+ struct tu_draw_state shader_const[MESA_SHADER_STAGES];
+ struct tu_draw_state desc_sets;
struct tu_draw_state vs_params;
struct tu_cs_entry tile_store_ib;
bool xfb_used;
+ bool has_tess;
+ bool has_subpass_predication;
+ bool predication_active;
};
struct tu_cmd_pool
{
+ struct vk_object_base base;
+
VkAllocationCallbacks alloc;
struct list_head cmd_buffers;
struct list_head free_cmd_buffers;
TU_CMD_BUFFER_STATUS_PENDING,
};
+#ifndef MSM_SUBMIT_BO_READ
+#define MSM_SUBMIT_BO_READ 0x0001
+#define MSM_SUBMIT_BO_WRITE 0x0002
+#define MSM_SUBMIT_BO_DUMP 0x0004
+
+struct drm_msm_gem_submit_bo {
+ uint32_t flags; /* in, mask of MSM_SUBMIT_BO_x */
+ uint32_t handle; /* in, GEM handle */
+ uint64_t presumed; /* in/out, presumed buffer address */
+};
+#endif
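
/* Illustrative sketch (not part of the patch): a bo-list entry for a buffer
 * the GPU both reads and writes; 'gem_handle' and 'iova' are placeholder
 * parameters.
 */
static inline struct drm_msm_gem_submit_bo
tu_example_submit_bo(uint32_t gem_handle, uint64_t iova)
{
   return (struct drm_msm_gem_submit_bo) {
      .flags = MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE,
      .handle = gem_handle,
      .presumed = iova,
   };
}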
+
struct tu_bo_list
{
uint32_t count;
struct tu_cmd_buffer
{
- VK_LOADER_DATA _loader_data;
+ struct vk_object_base base;
struct tu_device *device;
struct tu_cs draw_epilogue_cs;
struct tu_cs sub_cs;
- bool has_tess;
-
uint32_t vsc_draw_strm_pitch;
uint32_t vsc_prim_strm_pitch;
};
struct tu_event
{
+ struct vk_object_base base;
struct tu_bo bo;
};
struct tu_shader_module
{
+ struct vk_object_base base;
+
unsigned char sha1[20];
uint32_t code_size;
struct tu_pipeline
{
+ struct vk_object_base base;
+
struct tu_cs cs;
struct tu_pipeline_layout *layout;
/* gras_su_cntl without line width, used for dynamic line width state */
uint32_t gras_su_cntl;
+ /* draw states for the pipeline */
+ struct tu_draw_state load_state, rast_state, ds_state, blend_state;
+
struct
{
- struct tu_cs_entry state_ib;
- struct tu_cs_entry binning_state_ib;
+ struct tu_draw_state state;
+ struct tu_draw_state binning_state;
struct tu_program_descriptor_linkage link[MESA_SHADER_STAGES];
} program;
struct
{
- struct tu_cs_entry state_ib;
- } load_state;
-
- struct
- {
- struct tu_cs_entry state_ib;
- struct tu_cs_entry binning_state_ib;
+ struct tu_draw_state state;
+ struct tu_draw_state binning_state;
uint32_t bindings_used;
} vi;
bool upper_left_domain_origin;
} tess;
- struct
- {
- struct tu_cs_entry state_ib;
- } rast;
-
- struct
- {
- struct tu_cs_entry state_ib;
- } ds;
-
- struct
- {
- struct tu_cs_entry state_ib;
- } blend;
-
struct
{
uint32_t local_size[3];
const struct ir3_shader_variant *hs,
const struct ir3_shader_variant *ds,
const struct ir3_shader_variant *gs,
- const struct ir3_shader_variant *fs);
+ const struct ir3_shader_variant *fs,
+ uint32_t patch_control_points,
+ bool vshs_workgroup);
void
tu6_emit_fs_inputs(struct tu_cs *cs, const struct ir3_shader_variant *fs);
struct tu_image
{
+ struct vk_object_base base;
+
VkImageType type;
/* The original VkFormat provided by the client. This may not match any
* of the actual surface formats.
struct tu_image_view
{
+ struct vk_object_base base;
+
struct tu_image *image; /**< VkImageViewCreateInfo::image */
uint64_t base_addr;
uint32_t RB_2D_DST_INFO;
uint32_t RB_BLIT_DST_INFO;
+
+ /* for d32s8 separate stencil */
+ uint64_t stencil_base_addr;
+ uint32_t stencil_layer_size;
+ uint32_t stencil_PITCH;
};
struct tu_sampler_ycbcr_conversion {
+ struct vk_object_base base;
+
VkFormat format;
VkSamplerYcbcrModelConversion ycbcr_model;
VkSamplerYcbcrRange ycbcr_range;
};
struct tu_sampler {
+ struct vk_object_base base;
+
uint32_t descriptor[A6XX_TEX_SAMP_DWORDS];
struct tu_sampler_ycbcr_conversion *ycbcr_sampler;
};
void
tu_cs_image_flag_ref(struct tu_cs *cs, const struct tu_image_view *iview, uint32_t layer);
+void
+tu_cs_image_stencil_ref(struct tu_cs *cs, const struct tu_image_view *iview, uint32_t layer);
+
+#define tu_image_view_stencil(iview, x) \
+ ((iview->x & ~A6XX_##x##_COLOR_FORMAT__MASK) | A6XX_##x##_COLOR_FORMAT(FMT6_8_UINT))
+
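/* Illustrative sketch (not part of the patch): for D32S8 separate stencil the
 * macro re-packs a view's register value with the 8-bit uint stencil format,
 * e.g.
 *
 *    uint32_t blit_dst = tu_image_view_stencil(iview, RB_BLIT_DST_INFO);
 *
 * clears the COLOR_FORMAT field of iview->RB_BLIT_DST_INFO and ORs in
 * FMT6_8_UINT.
 */
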
VkResult
tu_image_create(VkDevice _device,
const VkImageCreateInfo *pCreateInfo,
VkImage *out_image_h);
void
-tu_image_view_init(struct tu_image_view *view,
- const VkImageViewCreateInfo *pCreateInfo);
+tu_image_view_init(struct tu_image_view *iview,
+ const VkImageViewCreateInfo *pCreateInfo,
+ bool limited_z24s8);
struct tu_buffer_view
{
+ struct vk_object_base base;
+
uint32_t descriptor[A6XX_TEX_CONST_DWORDS];
struct tu_buffer *buffer;
struct tu_framebuffer
{
+ struct vk_object_base base;
+
uint32_t width;
uint32_t height;
uint32_t layers;
bool load;
bool store;
int32_t gmem_offset;
+ /* for D32S8 separate stencil: */
+ bool load_stencil;
+ bool store_stencil;
+ int32_t gmem_offset_stencil;
};
struct tu_render_pass
{
+ struct vk_object_base base;
+
uint32_t attachment_count;
uint32_t subpass_count;
uint32_t gmem_pixels;
struct tu_query_pool
{
+ struct vk_object_base base;
+
VkQueryType type;
uint32_t stride;
uint64_t size;
struct tu_semaphore
{
+ struct vk_object_base base;
+
struct tu_semaphore_part permanent;
struct tu_semaphore_part temporary;
};
VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const void *pData);
-int
-tu_drm_get_gpu_id(const struct tu_physical_device *dev, uint32_t *id);
-
-int
-tu_drm_get_gmem_size(const struct tu_physical_device *dev, uint32_t *size);
-
-int
-tu_drm_get_gmem_base(const struct tu_physical_device *dev, uint64_t *base);
+VkResult
+tu_physical_device_init(struct tu_physical_device *device,
+ struct tu_instance *instance);
+VkResult
+tu_enumerate_devices(struct tu_instance *instance);
int
tu_drm_submitqueue_new(const struct tu_device *dev,
void
tu_drm_submitqueue_close(const struct tu_device *dev, uint32_t queue_id);
-uint32_t
-tu_gem_new(const struct tu_device *dev, uint64_t size, uint32_t flags);
-uint32_t
-tu_gem_import_dmabuf(const struct tu_device *dev,
- int prime_fd,
- uint64_t size);
-int
-tu_gem_export_dmabuf(const struct tu_device *dev, uint32_t gem_handle);
-void
-tu_gem_close(const struct tu_device *dev, uint32_t gem_handle);
-uint64_t
-tu_gem_info_offset(const struct tu_device *dev, uint32_t gem_handle);
-uint64_t
-tu_gem_info_iova(const struct tu_device *dev, uint32_t gem_handle);
-
#define TU_DEFINE_HANDLE_CASTS(__tu_type, __VkType) \
\
static inline struct __tu_type *__tu_type##_from_handle(__VkType _handle) \