#endif
#include "c11/threads.h"
-#include "compiler/shader_enums.h"
#include "main/macros.h"
#include "util/list.h"
#include "util/macros.h"
#include "vk_alloc.h"
#include "vk_debug_report.h"
+#include "wsi_common.h"
-#include "drm/msm_drm.h"
+#include "drm-uapi/msm_drm.h"
#include "ir3/ir3_compiler.h"
#include "ir3/ir3_shader.h"
*/
#define TU_BUFFER_OPS_CS_THRESHOLD 4096
+/* Size in dwords of the a6xx texture-constant descriptor and sampler
+ * state blocks; sized to match what the hardware consumes and used to
+ * dimension tu_image_view::descriptor and tu_sampler::state below. */
+#define A6XX_TEX_CONST_DWORDS 16
+#define A6XX_TEX_SAMP_DWORDS 4
+
enum tu_mem_heap
{
TU_MEM_HEAP_VRAM,
memcpy((dest), (src), (count) * sizeof(*(src))); \
})
+#define COND(bool, val) ((bool) ? (val) : 0)
+
/* Whenever we generate an error, pass it through this function. Useful for
* debugging, where we can break on it. Only call at error site, not when
* propagating errors. Might be useful to plug in a stack trace here.
uint8_t device_uuid[VK_UUID_SIZE];
uint8_t cache_uuid[VK_UUID_SIZE];
+ struct wsi_device wsi_device;
+
int local_fd;
int master_fd;
struct tu_instance_extension_table enabled_extensions;
};
+/* Set up window-system-integration (WSI) support for a physical device
+ * (populates tu_physical_device::wsi_device); pairs with
+ * tu_wsi_finish(), which tears it down. */
+VkResult
+tu_wsi_init(struct tu_physical_device *physical_device);
+void
+tu_wsi_finish(struct tu_physical_device *physical_device);
+
bool
tu_instance_extension_supported(const char *name);
uint32_t
uint64_t va;
uint32_t *mapped_ptr;
struct tu_descriptor_range *dynamic_descriptors;
+
+ struct tu_bo *descriptors[0];
};
struct tu_push_descriptor_set
struct tu_descriptor_pool
{
- uint8_t *mapped_ptr;
+ struct tu_bo bo;
uint64_t current_offset;
uint64_t size;
uint32_t pipe_sizes[MAX_VSC_PIPES];
};
+/* Dirty flags for command-buffer state (tu_cmd_state::dirty).  A set bit
+ * marks state that changed since it was last consumed.  Low bits track
+ * bound objects (pipeline, vertex buffers, descriptor sets); bits 16+
+ * track the Vulkan dynamic states set via the vkCmdSet* entry points. */
+enum tu_cmd_dirty_bits
+{
+ TU_CMD_DIRTY_PIPELINE = 1 << 0,
+ TU_CMD_DIRTY_VERTEX_BUFFERS = 1 << 1,
+ TU_CMD_DIRTY_DESCRIPTOR_SETS = 1 << 2,
+
+ TU_CMD_DIRTY_DYNAMIC_LINE_WIDTH = 1 << 16,
+ TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK = 1 << 17,
+ TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK = 1 << 18,
+ TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE = 1 << 19,
+};
+
+
struct tu_cmd_state
{
- /* Vertex descriptors */
- uint64_t vb_va;
- unsigned vb_size;
+ uint32_t dirty;
+
+ struct tu_pipeline *pipeline;
+
+ /* Vertex buffers */
+ struct
+ {
+ struct tu_buffer *buffers[MAX_VBS];
+ VkDeviceSize offsets[MAX_VBS];
+ } vb;
struct tu_dynamic_state dynamic;
struct tu_bo_list bo_list;
struct tu_cs cs;
+ struct tu_cs draw_cs;
+ struct tu_cs draw_state;
struct tu_cs tile_cs;
uint16_t marker_reg;
struct tu_device_memory *memory,
int *pFD);
+/* Return the descriptor state tracked for the given bind point of a
+ * command buffer, indexing descriptors[] directly by the
+ * VkPipelineBindPoint value.  Only the graphics bind point is supported
+ * for now (asserted); compute would need its own handling. */
+static inline struct tu_descriptor_state *
+tu_get_descriptors_state(struct tu_cmd_buffer *cmd_buffer,
+ VkPipelineBindPoint bind_point)
+{
+ assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
+ return &cmd_buffer->descriptors[bind_point];
+}
+
+
/*
* Takes x,y,z as exact numbers of invocations, instead of blocks.
*
struct tu_event
{
- uint64_t *map;
+ /* Backing buffer object for the event's payload, replacing the old
+ * raw host-mapped pointer.  NOTE(review): presumably the BO is what
+ * lets the GPU reference the event -- confirm against users. */
+ struct tu_bo bo;
};
struct tu_shader_module;
bool include_binning_pass;
};
+/* Maps a shader's linear resource slots (e.g. texture, sampler or UBO
+ * index i) back to the (descriptor set, binding) pair each slot came
+ * from.  Fixed capacity of 32 entries; `num` is the count in use. */
+struct tu_descriptor_map
+{
+ unsigned num;
+ int set[32];
+ int binding[32];
+};
+
+
struct tu_shader
{
struct ir3_shader ir3_shader;
+ struct tu_descriptor_map texture_map;
+ struct tu_descriptor_map sampler_map;
+ struct tu_descriptor_map ubo_map;
+
/* This may be true for vertex shaders. When true, variants[1] is the
* binning variant and binning_binary is non-NULL.
*/
const struct tu_shader_compile_options *options,
const VkAllocationCallbacks *alloc);
+/* Per-stage descriptor linkage recorded in the pipeline (one entry per
+ * MESA_SHADER_STAGES slot in tu_pipeline).  Mirrors the per-shader
+ * texture/sampler/ubo maps stored in struct tu_shader, plus the ir3
+ * UBO analysis and const-file layout needed to emit descriptor state. */
+struct tu_program_descriptor_linkage
+{
+ struct ir3_ubo_analysis_state ubo_state;
+
+ uint32_t constlen;
+
+ uint32_t offset_ubo; /* ubo pointers const offset */
+ uint32_t num_ubo; /* number of ubo pointers */
+
+ struct tu_descriptor_map texture_map;
+ struct tu_descriptor_map sampler_map;
+ struct tu_descriptor_map ubo_map;
+};
+
+
struct tu_pipeline
{
struct tu_cs cs;
struct tu_bo binary_bo;
struct tu_cs_entry state_ib;
struct tu_cs_entry binning_state_ib;
+
+ struct tu_program_descriptor_linkage link[MESA_SHADER_STAGES];
} program;
struct
VkExtent3D extent;
uint32_t level_count;
uint32_t layer_count;
+ VkSampleCountFlagBits samples;
+
VkDeviceSize size;
uint32_t alignment;
VkDeviceSize layer_size;
struct tu_image_level levels[15];
unsigned tile_mode;
+ unsigned cpp;
unsigned queue_family_mask;
bool exclusive;
VkDeviceMemory owned_memory;
/* Set when bound */
- const struct tu_bo *bo;
+ struct tu_bo *bo;
VkDeviceSize bo_offset;
};
uint32_t level_count;
VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */
- uint32_t descriptor[16];
+ uint32_t descriptor[A6XX_TEX_CONST_DWORDS];
/* Descriptor for use as a storage image as opposed to a sampled image.
* This has a few differences for cube maps (e.g. type).
*/
- uint32_t storage_descriptor[16];
+ uint32_t storage_descriptor[A6XX_TEX_CONST_DWORDS];
};
struct tu_sampler
{
+ /* Packed a6xx sampler hardware state (A6XX_TEX_SAMP_DWORDS = 4 dwords). */
+ uint32_t state[A6XX_TEX_SAMP_DWORDS];
+
+ /* NOTE(review): name suggests the sampler references a border color
+ * that must be emitted separately -- confirm at the use sites. */
+ bool needs_border;
};
struct tu_image_create_info
struct tu_device *device,
struct tu_cmd_buffer *cmd_buffer,
struct tu_descriptor_set *set,
- VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+ VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const void *pData);
void
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_set_layout,
VkDescriptorSetLayout)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_update_template,
- VkDescriptorUpdateTemplateKHR)
+ VkDescriptorUpdateTemplate)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_device_memory, VkDeviceMemory)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_fence, VkFence)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_event, VkEvent)