* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
*/
#ifndef TU_PRIVATE_H
#include "vk_alloc.h"
#include "vk_debug_report.h"
+#include "drm/msm_drm.h"
#include "tu_descriptor_set.h"
#include "tu_extensions.h"
#include <vulkan/vulkan.h>
#include <vulkan/vulkan_intel.h>
-#include "drm/freedreno_ringbuffer.h"
-
#include "tu_entrypoints.h"
#define MAX_VBS 32
#define MAX_VERTEX_ATTRIBS 32
#define MAX_RTS 8
+#define MAX_VSC_PIPES 32
#define MAX_VIEWPORTS 16
#define MAX_SCISSORS 16
#define MAX_DISCARD_RECTANGLES 4
#define MAX_PUSH_DESCRIPTORS 32
#define MAX_DYNAMIC_UNIFORM_BUFFERS 16
#define MAX_DYNAMIC_STORAGE_BUFFERS 8
-#define MAX_DYNAMIC_BUFFERS \
+#define MAX_DYNAMIC_BUFFERS \
(MAX_DYNAMIC_UNIFORM_BUFFERS + MAX_DYNAMIC_STORAGE_BUFFERS)
#define MAX_SAMPLES_LOG2 4
#define NUM_META_FS_KEYS 13
}
}
-#define for_each_bit(b, dword) \
- for (uint32_t __dword = (dword); (b) = __builtin_ffs(__dword) - 1, __dword; \
- __dword &= ~(1 << (b)))
+#define for_each_bit(b, dword) \
+ for (uint32_t __dword = (dword); \
+ (b) = __builtin_ffs(__dword) - 1, __dword; __dword &= ~(1 << (b)))
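+/* Illustrative use (names hypothetical, not part of this change):
+ *
+ *    uint32_t b;
+ *    for_each_bit(b, dirty_mask)
+ *       process_binding(b);
+ *
+ * Each visited bit is cleared from the scratch copy of the mask, so the
+ * loop terminates once the mask is exhausted.
+ */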
-#define typed_memcpy(dest, src, count) \
- ({ \
- STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
- memcpy((dest), (src), (count) * sizeof(*(src))); \
+#define typed_memcpy(dest, src, count) \
+ ({ \
+ STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
+ memcpy((dest), (src), (count) * sizeof(*(src))); \
})
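+/* Illustrative use: a memcpy with a compile-time element-size check, e.g.
+ *
+ *    typed_memcpy(dst_set->dynamic_descriptors,
+ *                 src_set->dynamic_descriptors, count);
+ *
+ * (dst_set/src_set are hypothetical names here.)
+ */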
/* Whenever we generate an error, pass it through this function. Useful for
const char *format,
...);
-#define vk_error(instance, error) \
-   __vk_errorf(instance, error, __FILE__, __LINE__, NULL);
+#define vk_error(instance, error) \
+   __vk_errorf(instance, error, __FILE__, __LINE__, NULL)
-#define vk_errorf(instance, error, format, ...) \
-   __vk_errorf(instance, error, __FILE__, __LINE__, format, ##__VA_ARGS__);
+#define vk_errorf(instance, error, format, ...) \
+   __vk_errorf(instance, error, __FILE__, __LINE__, format, ##__VA_ARGS__)
void
__tu_finishme(const char *file, int line, const char *format, ...)
- tu_printflike(3, 4);
+ tu_printflike(3, 4);
void
tu_loge(const char *format, ...) tu_printflike(1, 2);
void
/**
* Print a FINISHME message, including its source location.
*/
-#define tu_finishme(format, ...) \
- do { \
- static bool reported = false; \
- if (!reported) { \
- __tu_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__); \
- reported = true; \
- } \
+#define tu_finishme(format, ...) \
+ do { \
+ static bool reported = false; \
+ if (!reported) { \
+ __tu_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__); \
+ reported = true; \
+ } \
} while (0)
/* A non-fatal assert. Useful for debugging. */
#ifdef DEBUG
-#define tu_assert(x) \
- ({ \
- if (unlikely(!(x))) \
- fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x); \
+#define tu_assert(x) \
+ ({ \
+ if (unlikely(!(x))) \
+ fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x); \
})
#else
#define tu_assert(x)
#endif
-#define stub_return(v) \
- do { \
- tu_finishme("stub %s", __func__); \
- return (v); \
- } while (0)
+/* Suppress -Wunused in stub functions */
+#define tu_use_args(...) __tu_use_args(0, ##__VA_ARGS__)
+static inline void
+__tu_use_args(int ignore, ...)
+{
+}
-#define stub() \
- do { \
- tu_finishme("stub %s", __func__); \
- return; \
+#define tu_stub() \
+ do { \
+ tu_finishme("stub %s", __func__); \
} while (0)
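+/* Illustrative use (hypothetical entrypoint): log the unimplemented path
+ * once, consume the arguments, and return:
+ *
+ *    void
+ *    tu_CmdSomething(VkCommandBuffer commandBuffer, uint32_t value)
+ *    {
+ *       tu_use_args(commandBuffer, value);
+ *       tu_stub();
+ *    }
+ */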
void *
int local_fd;
int master_fd;
- struct fd_device *drm_device;
unsigned gpu_id;
uint32_t gmem_size;
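+ /* alignment of GMEM tile width/height, in pixels */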
+ uint32_t tile_align_w;
+ uint32_t tile_align_h;
 /* This is the driver's on-disk cache used as a fallback as opposed to
* the pipeline cache defined by apps.
uint32_t queue_family_index;
int queue_idx;
VkDeviceQueueCreateFlags flags;
-};
-struct tu_bo_list
-{
- unsigned capacity;
- pthread_mutex_t mutex;
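+ /* MSM submitqueue backing this VkQueue, and the fence fd of the most
+ * recent submission (presumably used to implement vkQueueWaitIdle) */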
+ uint32_t msm_queue_id;
+ int submit_fence_fd;
};
struct tu_device
VkAllocationCallbacks alloc;
struct tu_instance *instance;
- struct radeon_winsys *ws;
struct tu_meta_state meta_state;
mtx_t shader_slab_mutex;
struct tu_device_extension_table enabled_extensions;
-
- /* Whether the driver uses a global BO list. */
- bool use_global_bo_list;
-
- struct tu_bo_list bo_list;
};
struct tu_bo
{
uint32_t gem_handle;
uint64_t size;
- uint64_t offset;
uint64_t iova;
void *map;
};
const struct tu_descriptor_set_layout *layout;
uint32_t size;
- struct radeon_winsys_bo *bo;
uint64_t va;
uint32_t *mapped_ptr;
struct tu_descriptor_range *dynamic_descriptors;
struct tu_descriptor_pool
{
- struct radeon_winsys_bo *bo;
uint8_t *mapped_ptr;
uint64_t current_offset;
uint64_t size;
uint32_t dynamic_buffers[4 * MAX_DYNAMIC_BUFFERS];
};
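+/* One bin of a tiled render pass: its footprint within the framebuffer and
+ * the visibility-stream (VSC) pipe/slot it is assigned to. */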
+struct tu_tile
+{
+ uint8_t pipe;
+ uint8_t slot;
+ VkOffset2D begin;
+ VkOffset2D end;
+};
+
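+/* Per-render-pass tiling layout: how the render area is split into tiles
+ * that fit in GMEM and how those tiles map onto the hardware's VSC pipes.
+ * buffer_cpp/gmem_offsets hold MAX_RTS + 2 entries, presumably to cover
+ * depth and stencil in addition to the color attachments. */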
+struct tu_tiling_config
+{
+ VkRect2D render_area;
+ uint32_t buffer_cpp[MAX_RTS + 2];
+ uint32_t buffer_count;
+
+ /* position and size of the first tile */
+ VkRect2D tile0;
+ /* number of tiles */
+ VkExtent2D tile_count;
+
+ uint32_t gmem_offsets[MAX_RTS + 2];
+
+ /* size of the first VSC pipe */
+ VkExtent2D pipe0;
+ /* number of VSC pipes */
+ VkExtent2D pipe_count;
+
+ /* pipe register values */
+ uint32_t pipe_config[MAX_VSC_PIPES];
+ uint32_t pipe_sizes[MAX_VSC_PIPES];
+};
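+/* Assumed relation between the fields above (a sketch, not verified against
+ * the emit code): tile_count.width is roughly
+ * DIV_ROUND_UP(render_area.extent.width, tile0.extent.width), with the tile
+ * size aligned to tile_align_w/tile_align_h, and the pipes partition the
+ * tile grid analogously. */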
+
struct tu_cmd_state
{
/* Vertex descriptors */
uint32_t index_type;
uint32_t max_index_count;
uint64_t index_va;
+
+ const struct tu_render_pass *pass;
+ const struct tu_subpass *subpass;
+ const struct tu_framebuffer *framebuffer;
+ struct tu_attachment_state *attachments;
+
+ struct tu_tiling_config tiling_config;
};
struct tu_cmd_pool
uint8_t *map;
unsigned offset;
uint64_t size;
- struct radeon_winsys_bo *upload_bo;
struct list_head list;
};
TU_CMD_BUFFER_STATUS_PENDING,
};
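+/* Set of BOs referenced by a command buffer.  bo_infos uses the kernel's
+ * struct drm_msm_gem_submit_bo so it can be handed directly to the
+ * DRM_MSM_GEM_SUBMIT ioctl. */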
+struct tu_bo_list
+{
+ uint32_t count;
+ uint32_t capacity;
+ struct drm_msm_gem_submit_bo *bo_infos;
+};
+
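+/* Sentinel returned by tu_bo_list_add() on failure (e.g. out of memory). */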
+#define TU_BO_LIST_FAILED (~0)
+
+void
+tu_bo_list_init(struct tu_bo_list *list);
+void
+tu_bo_list_destroy(struct tu_bo_list *list);
+void
+tu_bo_list_reset(struct tu_bo_list *list);
+uint32_t
+tu_bo_list_add(struct tu_bo_list *list,
+ const struct tu_bo *bo,
+ uint32_t flags);
+VkResult
+tu_bo_list_merge(struct tu_bo_list *list, const struct tu_bo_list *other);
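+/* Illustrative lifecycle (not part of this change): init once, reset at the
+ * start of recording, tu_bo_list_add() every BO the GPU may touch, then pass
+ * bo_infos/count to the submit ioctl. */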
+
+struct tu_cs_entry
+{
+ /* No ownership */
+ const struct tu_bo *bo;
+
+ uint32_t size;
+ uint64_t offset;
+};
+
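+/* Growable command stream.  start/cur/end point into the mapping of the
+ * current BO; when it fills up, a new BO (sized by next_bo_size, which
+ * presumably grows geometrically) is allocated and the finished range is
+ * recorded in entries[]. */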
+struct tu_cs
+{
+ uint32_t *start;
+ uint32_t *cur;
+ uint32_t *end;
+
+ /* for tu_cs_reserve_space_assert */
+ uint32_t *reserved_end;
+
+ uint32_t next_bo_size;
+
+ struct tu_cs_entry *entries;
+ uint32_t entry_count;
+ uint32_t entry_capacity;
+
+ struct tu_bo **bos;
+ uint32_t bo_count;
+ uint32_t bo_capacity;
+};
+
struct tu_cmd_buffer
{
VK_LOADER_DATA _loader_data;
VkCommandBufferUsageFlags usage_flags;
VkCommandBufferLevel level;
enum tu_cmd_buffer_status status;
- struct radeon_cmdbuf *cs;
+
struct tu_cmd_state state;
struct tu_vertex_binding vertex_bindings[MAX_VBS];
uint32_t queue_family_index;
struct tu_cmd_buffer_upload upload;
- uint32_t scratch_size_needed;
- uint32_t compute_scratch_size_needed;
- uint32_t esgs_ring_size_needed;
- uint32_t gsvs_ring_size_needed;
- bool tess_rings_needed;
- bool sample_positions_needed;
-
VkResult record_result;
- uint32_t gfx9_fence_offset;
- struct radeon_winsys_bo *gfx9_fence_bo;
- uint32_t gfx9_fence_idx;
- uint64_t gfx9_eop_bug_va;
+ struct tu_bo_list bo_list;
+ struct tu_cs cs;
- /**
- * Whether a query pool has been resetted and we have to flush caches.
- */
- bool pending_reset_query;
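+ /* scratch register and sequence number used to write progress markers
+ * from the command stream, presumably for hang debugging */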
+ uint16_t marker_reg;
+ uint32_t marker_seqno;
+
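+ /* GPU-visible scratch memory for this command buffer; scratch_seqno
+ * presumably tracks the last value written to it (e.g. by event writes) */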
+ struct tu_bo scratch_bo;
+ uint32_t scratch_seqno;
+
+ /* current cs; command packets are always emitted to it */
+ struct tu_cs *cur_cs;
};
bool
struct tu_event
{
- struct radeon_winsys_bo *bo;
uint64_t *map;
};
#define TU_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)
-#define tu_foreach_stage(stage, stage_bits) \
- for (gl_shader_stage stage, \
- __tmp = (gl_shader_stage)((stage_bits)&TU_STAGE_MASK); \
- stage = __builtin_ffs(__tmp) - 1, __tmp; \
- __tmp &= ~(1 << (stage)))
+#define tu_foreach_stage(stage, stage_bits) \
+ for (gl_shader_stage stage, \
+ __tmp = (gl_shader_stage)((stage_bits) & TU_STAGE_MASK); \
+ stage = __builtin_ffs(__tmp) - 1, __tmp; __tmp &= ~(1 << (stage)))
struct tu_shader_module
{
bool
tu_dcc_formats_compatible(VkFormat format1, VkFormat format2);
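+/* Per-mip-level memory layout.  The fixed array of 15 in tu_image below
+ * presumably corresponds to a 16384-pixel maximum dimension
+ * (log2(16384) + 1 levels). */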
+struct tu_image_level
+{
+ VkDeviceSize offset;
+ VkDeviceSize size;
+ uint32_t pitch;
+};
+
struct tu_image
{
VkImageType type;
VkImageUsageFlags usage; /**< Superset of VkImageCreateInfo::usage. */
 VkImageTiling tiling; /**< VkImageCreateInfo::tiling */
 VkImageCreateFlags flags; /**< VkImageCreateInfo::flags */
+ VkExtent3D extent;
+ uint32_t level_count;
+ uint32_t layer_count;
VkDeviceSize size;
uint32_t alignment;
+ /* memory layout */
+ VkDeviceSize layer_size;
+ struct tu_image_level levels[15];
+ unsigned tile_mode;
+
unsigned queue_family_mask;
bool exclusive;
bool shareable;
 /* For VK_ANDROID_native_buffer, the WSI image owns the memory. */
VkDeviceMemory owned_memory;
+
+ /* Set when bound */
+ const struct tu_bo *bo;
+ VkDeviceSize bo_offset;
};
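+/* Resolve VK_REMAINING_ARRAY_LAYERS / VK_REMAINING_MIP_LEVELS in a
+ * subresource range against the image's actual dimensions. */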
unsigned
tu_get_layerCount(const struct tu_image *image,
const VkImageSubresourceRange *range)
{
- abort();
+ return range->layerCount == VK_REMAINING_ARRAY_LAYERS
+ ? image->layer_count - range->baseArrayLayer
+ : range->layerCount;
}
static inline uint32_t
tu_get_levelCount(const struct tu_image *image,
const VkImageSubresourceRange *range)
{
- abort();
+ return range->levelCount == VK_REMAINING_MIP_LEVELS
+ ? image->level_count - range->baseMipLevel
+ : range->levelCount;
}
struct tu_image_view
struct tu_buffer_view
{
- struct radeon_winsys_bo *bo;
VkFormat vk_format;
uint64_t range; /**< VkBufferViewCreateInfo::range */
uint32_t state[4];
const struct VkExtent3D imageExtent)
{
switch (imageType) {
- case VK_IMAGE_TYPE_1D:
- return (VkExtent3D){ imageExtent.width, 1, 1 };
- case VK_IMAGE_TYPE_2D:
- return (VkExtent3D){ imageExtent.width, imageExtent.height, 1 };
- case VK_IMAGE_TYPE_3D:
- return imageExtent;
- default:
- unreachable("invalid image type");
+ case VK_IMAGE_TYPE_1D:
+ return (VkExtent3D) { imageExtent.width, 1, 1 };
+ case VK_IMAGE_TYPE_2D:
+ return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
+ case VK_IMAGE_TYPE_3D:
+ return imageExtent;
+ default:
+ unreachable("invalid image type");
}
}
const struct VkOffset3D imageOffset)
{
switch (imageType) {
- case VK_IMAGE_TYPE_1D:
- return (VkOffset3D){ imageOffset.x, 0, 0 };
- case VK_IMAGE_TYPE_2D:
- return (VkOffset3D){ imageOffset.x, imageOffset.y, 0 };
- case VK_IMAGE_TYPE_3D:
- return imageOffset;
- default:
- unreachable("invalid image type");
+ case VK_IMAGE_TYPE_1D:
+ return (VkOffset3D) { imageOffset.x, 0, 0 };
+ case VK_IMAGE_TYPE_2D:
+ return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
+ case VK_IMAGE_TYPE_3D:
+ return imageOffset;
+ default:
+ unreachable("invalid image type");
}
}
struct tu_query_pool
{
- struct radeon_winsys_bo *bo;
uint32_t stride;
uint32_t availability_offset;
uint64_t size;
struct tu_semaphore
{
- /* use a winsys sem for non-exportable */
- struct radeon_winsys_sem *sem;
uint32_t syncobj;
uint32_t temp_syncobj;
};
struct tu_fence
{
- struct radeon_winsys_fence *fence;
- bool submitted;
- bool signalled;
-
uint32_t syncobj;
uint32_t temp_syncobj;
};
-/* tu_nir_to_llvm.c */
-struct tu_shader_variant_info;
-struct tu_nir_compiler_options;
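+/* ioctl wrappers for the MSM DRM interface (drm/msm_drm.h) */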
+int
+tu_drm_get_gpu_id(const struct tu_physical_device *dev, uint32_t *id);
+
+int
+tu_drm_get_gmem_size(const struct tu_physical_device *dev, uint32_t *size);
-struct radeon_winsys_sem;
+int
+tu_drm_submitqueue_new(const struct tu_device *dev,
+ int priority,
+ uint32_t *queue_id);
+
+void
+tu_drm_submitqueue_close(const struct tu_device *dev, uint32_t queue_id);
uint32_t
-tu_gem_new(struct tu_device *dev, uint64_t size, uint32_t flags);
+tu_gem_new(const struct tu_device *dev, uint64_t size, uint32_t flags);
void
-tu_gem_close(struct tu_device *dev, uint32_t gem_handle);
+tu_gem_close(const struct tu_device *dev, uint32_t gem_handle);
uint64_t
-tu_gem_info_offset(struct tu_device *dev, uint32_t gem_handle);
+tu_gem_info_offset(const struct tu_device *dev, uint32_t gem_handle);
uint64_t
-tu_gem_info_iova(struct tu_device *dev, uint32_t gem_handle);
+tu_gem_info_iova(const struct tu_device *dev, uint32_t gem_handle);
#define TU_DEFINE_HANDLE_CASTS(__tu_type, __VkType) \
- \
+ \
static inline struct __tu_type *__tu_type##_from_handle(__VkType _handle) \
- { \
- return (struct __tu_type *)_handle; \
- } \
- \
+ { \
+ return (struct __tu_type *) _handle; \
+ } \
+ \
static inline __VkType __tu_type##_to_handle(struct __tu_type *_obj) \
- { \
- return (__VkType)_obj; \
+ { \
+ return (__VkType) _obj; \
}
#define TU_DEFINE_NONDISP_HANDLE_CASTS(__tu_type, __VkType) \
- \
+ \
static inline struct __tu_type *__tu_type##_from_handle(__VkType _handle) \
- { \
- return (struct __tu_type *)(uintptr_t)_handle; \
- } \
- \
+ { \
+ return (struct __tu_type *) (uintptr_t) _handle; \
+ } \
+ \
static inline __VkType __tu_type##_to_handle(struct __tu_type *_obj) \
- { \
- return (__VkType)(uintptr_t)_obj; \
+ { \
+ return (__VkType) (uintptr_t) _obj; \
}
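+/* Illustrative use: TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer)
+ * declares `struct tu_cmd_buffer *cmd` initialized from the Vulkan handle
+ * via the casts defined above. */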
#define TU_FROM_HANDLE(__tu_type, __name, __handle) \
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_pool, VkDescriptorPool)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_set, VkDescriptorSet)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_set_layout,
- VkDescriptorSetLayout)
+ VkDescriptorSetLayout)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_update_template,
- VkDescriptorUpdateTemplateKHR)
+ VkDescriptorUpdateTemplateKHR)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_device_memory, VkDeviceMemory)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_fence, VkFence)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_event, VkEvent)