#define NUM_DEPTH_CLEAR_PIPELINES 3
+/* Memory heaps reported for this device (paired with radv_mem_type below). */
+enum radv_mem_heap {
+ RADV_MEM_HEAP_VRAM,
+ RADV_MEM_HEAP_VRAM_CPU_ACCESS, /* NOTE(review): presumably CPU-visible VRAM — confirm */
+ RADV_MEM_HEAP_GTT,
+ RADV_MEM_HEAP_COUNT /* number of heaps, not a real heap */
+};
+
+/* Memory types exposed on top of the radv_mem_heap heaps. */
+enum radv_mem_type {
+ RADV_MEM_TYPE_VRAM,
+ RADV_MEM_TYPE_GTT_WRITE_COMBINE,
+ RADV_MEM_TYPE_VRAM_CPU_ACCESS,
+ RADV_MEM_TYPE_GTT_CACHED,
+ RADV_MEM_TYPE_COUNT /* number of types, not a real type */
+};
+
+
+/* Bit flags for the uint64_t debug_flags fields on the physical- and
+ * logical-device structs (one bit per toggle, combinable with |). */
+enum {
+ RADV_DEBUG_FAST_CLEARS = 0x1,
+ RADV_DEBUG_NO_DCC = 0x2,
+ RADV_DEBUG_DUMP_SHADERS = 0x4,
+ RADV_DEBUG_NO_CACHE = 0x8,
+ RADV_DEBUG_DUMP_SHADER_STATS = 0x10,
+ RADV_DEBUG_NO_HIZ = 0x20,
+ RADV_DEBUG_NO_COMPUTE_QUEUE = 0x40,
+ RADV_DEBUG_UNSAFE_MATH = 0x80,
+};
+
#define radv_noreturn __attribute__((__noreturn__))
#define radv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
__dword &= ~(1 << (b)))
#define typed_memcpy(dest, src, count) ({ \
- static_assert(sizeof(*src) == sizeof(*dest), ""); \
+ STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
memcpy((dest), (src), (count) * sizeof(*(src))); \
})
#define zero(x) (memset(&(x), 0, sizeof(x)))
-/* Define no kernel as 1, since that's an illegal offset for a kernel */
-#define NO_KERNEL 1
-
-struct radv_common {
- VkStructureType sType;
- const void* pNext;
-};
-
/* Whenever we generate an error, pass it through this function. Useful for
* debugging, where we can break on it. Only call at error site, not when
* propagating errors. Might be useful to plug in a stack trace here.
* Print a FINISHME message, including its source location.
*/
#define radv_finishme(format, ...) \
- __radv_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__);
+ do { \
+ /* function-local static: each FINISHME call site reports only once */ \
+ static bool reported = false; \
+ if (!reported) { \
+ __radv_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__); \
+ reported = true; \
+ } \
+ } while (0)
/* A non-fatal assert. Useful for debugging. */
#ifdef DEBUG
struct radeon_winsys *ws;
struct radeon_info rad_info;
- uint32_t chipset_id;
char path[20];
const char * name;
- uint64_t aperture_size;
- int cmd_parser_version;
- uint32_t pci_vendor_id;
- uint32_t pci_device_id;
-
uint8_t uuid[VK_UUID_SIZE];
struct wsi_device wsi_device;
uint32_t apiVersion;
int physicalDeviceCount;
struct radv_physical_device physicalDevice;
+
+ uint64_t debug_flags;
};
VkResult radv_init_wsi(struct radv_physical_device *physical_device);
VkDescriptorSetLayout img_ds_layout;
VkPipeline pipeline;
} btoi;
+ struct {
+ VkPipelineLayout img_p_layout;
+ VkDescriptorSetLayout img_ds_layout;
+ VkPipeline pipeline;
+ } itoi; /* NOTE(review): presumably image-to-image copy meta pipeline — confirm */
+ struct {
+ VkPipelineLayout img_p_layout;
+ VkDescriptorSetLayout img_ds_layout;
+ VkPipeline pipeline;
+ } cleari; /* NOTE(review): presumably image-clear meta pipeline — confirm */
struct {
VkPipeline pipeline;
} buffer;
};
+/* queue types */
+#define RADV_QUEUE_GENERAL 0
+#define RADV_QUEUE_COMPUTE 1
+#define RADV_QUEUE_TRANSFER 2
+
+/* Must stay equal to the number of RADV_QUEUE_* indices above. */
+#define RADV_MAX_QUEUE_FAMILIES 3
+
+/* Maps a RADV_QUEUE_* family index to the winsys ring type.
+ * NOTE(review): semantics inferred from the name; confirm at the definition. */
+enum ring_type radv_queue_family_to_ring(int f);
+
struct radv_queue {
VK_LOADER_DATA _loader_data;
-
struct radv_device * device;
-
- struct radv_state_pool * pool;
+ int queue_family_index; /* RADV_QUEUE_* family this queue belongs to */
+ int queue_idx; /* index of this queue within its family */
};
struct radv_device {
struct radeon_winsys_ctx *hw_ctx;
struct radv_meta_state meta_state;
- struct radv_queue queue;
- struct radeon_winsys_cs *empty_cs;
- bool allow_fast_clears;
- bool allow_dcc;
- bool shader_stats_dump;
+ struct radv_queue *queues[RADV_MAX_QUEUE_FAMILIES];
+ int queue_count[RADV_MAX_QUEUE_FAMILIES];
+ struct radeon_winsys_cs *empty_cs[RADV_MAX_QUEUE_FAMILIES];
+
+ uint64_t debug_flags;
/* MSAA sample locations.
* The first index is the sample index.
float sample_locations_4x[4][2];
float sample_locations_8x[8][2];
float sample_locations_16x[16][2];
+
+ struct radeon_winsys_bo *trace_bo;
+ uint32_t *trace_id_ptr;
};
struct radv_device_memory {
enum radv_cmd_flush_bits flush_bits;
unsigned active_occlusion_queries;
float offset_scale;
+ uint32_t descriptors_dirty;
+ uint32_t trace_id;
};
+
struct radv_cmd_pool {
VkAllocationCallbacks alloc;
struct list_head cmd_buffers;
+ uint32_t queue_family_index; /* family its command buffers record for */
};
struct radv_cmd_buffer_upload {
VkCommandBufferLevel level;
struct radeon_winsys_cs *cs;
struct radv_cmd_state state;
+ uint32_t queue_family_index;
uint8_t push_constants[MAX_PUSH_CONSTANTS_SIZE];
uint32_t dynamic_buffers[16 * MAX_DYNAMIC_BUFFERS];
struct radv_image;
+/* NOTE(review): returns whether the command buffer targets the MEC
+ * (async compute) ring — inferred from the name; confirm at definition. */
+bool radv_cmd_buffer_uses_mec(struct radv_cmd_buffer *cmd_buffer);
+
+/* NOTE(review): presumably emits the initial compute-ring state;
+ * confirm against si_init_config below. */
+void si_init_compute(struct radv_physical_device *physical_device,
+ struct radv_cmd_buffer *cmd_buffer);
void si_init_config(struct radv_physical_device *physical_device,
struct radv_cmd_buffer *cmd_buffer);
void si_write_viewport(struct radeon_winsys_cs *cs, int first_vp,
void radv_fill_buffer(struct radv_cmd_buffer *cmd_buffer,
struct radeon_winsys_bo *bo,
uint64_t offset, uint64_t size, uint32_t value);
+void radv_cmd_buffer_trace_emit(struct radv_cmd_buffer *cmd_buffer);
/*
* Takes x,y,z as exact numbers of invocations, instead of blocks.
VkDeviceSize size;
uint32_t alignment;
+ bool exclusive;
+ unsigned queue_family_mask;
+
/* Set when bound */
struct radeon_winsys_bo *bo;
VkDeviceSize offset;
VkImageLayout layout);
bool radv_layout_can_expclear(const struct radv_image *image,
VkImageLayout layout);
-bool radv_layout_has_cmask(const struct radv_image *image,
- VkImageLayout layout);
+bool radv_layout_can_fast_clear(const struct radv_image *image,
+ VkImageLayout layout,
+ unsigned queue_mask);
+
+
+unsigned radv_image_queue_family_mask(const struct radv_image *image, int family);
+
static inline uint32_t
radv_get_layerCount(const struct radv_image *image,
const VkImageSubresourceRange *range)
return (const __VkType *) __radv_obj; \
}
-#define RADV_COMMON_TO_STRUCT(__VkType, __vk_name, __common_name) \
- const __VkType *__vk_name = radv_common_to_ ## __VkType(__common_name)
-
-RADV_DEFINE_STRUCT_CASTS(radv_common, VkMemoryBarrier)
-RADV_DEFINE_STRUCT_CASTS(radv_common, VkBufferMemoryBarrier)
-RADV_DEFINE_STRUCT_CASTS(radv_common, VkImageMemoryBarrier)
-
-
#endif /* RADV_PRIVATE_H */