#define MAX_DYNAMIC_STORAGE_BUFFERS 8
#define MAX_DYNAMIC_BUFFERS (MAX_DYNAMIC_UNIFORM_BUFFERS + MAX_DYNAMIC_STORAGE_BUFFERS)
#define MAX_SAMPLES_LOG2 4
-#define NUM_META_FS_KEYS 13
+#define NUM_META_FS_KEYS 12
#define RADV_MAX_DRM_DEVICES 8
#define MAX_VIEWS 8
+#define MAX_SO_STREAMS 4
+#define MAX_SO_BUFFERS 4
+#define MAX_SO_OUTPUTS 64
#define NUM_DEPTH_CLEAR_PIPELINES 3
/*
 * This is the point we switch from using CP to compute shader
 * for certain buffer operations.
 */
#define RADV_BUFFER_OPS_CS_THRESHOLD 4096
+#define RADV_BUFFER_UPDATE_THRESHOLD 1024
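+/*
+ * Rough usage sketch (illustrative; the call sites are outside this hunk
+ * and the helper names below are hypothetical): small updates are written
+ * inline with a CP packet, larger ones via the compute path:
+ *
+ *   if (size < RADV_BUFFER_UPDATE_THRESHOLD)
+ *           radv_update_buffer_cp(cmd_buffer, va, data, size);
+ *   else
+ *           radv_update_buffer_cs(cmd_buffer, bo, offset, size, data);
+ */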
+
enum radv_mem_heap {
RADV_MEM_HEAP_VRAM,
RADV_MEM_HEAP_VRAM_CPU_ACCESS,
VkPhysicalDeviceMemoryProperties memory_properties;
enum radv_mem_type mem_type_indices[RADV_MEM_TYPE_COUNT];
+ drmPciBusInfo bus_info;
+
struct radv_device_extension_table supported_extensions;
};
struct radv_device *device);
void
radv_pipeline_cache_finish(struct radv_pipeline_cache *cache);
-void
+bool
radv_pipeline_cache_load(struct radv_pipeline_cache *cache,
const void *data, size_t size);
struct radv_pipeline_cache cache;
+ /*
+ * For on-demand pipeline creation, this ensures that only one
+ * thread builds a pipeline at a time.
+ */
+ mtx_t mtx;
+
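+ /*
+ * Expected locking pattern for on-demand creation (sketch; the create
+ * helper is hypothetical, and device->meta_state is assumed to be the
+ * owning member):
+ *
+ *   mtx_lock(&device->meta_state.mtx);
+ *   if (!state->itoi_r32g32b32.pipeline)
+ *           result = create_itoi_r32g32b32_pipeline(device);
+ *   mtx_unlock(&device->meta_state.mtx);
+ */
+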
/**
* Use array element `i` for images with `2^i` samples.
*/
VkPipeline pipeline;
VkPipeline pipeline_3d;
} btoi;
+ struct {
+ VkPipelineLayout img_p_layout;
+ VkDescriptorSetLayout img_ds_layout;
+ VkPipeline pipeline;
+ } btoi_r32g32b32;
struct {
VkPipelineLayout img_p_layout;
VkDescriptorSetLayout img_ds_layout;
VkPipeline pipeline;
VkPipeline pipeline_3d;
} itoi;
+ struct {
+ VkPipelineLayout img_p_layout;
+ VkDescriptorSetLayout img_ds_layout;
+ VkPipeline pipeline;
+ } itoi_r32g32b32;
struct {
VkPipelineLayout img_p_layout;
VkDescriptorSetLayout img_ds_layout;
VkPipeline pipeline;
VkPipeline pipeline_3d;
} cleari;
+ struct {
+ VkPipelineLayout img_p_layout;
+ VkDescriptorSetLayout img_ds_layout;
+ VkPipeline pipeline;
+ } cleari_r32g32b32;
struct {
VkPipelineLayout p_layout;
bool use_global_bo_list;
struct radv_bo_list bo_list;
+
+ /* Anisotropy level forced via RADV_TEX_ANISO (-1 means disabled). */
+ int force_aniso;
};
struct radv_device_memory {
RADV_CMD_DIRTY_INDEX_BUFFER = 1 << 11,
RADV_CMD_DIRTY_FRAMEBUFFER = 1 << 12,
RADV_CMD_DIRTY_VERTEX_BUFFER = 1 << 13,
+ RADV_CMD_DIRTY_STREAMOUT_BUFFER = 1 << 14,
};
enum radv_cmd_flush_bits {
/* Pipeline query controls. */
RADV_CMD_FLAG_START_PIPELINE_STATS = 1 << 13,
RADV_CMD_FLAG_STOP_PIPELINE_STATS = 1 << 14,
+ RADV_CMD_FLAG_VGT_STREAMOUT_SYNC = 1 << 15,
RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER = (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
VkDeviceSize offset;
};
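+/* A bound transform feedback buffer; presumably filled from the
+ * vkCmdBindTransformFeedbackBuffersEXT buffer/offset/size parameters. */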
+struct radv_streamout_binding {
+ struct radv_buffer *buffer;
+ VkDeviceSize offset;
+ VkDeviceSize size;
+};
+
+struct radv_streamout_state {
+ /* Mask of bound streamout buffers. */
+ uint8_t enabled_mask;
+
+ /* External state that comes from the last vertex stage; it must be
+ * set explicitly when binding a new graphics pipeline.
+ */
+ uint16_t stride_in_dw[MAX_SO_BUFFERS];
+ uint32_t enabled_stream_buffers_mask; /* stream 0 buffers 0-3 in the 4 LSBs */
+
+ /* State of VGT_STRMOUT_BUFFER_(CONFIG|END) */
+ uint32_t hw_enabled_mask;
+
+ /* State of VGT_STRMOUT_(CONFIG|EN) */
+ bool streamout_enabled;
+};
+
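+/*
+ * Illustrative sketch of how the two masks are meant to combine when the
+ * buffer config register is emitted (emission code is outside this hunk;
+ * `so` is a struct radv_streamout_state pointer):
+ *
+ *   radeon_set_context_reg(cs, R_028B98_VGT_STRMOUT_BUFFER_CONFIG,
+ *                          so->hw_enabled_mask &
+ *                          so->enabled_stream_buffers_mask);
+ */
+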
struct radv_viewport_state {
uint32_t count;
VkViewport viewports[MAX_VIEWPORTS];
const struct radv_subpass * subpass;
struct radv_dynamic_state dynamic;
struct radv_attachment_state * attachments;
+ struct radv_streamout_state streamout;
VkRect2D render_area;
/* Index buffer */
/* Whether CP DMA is busy/idle. */
bool dma_is_busy;
+
+ /* Conditional rendering info. */
+ int predication_type; /* -1: disabled, 0: normal, 1: inverted */
+ uint64_t predication_va;
};
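+
+/*
+ * The predication fields above are expected to feed
+ * si_emit_set_predication_state() (declared further down), roughly:
+ *
+ *   if (state->predication_type != -1)
+ *           si_emit_set_predication_state(cmd_buffer,
+ *                                         state->predication_type == 1,
+ *                                         state->predication_va);
+ */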
struct radv_cmd_pool {
struct radeon_cmdbuf *cs;
struct radv_cmd_state state;
struct radv_vertex_binding vertex_bindings[MAX_VBS];
+ struct radv_streamout_binding streamout_bindings[MAX_SO_BUFFERS];
uint32_t queue_family_index;
uint8_t push_constants[MAX_PUSH_CONSTANTS_SIZE];
VkResult record_result;
- int ring_offsets_idx; /* just used for verification */
uint32_t gfx9_fence_offset;
struct radeon_winsys_bo *gfx9_fence_bo;
uint32_t gfx9_fence_idx;
+ uint64_t gfx9_eop_bug_va;
/**
* Whether a query pool has been reset and we have to flush caches.
bool radv_cmd_buffer_uses_mec(struct radv_cmd_buffer *cmd_buffer);
-void si_init_compute(struct radv_cmd_buffer *cmd_buffer);
-void si_init_config(struct radv_cmd_buffer *cmd_buffer);
+void si_emit_graphics(struct radv_physical_device *physical_device,
+ struct radeon_cmdbuf *cs);
+void si_emit_compute(struct radv_physical_device *physical_device,
+ struct radeon_cmdbuf *cs);
void cik_create_gfx_config(struct radv_device *device);
unsigned data_sel,
uint64_t va,
uint32_t old_fence,
- uint32_t new_fence);
+ uint32_t new_fence,
+ uint64_t gfx9_eop_bug_va);
void si_emit_wait_fence(struct radeon_cmdbuf *cs,
uint64_t va, uint32_t ref,
enum chip_class chip_class,
uint32_t *fence_ptr, uint64_t va,
bool is_mec,
- enum radv_cmd_flush_bits flush_bits);
+ enum radv_cmd_flush_bits flush_bits,
+ uint64_t gfx9_eop_bug_va);
void si_emit_cache_flush(struct radv_cmd_buffer *cmd_buffer);
-void si_emit_set_predication_state(struct radv_cmd_buffer *cmd_buffer, uint64_t va);
+void si_emit_set_predication_state(struct radv_cmd_buffer *cmd_buffer,
+ bool inverted, uint64_t va);
void si_cp_dma_buffer_copy(struct radv_cmd_buffer *cmd_buffer,
uint64_t src_va, uint64_t dest_va,
uint64_t size);
stage = __builtin_ffs(__tmp) - 1, __tmp; \
__tmp &= ~(1 << (stage)))
+extern const VkFormat radv_fs_key_format_exemplars[NUM_META_FS_KEYS];
unsigned radv_format_meta_fs_key(VkFormat format);
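+/*
+ * Presumed invariant: radv_format_meta_fs_key() maps every supported color
+ * format onto the index of one exemplar above, i.e. for each i,
+ * radv_format_meta_fs_key(radv_fs_key_format_exemplars[i]) == i, so meta
+ * pipelines are compiled once per key rather than once per format.
+ */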
struct radv_multisample_state {
unsigned max_waves;
unsigned scratch_bytes_per_wave;
+
+ /* Not NULL if the graphics pipeline uses streamout. */
+ struct radv_shader_variant *streamout_shader;
};
static inline bool radv_pipeline_has_gs(const struct radv_pipeline *pipeline)
VkAccessFlags dst_access_mask;
};
+void radv_subpass_barrier(struct radv_cmd_buffer *cmd_buffer,
+ const struct radv_subpass_barrier *barrier);
+
struct radv_subpass_attachment {
uint32_t attachment;
VkImageLayout layout;