VkImageLayout dst_layout,
uint32_t src_family,
uint32_t dst_family,
- const VkImageSubresourceRange *range,
- VkImageAspectFlags pending_clears);
+ const VkImageSubresourceRange *range);
const struct radv_dynamic_state default_dynamic_state = {
.viewport = {
cmd_buffer->state.dirty |= dest_mask;
}
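+/* Cache the streamout strides and the enabled stream/buffer mask from the
+ * pipeline's streamout shader into the command buffer state.
+ */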
+static void
+radv_bind_streamout_state(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_pipeline *pipeline)
+{
+ struct radv_streamout_state *so = &cmd_buffer->state.streamout;
+ struct radv_shader_info *info;
+
+ if (!pipeline->streamout_shader)
+ return;
+
+ info = &pipeline->streamout_shader->info.info;
+ for (int i = 0; i < MAX_SO_BUFFERS; i++)
+ so->stride_in_dw[i] = info->so.strides[i];
+
+ so->enabled_stream_buffers_mask = info->so.enabled_stream_buffers_mask;
+}
+
bool radv_cmd_buffer_uses_mec(struct radv_cmd_buffer *cmd_buffer)
{
return cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
if (cmd_buffer->upload.upload_bo)
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
- cmd_buffer->upload.upload_bo, 8);
+ cmd_buffer->upload.upload_bo);
cmd_buffer->upload.offset = 0;
cmd_buffer->record_result = VK_SUCCESS;
- cmd_buffer->ring_offsets_idx = -1;
-
for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
cmd_buffer->descriptors[i].dirty = 0;
cmd_buffer->descriptors[i].valid = 0;
}
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
+ unsigned num_db = cmd_buffer->device->physical_device->rad_info.num_render_backends;
+ unsigned eop_bug_offset;
void *fence_ptr;
+
radv_cmd_buffer_upload_alloc(cmd_buffer, 8, 0,
&cmd_buffer->gfx9_fence_offset,
&fence_ptr);
cmd_buffer->gfx9_fence_bo = cmd_buffer->upload.upload_bo;
+
+ /* Allocate a buffer for the EOP bug on GFX9. */
+ radv_cmd_buffer_upload_alloc(cmd_buffer, 16 * num_db, 0,
+ &eop_bug_offset, &fence_ptr);
+ cmd_buffer->gfx9_eop_bug_va =
+ radv_buffer_get_va(cmd_buffer->upload.upload_bo);
+ cmd_buffer->gfx9_eop_bug_va += eop_bug_offset;
}
cmd_buffer->status = RADV_CMD_BUFFER_STATUS_INITIAL;
return false;
}
- radv_cs_add_buffer(device->ws, cmd_buffer->cs, bo, 8);
+ radv_cs_add_buffer(device->ws, cmd_buffer->cs, bo);
if (cmd_buffer->upload.upload_bo) {
upload = malloc(sizeof(*upload));
}
static void
-radv_emit_write_data_packet(struct radeon_winsys_cs *cs, uint64_t va,
+radv_emit_write_data_packet(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
unsigned count, const uint32_t *data)
{
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
+
+ radeon_check_space(cmd_buffer->device->ws, cs, 4 + count);
+
radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0));
radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
S_370_WR_CONFIRM(1) |
void radv_cmd_buffer_trace_emit(struct radv_cmd_buffer *cmd_buffer)
{
struct radv_device *device = cmd_buffer->device;
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint64_t va;
va = radv_buffer_get_va(device->trace_bo);
if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
va += 4;
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 7);
-
++cmd_buffer->state.trace_id;
- radv_cs_add_buffer(device->ws, cs, device->trace_bo, 8);
- radv_emit_write_data_packet(cs, va, 1, &cmd_buffer->state.trace_id);
+ radv_emit_write_data_packet(cmd_buffer, va, 1,
+ &cmd_buffer->state.trace_id);
+
+ radeon_check_space(cmd_buffer->device->ws, cs, 2);
+
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, AC_ENCODE_TRACE_POINT(cmd_buffer->state.trace_id));
}
ptr = &cmd_buffer->gfx9_fence_idx;
}
+ radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 4);
+
/* Force wait for graphics or compute engines to be idle. */
si_cs_emit_cache_flush(cmd_buffer->cs,
cmd_buffer->device->physical_device->rad_info.chip_class,
ptr, va,
radv_cmd_buffer_uses_mec(cmd_buffer),
- flags);
+ flags, cmd_buffer->gfx9_eop_bug_va);
}
if (unlikely(cmd_buffer->device->trace_bo))
struct radv_pipeline *pipeline, enum ring_type ring)
{
struct radv_device *device = cmd_buffer->device;
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
uint32_t data[2];
uint64_t va;
assert(!"invalid ring type");
}
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(device->ws,
- cmd_buffer->cs, 6);
-
data[0] = (uintptr_t)pipeline;
data[1] = (uintptr_t)pipeline >> 32;
- radv_cs_add_buffer(device->ws, cs, device->trace_bo, 8);
- radv_emit_write_data_packet(cs, va, 2, data);
+ radv_emit_write_data_packet(cmd_buffer, va, 2, data);
}
void radv_set_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
radv_get_descriptors_state(cmd_buffer, bind_point);
descriptors_state->sets[idx] = set;
- if (set)
- descriptors_state->valid |= (1u << idx);
- else
- descriptors_state->valid &= ~(1u << idx);
+
+ descriptors_state->valid |= (1u << idx); /* active descriptors */
descriptors_state->dirty |= (1u << idx);
}
struct radv_descriptor_state *descriptors_state =
radv_get_descriptors_state(cmd_buffer, bind_point);
struct radv_device *device = cmd_buffer->device;
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
uint32_t data[MAX_SETS * 2] = {};
uint64_t va;
unsigned i;
va = radv_buffer_get_va(device->trace_bo) + 24;
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(device->ws,
- cmd_buffer->cs, 4 + MAX_SETS * 2);
-
for_each_bit(i, descriptors_state->valid) {
struct radv_descriptor_set *set = descriptors_state->sets[i];
data[i * 2] = (uintptr_t)set;
data[i * 2 + 1] = (uintptr_t)set >> 32;
}
- radv_cs_add_buffer(device->ws, cs, device->trace_bo, 8);
- radv_emit_write_data_packet(cs, va, MAX_SETS * 2, data);
+ radv_emit_write_data_packet(cmd_buffer, va, MAX_SETS * 2, data);
}
struct radv_userdata_info *
if (loc->sgpr_idx == -1)
return;
- assert(loc->num_sgprs == (HAVE_32BIT_POINTERS ? 1 : 2));
+ assert(loc->num_sgprs == 1);
assert(!loc->indirect);
radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs,
gl_shader_stage stage)
{
struct radv_device *device = cmd_buffer->device;
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint32_t sh_base = pipeline->user_data_0[stage];
struct radv_userdata_locations *locs =
&pipeline->shaders[stage]->info.user_sgprs_locs;
- unsigned mask;
+ unsigned mask = locs->descriptor_sets_enabled;
- mask = descriptors_state->dirty & descriptors_state->valid;
-
- for (int i = 0; i < MAX_SETS; i++) {
- struct radv_userdata_info *loc = &locs->descriptor_sets[i];
- if (loc->sgpr_idx != -1 && !loc->indirect)
- continue;
- mask &= ~(1 << i);
- }
+ mask &= descriptors_state->dirty & descriptors_state->valid;
while (mask) {
int start, count;
struct radv_userdata_info *loc = &locs->descriptor_sets[start];
unsigned sh_offset = sh_base + loc->sgpr_idx * 4;
- radv_emit_shader_pointer_head(cs, sh_offset, count,
- HAVE_32BIT_POINTERS);
+ radv_emit_shader_pointer_head(cs, sh_offset, count, true);
for (int i = 0; i < count; i++) {
struct radv_descriptor_set *set =
descriptors_state->sets[start + i];
- radv_emit_shader_pointer_body(device, cs, set->va,
- HAVE_32BIT_POINTERS);
+ radv_emit_shader_pointer_body(device, cs, set->va, true);
}
}
}
unsigned sx_blend_opt_control = 0;
for (unsigned i = 0; i < subpass->color_count; ++i) {
- if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED)
+ if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) {
+ sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
+ sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4);
continue;
+ }
int idx = subpass->color_attachments[i].attachment;
struct radv_color_buffer_info *cb = &framebuffer->attachments[idx].cb;
}
}
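+	/* Disable the color/alpha blend optimizations for the remaining,
+	 * unbound color targets.
+	 */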
+ for (unsigned i = subpass->color_count; i < 8; ++i) {
+ sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
+ sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4);
+ }
radeon_set_context_reg_seq(cmd_buffer->cs, R_028754_SX_PS_DOWNCONVERT, 3);
radeon_emit(cmd_buffer->cs, sx_ps_downconvert);
radeon_emit(cmd_buffer->cs, sx_blend_opt_epsilon);
continue;
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
- pipeline->shaders[i]->bo, 8);
+ pipeline->shaders[i]->bo);
}
if (radv_pipeline_has_gs(pipeline))
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
- pipeline->gs_copy_shader->bo, 8);
+ pipeline->gs_copy_shader->bo);
if (unlikely(cmd_buffer->device->trace_bo))
radv_save_pipeline(cmd_buffer, pipeline, RING_GFX);
radeon_set_context_reg(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, cb->cb_dcc_base);
}
}
+
+ if (radv_image_has_dcc(image)) {
+ /* Drawing with DCC enabled also compresses colorbuffers. */
+ radv_update_dcc_metadata(cmd_buffer, image, true);
+ }
}
static void
radv_update_zrange_precision(struct radv_cmd_buffer *cmd_buffer,
struct radv_ds_buffer_info *ds,
struct radv_image *image, VkImageLayout layout,
- bool requires_cond_write)
+ bool requires_cond_exec)
{
uint32_t db_z_info = ds->db_z_info;
uint32_t db_z_info_reg;
}
/* When we don't know the last fast clear value we need to emit a
- * conditional packet, otherwise we can update DB_Z_INFO directly.
+ * conditional packet that will eventually skip the following
+ * SET_CONTEXT_REG packet.
*/
- if (requires_cond_write) {
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_COND_WRITE, 7, 0));
-
- const uint32_t write_space = 0 << 8; /* register */
- const uint32_t poll_space = 1 << 4; /* memory */
- const uint32_t function = 3 << 0; /* equal to the reference */
- const uint32_t options = write_space | poll_space | function;
- radeon_emit(cmd_buffer->cs, options);
-
- /* poll address - location of the depth clear value */
+ if (requires_cond_exec) {
uint64_t va = radv_buffer_get_va(image->bo);
- va += image->offset + image->clear_value_offset;
-
- /* In presence of stencil format, we have to adjust the base
- * address because the first value is the stencil clear value.
- */
- if (vk_format_is_stencil(image->vk_format))
- va += 4;
+ va += image->offset + image->tc_compat_zrange_offset;
+ radeon_emit(cmd_buffer->cs, PKT3(PKT3_COND_EXEC, 3, 0));
radeon_emit(cmd_buffer->cs, va);
radeon_emit(cmd_buffer->cs, va >> 32);
-
- radeon_emit(cmd_buffer->cs, fui(0.0f)); /* reference value */
- radeon_emit(cmd_buffer->cs, (uint32_t)-1); /* comparison mask */
- radeon_emit(cmd_buffer->cs, db_z_info_reg >> 2); /* write address low */
- radeon_emit(cmd_buffer->cs, 0u); /* write address high */
- radeon_emit(cmd_buffer->cs, db_z_info);
- } else {
- radeon_set_context_reg(cmd_buffer->cs, db_z_info_reg, db_z_info);
+ radeon_emit(cmd_buffer->cs, 0);
+ radeon_emit(cmd_buffer->cs, 3); /* SET_CONTEXT_REG size */
}
+
+ radeon_set_context_reg(cmd_buffer->cs, db_z_info_reg, db_z_info);
}
static void
{
struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
const struct radv_subpass *subpass = cmd_buffer->state.subpass;
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
struct radv_attachment_info *att;
uint32_t att_idx;
/**
* Set the clear depth/stencil values to the image's metadata.
*/
-void
+static void
radv_set_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
struct radv_image *image,
VkClearDepthStencilValue ds_clear_value,
VkImageAspectFlags aspects)
{
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint64_t va = radv_buffer_get_va(image->bo);
unsigned reg_offset = 0, reg_count = 0;
va += image->offset + image->clear_value_offset;
- assert(radv_image_has_htile(image));
-
if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
++reg_count;
} else {
radeon_emit(cs, ds_clear_value.stencil);
if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
radeon_emit(cs, fui(ds_clear_value.depth));
+}
+
+/**
+ * Set the TC-compat metadata value for this image.
+ */
+static void
+radv_set_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image,
+ uint32_t value)
+{
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
+ uint64_t va = radv_buffer_get_va(image->bo);
+ va += image->offset + image->tc_compat_zrange_offset;
+
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
+ radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
+ S_370_WR_CONFIRM(1) |
+ S_370_ENGINE_SEL(V_370_PFP));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, value);
+}
+
+static void
+radv_update_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image,
+ VkClearDepthStencilValue ds_clear_value)
+{
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
+ uint64_t va = radv_buffer_get_va(image->bo);
+ va += image->offset + image->tc_compat_zrange_offset;
+ uint32_t cond_val;
+
+ /* Conditionally set DB_Z_INFO.ZRANGE_PRECISION to 0 when the last
+ * depth clear value is 0.0f.
+ */
+ cond_val = ds_clear_value.depth == 0.0f ? UINT_MAX : 0;
+
+ radv_set_tc_compat_zrange_metadata(cmd_buffer, image, cond_val);
+}
+
+/**
+ * Update the clear depth/stencil values for this image.
+ */
+void
+radv_update_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image,
+ VkClearDepthStencilValue ds_clear_value,
+ VkImageAspectFlags aspects)
+{
+ assert(radv_image_has_htile(image));
+
+ radv_set_ds_clear_metadata(cmd_buffer, image, ds_clear_value, aspects);
+
+ if (radv_image_is_tc_compat_htile(image) &&
+ (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)) {
+ radv_update_tc_compat_zrange_metadata(cmd_buffer, image,
+ ds_clear_value);
+ }
radv_update_bound_fast_clear_ds(cmd_buffer, image, ds_clear_value,
aspects);
radv_load_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
struct radv_image *image)
{
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
VkImageAspectFlags aspects = vk_format_aspects(image->vk_format);
uint64_t va = radv_buffer_get_va(image->bo);
unsigned reg_offset = 0, reg_count = 0;
if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
++reg_count;
- radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
- radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
- COPY_DATA_DST_SEL(COPY_DATA_REG) |
- (reg_count == 2 ? COPY_DATA_COUNT_SEL : 0));
- radeon_emit(cs, va);
- radeon_emit(cs, va >> 32);
- radeon_emit(cs, (R_028028_DB_STENCIL_CLEAR + 4 * reg_offset) >> 2);
- radeon_emit(cs, 0);
+ uint32_t reg = R_028028_DB_STENCIL_CLEAR + 4 * reg_offset;
- radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
- radeon_emit(cs, 0);
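+	/* On VI+ use LOAD_CONTEXT_REG, which does not require the extra
+	 * PFP_SYNC_ME needed by the COPY_DATA path below.
+	 */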
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= VI) {
+ radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, 0));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2);
+ radeon_emit(cs, reg_count);
+ } else {
+ radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
+ radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
+ COPY_DATA_DST_SEL(COPY_DATA_REG) |
+ (reg_count == 2 ? COPY_DATA_COUNT_SEL : 0));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, reg >> 2);
+ radeon_emit(cs, 0);
+
+ radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
+ radeon_emit(cs, 0);
+ }
}
/*
* cmask eliminate is required.
*/
void
-radv_set_dcc_need_cmask_elim_pred(struct radv_cmd_buffer *cmd_buffer,
- struct radv_image *image,
- bool value)
+radv_update_fce_metadata(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image, bool value)
+{
+ uint64_t pred_val = value;
+ uint64_t va = radv_buffer_get_va(image->bo);
+ va += image->offset + image->fce_pred_offset;
+
+ assert(radv_image_has_dcc(image));
+
+ radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
+ radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
+ S_370_WR_CONFIRM(1) |
+ S_370_ENGINE_SEL(V_370_PFP));
+ radeon_emit(cmd_buffer->cs, va);
+ radeon_emit(cmd_buffer->cs, va >> 32);
+ radeon_emit(cmd_buffer->cs, pred_val);
+ radeon_emit(cmd_buffer->cs, pred_val >> 32);
+}
+
+/**
+ * Update the DCC predicate to reflect the compression state.
+ */
+void
+radv_update_dcc_metadata(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image, bool value)
{
uint64_t pred_val = value;
uint64_t va = radv_buffer_get_va(image->bo);
{
struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
const struct radv_subpass *subpass = cmd_buffer->state.subpass;
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
struct radv_attachment_info *att;
uint32_t att_idx;
/**
* Set the clear color values to the image's metadata.
*/
-void
+static void
radv_set_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
struct radv_image *image,
- int cb_idx,
uint32_t color_values[2])
{
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint64_t va = radv_buffer_get_va(image->bo);
va += image->offset + image->clear_value_offset;
radeon_emit(cs, va >> 32);
radeon_emit(cs, color_values[0]);
radeon_emit(cs, color_values[1]);
+}
+
+/**
+ * Update the clear color values for this image.
+ */
+void
+radv_update_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image,
+ int cb_idx,
+ uint32_t color_values[2])
+{
+ assert(radv_image_has_cmask(image) || radv_image_has_dcc(image));
+
+ radv_set_color_clear_metadata(cmd_buffer, image, color_values);
radv_update_bound_fast_clear_color(cmd_buffer, image, cb_idx,
color_values);
struct radv_image *image,
int cb_idx)
{
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint64_t va = radv_buffer_get_va(image->bo);
va += image->offset + image->clear_value_offset;
uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c;
- radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, cmd_buffer->state.predicating));
- radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
- COPY_DATA_DST_SEL(COPY_DATA_REG) |
- COPY_DATA_COUNT_SEL);
- radeon_emit(cs, va);
- radeon_emit(cs, va >> 32);
- radeon_emit(cs, reg >> 2);
- radeon_emit(cs, 0);
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= VI) {
+ radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, cmd_buffer->state.predicating));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2);
+ radeon_emit(cs, 2);
+ } else {
+ /* TODO: Figure out how to use LOAD_CONTEXT_REG on SI/CIK. */
+ radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, cmd_buffer->state.predicating));
+ radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
+ COPY_DATA_DST_SEL(COPY_DATA_REG) |
+ COPY_DATA_COUNT_SEL);
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, reg >> 2);
+ radeon_emit(cs, 0);
- radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating));
- radeon_emit(cs, 0);
+ radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating));
+ radeon_emit(cs, 0);
+ }
}
static void
int i;
struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
const struct radv_subpass *subpass = cmd_buffer->state.subpass;
+ unsigned num_bpp64_colorbufs = 0;
/* this may happen for inherited secondary recording */
if (!framebuffer)
struct radv_image *image = att->attachment->image;
VkImageLayout layout = subpass->color_attachments[i].layout;
- radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo, 8);
+ radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo);
assert(att->attachment->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT);
radv_emit_fb_color_state(cmd_buffer, i, att, image, layout);
radv_load_color_clear_metadata(cmd_buffer, image, i);
+
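+		/* Count color buffers with 8+ bytes per element; this drives
+		 * the DCC overwrite-combiner watermark programmed below.
+		 */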
+ if (image->surface.bpe >= 8)
+ num_bpp64_colorbufs++;
}
if(subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
VkImageLayout layout = subpass->depth_stencil_attachment.layout;
struct radv_attachment_info *att = &framebuffer->attachments[idx];
struct radv_image *image = att->attachment->image;
- radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo, 8);
+ radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo);
MAYBE_UNUSED uint32_t queue_mask = radv_image_queue_family_mask(image,
cmd_buffer->queue_family_index,
cmd_buffer->queue_family_index);
S_028208_BR_X(framebuffer->width) |
S_028208_BR_Y(framebuffer->height));
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= VI) {
+ uint8_t watermark = 4; /* Default value for VI. */
+
+ /* For optimal DCC performance. */
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
+ if (num_bpp64_colorbufs >= 5) {
+ watermark = 8;
+ } else {
+ watermark = 6;
+ }
+ }
+
+ radeon_set_context_reg(cmd_buffer->cs, R_028424_CB_DCC_CONTROL,
+ S_028424_OVERWRITE_COMBINER_MRT_SHARING_DISABLE(1) |
+ S_028424_OVERWRITE_COMBINER_WATERMARK(watermark));
+ }
+
if (cmd_buffer->device->dfsm_allowed) {
radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0));
static void
radv_emit_index_buffer(struct radv_cmd_buffer *cmd_buffer)
{
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
struct radv_cmd_state *state = &cmd_buffer->state;
if (state->index_type != state->last_index_type) {
R_028A4C_PA_SC_MODE_CNTL_1,
pa_sc_mode_cntl_1);
}
- db_count_control = 0;
- } else {
- db_count_control = S_028004_ZPASS_INCREMENT_DISABLE(1);
}
+ db_count_control = S_028004_ZPASS_INCREMENT_DISABLE(1);
} else {
const struct radv_subpass *subpass = cmd_buffer->state.subpass;
uint32_t sample_rate = subpass ? util_logbase2(subpass->max_sample_count) : 0;
{
struct radv_descriptor_state *descriptors_state =
radv_get_descriptors_state(cmd_buffer, bind_point);
- uint32_t size = MAX_SETS * 2 * 4;
+ uint32_t size = MAX_SETS * 4;
uint32_t offset;
void *ptr;
return;
for (unsigned i = 0; i < MAX_SETS; i++) {
- uint32_t *uptr = ((uint32_t *)ptr) + i * 2;
+ uint32_t *uptr = ((uint32_t *)ptr) + i;
uint64_t set_va = 0;
struct radv_descriptor_set *set = descriptors_state->sets[i];
if (descriptors_state->valid & (1u << i))
set_va = set->va;
uptr[0] = set_va & 0xffffffff;
- uptr[1] = set_va >> 32;
}
uint64_t va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
VK_PIPELINE_BIND_POINT_GRAPHICS;
struct radv_descriptor_state *descriptors_state =
radv_get_descriptors_state(cmd_buffer, bind_point);
+ struct radv_cmd_state *state = &cmd_buffer->state;
+ bool flush_indirect_descriptors;
if (!descriptors_state->dirty)
return;
if (descriptors_state->push_dirty)
radv_flush_push_descriptors(cmd_buffer, bind_point);
- if ((cmd_buffer->state.pipeline && cmd_buffer->state.pipeline->need_indirect_descriptor_sets) ||
- (cmd_buffer->state.compute_pipeline && cmd_buffer->state.compute_pipeline->need_indirect_descriptor_sets)) {
+ flush_indirect_descriptors =
+ (bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS &&
+ state->pipeline && state->pipeline->need_indirect_descriptor_sets) ||
+ (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE &&
+ state->compute_pipeline && state->compute_pipeline->need_indirect_descriptor_sets);
+
+ if (flush_indirect_descriptors)
radv_flush_indirect_descriptor_sets(cmd_buffer, bind_point);
- }
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
cmd_buffer->cs,
descriptors_state->dirty = 0;
descriptors_state->push_dirty = false;
+ assert(cmd_buffer->cs->cdw <= cdw_max);
+
if (unlikely(cmd_buffer->device->trace_bo))
radv_save_descriptors(cmd_buffer, bind_point);
-
- assert(cmd_buffer->cs->cdw <= cdw_max);
}
static void
struct radv_pipeline *pipeline = stages & VK_SHADER_STAGE_COMPUTE_BIT
? cmd_buffer->state.compute_pipeline
: cmd_buffer->state.pipeline;
+ VkPipelineBindPoint bind_point = stages & VK_SHADER_STAGE_COMPUTE_BIT ?
+ VK_PIPELINE_BIND_POINT_COMPUTE :
+ VK_PIPELINE_BIND_POINT_GRAPHICS;
+ struct radv_descriptor_state *descriptors_state =
+ radv_get_descriptors_state(cmd_buffer, bind_point);
struct radv_pipeline_layout *layout = pipeline->layout;
struct radv_shader_variant *shader, *prev_shader;
unsigned offset;
return;
memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size);
- memcpy((char*)ptr + layout->push_constant_size, cmd_buffer->dynamic_buffers,
+ memcpy((char*)ptr + layout->push_constant_size,
+ descriptors_state->dynamic_buffers,
16 * layout->dynamic_offset_count);
va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_VERTEX_BUFFER;
}
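+/* Emit the GPU address of the streamout buffer descriptors to every shader
+ * stage that declares the streamout buffers user SGPR, including the GS
+ * copy shader.
+ */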
+static void
+radv_emit_streamout_buffers(struct radv_cmd_buffer *cmd_buffer, uint64_t va)
+{
+ struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
+ struct radv_userdata_info *loc;
+ uint32_t base_reg;
+
+ for (unsigned stage = 0; stage < MESA_SHADER_STAGES; ++stage) {
+ if (!radv_get_shader(pipeline, stage))
+ continue;
+
+ loc = radv_lookup_user_sgpr(pipeline, stage,
+ AC_UD_STREAMOUT_BUFFERS);
+ if (loc->sgpr_idx == -1)
+ continue;
+
+ base_reg = pipeline->user_data_0[stage];
+
+ radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs,
+ base_reg + loc->sgpr_idx * 4, va, false);
+ }
+
+ if (pipeline->gs_copy_shader) {
+ loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_STREAMOUT_BUFFERS];
+ if (loc->sgpr_idx != -1) {
+ base_reg = R_00B130_SPI_SHADER_USER_DATA_VS_0;
+
+ radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs,
+ base_reg + loc->sgpr_idx * 4, va, false);
+ }
+ }
+}
+
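+/* Upload one 4-dword buffer descriptor per enabled streamout buffer and
+ * point the shaders at them whenever the streamout bindings are dirty.
+ */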
+static void
+radv_flush_streamout_descriptors(struct radv_cmd_buffer *cmd_buffer)
+{
+ if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_STREAMOUT_BUFFER) {
+ struct radv_streamout_binding *sb = cmd_buffer->streamout_bindings;
+ struct radv_streamout_state *so = &cmd_buffer->state.streamout;
+ unsigned so_offset;
+ void *so_ptr;
+ uint64_t va;
+
+ /* Allocate some descriptor state for streamout buffers. */
+ if (!radv_cmd_buffer_upload_alloc(cmd_buffer,
+ MAX_SO_BUFFERS * 16, 256,
+ &so_offset, &so_ptr))
+ return;
+
+ for (uint32_t i = 0; i < MAX_SO_BUFFERS; i++) {
+ struct radv_buffer *buffer = sb[i].buffer;
+ uint32_t *desc = &((uint32_t *)so_ptr)[i * 4];
+
+ if (!(so->enabled_mask & (1 << i)))
+ continue;
+
+ va = radv_buffer_get_va(buffer->bo) + buffer->offset;
+
+ va += sb[i].offset;
+
+ /* Set the descriptor.
+ *
+ * On VI, the format must be non-INVALID, otherwise
+ * the buffer will be considered not bound and store
+ * instructions will be no-ops.
+ */
+ desc[0] = va;
+ desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
+ desc[2] = 0xffffffff;
+ desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
+ S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
+ S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
+ S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
+ S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
+ }
+
+ va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
+ va += so_offset;
+
+ radv_emit_streamout_buffers(cmd_buffer, va);
+ }
+
+ cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_STREAMOUT_BUFFER;
+}
+
static void
radv_upload_graphics_shader_descriptors(struct radv_cmd_buffer *cmd_buffer, bool pipeline_is_dirty)
{
radv_flush_vertex_descriptors(cmd_buffer, pipeline_is_dirty);
+ radv_flush_streamout_descriptors(cmd_buffer);
radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
radv_flush_constants(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
}
{
struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
struct radv_cmd_state *state = &cmd_buffer->state;
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint32_t ia_multi_vgt_param;
int32_t primitive_reset_en;
cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH;
}
- if (src_stage_mask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
- VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
- VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
- VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
+ if (src_stage_mask & (VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
} else if (src_stage_mask & (VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
- VK_PIPELINE_STAGE_VERTEX_SHADER_BIT)) {
+ VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
+ VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
+ VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
+ VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
+ VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT)) {
cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VS_PARTIAL_FLUSH;
}
}
static enum radv_cmd_flush_bits
radv_src_access_flush(struct radv_cmd_buffer *cmd_buffer,
- VkAccessFlags src_flags)
+ VkAccessFlags src_flags,
+ struct radv_image *image)
{
+ bool flush_CB_meta = true, flush_DB_meta = true;
enum radv_cmd_flush_bits flush_bits = 0;
uint32_t b;
+
+ if (image) {
+ if (!radv_image_has_CB_metadata(image))
+ flush_CB_meta = false;
+ if (!radv_image_has_htile(image))
+ flush_DB_meta = false;
+ }
+
for_each_bit(b, src_flags) {
switch ((VkAccessFlagBits)(1 << b)) {
case VK_ACCESS_SHADER_WRITE_BIT:
+ case VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT:
+ case VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT:
flush_bits |= RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
break;
case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
- flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
- RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
+ if (flush_CB_meta)
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
break;
case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
- flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
- RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB;
+ if (flush_DB_meta)
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
break;
case VK_ACCESS_TRANSFER_WRITE_BIT:
flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
- RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
RADV_CMD_FLAG_FLUSH_AND_INV_DB |
- RADV_CMD_FLAG_FLUSH_AND_INV_DB_META |
RADV_CMD_FLAG_INV_GLOBAL_L2;
+
+ if (flush_CB_meta)
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
+ if (flush_DB_meta)
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
break;
default:
break;
VkAccessFlags dst_flags,
struct radv_image *image)
{
+ bool flush_CB_meta = true, flush_DB_meta = true;
enum radv_cmd_flush_bits flush_bits = 0;
+ bool flush_CB = true, flush_DB = true;
+ bool image_is_coherent = false;
uint32_t b;
+
+ if (image) {
+ if (!(image->usage & VK_IMAGE_USAGE_STORAGE_BIT)) {
+ flush_CB = false;
+ flush_DB = false;
+ }
+
+ if (!radv_image_has_CB_metadata(image))
+ flush_CB_meta = false;
+ if (!radv_image_has_htile(image))
+ flush_DB_meta = false;
+
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
+ if (image->info.samples == 1 &&
+ (image->usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
+ VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) &&
+ !vk_format_is_stencil(image->vk_format)) {
+ /* Single-sample color and single-sample depth
+ * (not stencil) are coherent with shaders on
+ * GFX9.
+ */
+ image_is_coherent = true;
+ }
+ }
+ }
+
for_each_bit(b, dst_flags) {
switch ((VkAccessFlagBits)(1 << b)) {
case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
case VK_ACCESS_INDEX_READ_BIT:
+ case VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT:
break;
case VK_ACCESS_UNIFORM_READ_BIT:
flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 | RADV_CMD_FLAG_INV_SMEM_L1;
break;
case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
- case VK_ACCESS_SHADER_READ_BIT:
case VK_ACCESS_TRANSFER_READ_BIT:
case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 |
RADV_CMD_FLAG_INV_GLOBAL_L2;
break;
+ case VK_ACCESS_SHADER_READ_BIT:
+ flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1;
+
+ if (!image_is_coherent)
+ flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2;
+ break;
case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
- /* TODO: change to image && when the image gets passed
- * through from the subpass. */
- if (!image || (image->usage & VK_IMAGE_USAGE_STORAGE_BIT))
- flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
- RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
+ if (flush_CB)
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
+ if (flush_CB_meta)
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
break;
case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT:
- if (!image || (image->usage & VK_IMAGE_USAGE_STORAGE_BIT))
- flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
- RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
+ if (flush_DB)
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB;
+ if (flush_DB_meta)
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
break;
default:
break;
return flush_bits;
}
-static void radv_subpass_barrier(struct radv_cmd_buffer *cmd_buffer, const struct radv_subpass_barrier *barrier)
+void radv_subpass_barrier(struct radv_cmd_buffer *cmd_buffer,
+ const struct radv_subpass_barrier *barrier)
{
- cmd_buffer->state.flush_bits |= radv_src_access_flush(cmd_buffer, barrier->src_access_mask);
+ cmd_buffer->state.flush_bits |= radv_src_access_flush(cmd_buffer, barrier->src_access_mask,
+ NULL);
radv_stage_flush(cmd_buffer, barrier->src_stage_mask);
cmd_buffer->state.flush_bits |= radv_dst_access_flush(cmd_buffer, barrier->dst_access_mask,
NULL);
}
static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buffer,
- VkAttachmentReference att)
+ struct radv_subpass_attachment att)
{
unsigned idx = att.attachment;
struct radv_image_view *view = cmd_buffer->state.framebuffer->attachments[idx].attachment;
radv_handle_image_transition(cmd_buffer,
view->image,
cmd_buffer->state.attachments[idx].current_layout,
- att.layout, 0, 0, &range,
- cmd_buffer->state.attachments[idx].pending_clear_aspects);
+ att.layout, 0, 0, &range);
cmd_buffer->state.attachments[idx].current_layout = att.layout;
return radv_reset_cmd_buffer(cmd_buffer);
}
-static void emit_gfx_buffer_state(struct radv_cmd_buffer *cmd_buffer)
-{
- struct radv_device *device = cmd_buffer->device;
- if (device->gfx_init) {
- uint64_t va = radv_buffer_get_va(device->gfx_init);
- radv_cs_add_buffer(device->ws, cmd_buffer->cs, device->gfx_init, 8);
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
- radeon_emit(cmd_buffer->cs, va);
- radeon_emit(cmd_buffer->cs, va >> 32);
- radeon_emit(cmd_buffer->cs, device->gfx_init_size_dw & 0xffff);
- } else
- si_init_config(cmd_buffer);
-}
-
VkResult radv_BeginCommandBuffer(
VkCommandBuffer commandBuffer,
const VkCommandBufferBeginInfo *pBeginInfo)
cmd_buffer->state.last_num_instances = -1;
cmd_buffer->state.last_vertex_offset = -1;
cmd_buffer->state.last_first_instance = -1;
+ cmd_buffer->state.predication_type = -1;
cmd_buffer->usage_flags = pBeginInfo->flags;
- /* setup initial configuration into command buffer */
- if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
- switch (cmd_buffer->queue_family_index) {
- case RADV_QUEUE_GENERAL:
- emit_gfx_buffer_state(cmd_buffer);
- break;
- case RADV_QUEUE_COMPUTE:
- si_init_compute(cmd_buffer);
- break;
- case RADV_QUEUE_TRANSFER:
- default:
- break;
- }
- }
-
- if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
+ if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
+ (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
assert(pBeginInfo->pInheritanceInfo);
cmd_buffer->state.framebuffer = radv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
cmd_buffer->state.pass = radv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
radv_cmd_buffer_set_subpass(cmd_buffer, subpass, false);
}
- if (unlikely(cmd_buffer->device->trace_bo))
+ if (unlikely(cmd_buffer->device->trace_bo)) {
+ struct radv_device *device = cmd_buffer->device;
+
+ radv_cs_add_buffer(device->ws, cmd_buffer->cs,
+ device->trace_bo);
+
radv_cmd_buffer_trace_emit(cmd_buffer);
+ }
cmd_buffer->status = RADV_CMD_BUFFER_STATUS_RECORDING;
vb[idx].offset = pOffsets[i];
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
- vb[idx].buffer->bo, 8);
+ vb[idx].buffer->bo);
}
if (!changed) {
int index_size_shift = cmd_buffer->state.index_type ? 2 : 1;
cmd_buffer->state.max_index_count = (index_buffer->size - offset) >> index_size_shift;
cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
- radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, index_buffer->bo, 8);
+ radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, index_buffer->bo);
}
struct radeon_winsys *ws = cmd_buffer->device->ws;
radv_set_descriptor_set(cmd_buffer, bind_point, set, idx);
- if (!set)
- return;
+ assert(set);
assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
if (!cmd_buffer->device->use_global_bo_list) {
for (unsigned j = 0; j < set->layout->buffer_count; ++j)
if (set->descriptors[j])
- radv_cs_add_buffer(ws, cmd_buffer->cs, set->descriptors[j], 7);
+ radv_cs_add_buffer(ws, cmd_buffer->cs, set->descriptors[j]);
}
if(set->bo)
- radv_cs_add_buffer(ws, cmd_buffer->cs, set->bo, 8);
+ radv_cs_add_buffer(ws, cmd_buffer->cs, set->bo);
}
void radv_CmdBindDescriptorSets(
unsigned dyn_idx = 0;
const bool no_dynamic_bounds = cmd_buffer->device->instance->debug_flags & RADV_DEBUG_NO_DYNAMIC_BOUNDS;
+ struct radv_descriptor_state *descriptors_state =
+ radv_get_descriptors_state(cmd_buffer, pipelineBindPoint);
for (unsigned i = 0; i < descriptorSetCount; ++i) {
unsigned idx = i + firstSet;
for(unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
unsigned idx = j + layout->set[i + firstSet].dynamic_offset_start;
- uint32_t *dst = cmd_buffer->dynamic_buffers + idx * 4;
+ uint32_t *dst = descriptors_state->dynamic_buffers + idx * 4;
assert(dyn_idx < dynamicOffsetCount);
struct radv_descriptor_range *range = set->dynamic_descriptors + j;
si_emit_cache_flush(cmd_buffer);
}
+ /* Make sure CP DMA is idle at the end of IBs because the kernel
+ * doesn't wait for it.
+ */
+ si_cp_dma_wait_for_idle(cmd_buffer);
+
vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs))
pipeline->max_waves * pipeline->scratch_bytes_per_wave);
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
- pipeline->shaders[MESA_SHADER_COMPUTE]->bo, 8);
+ pipeline->shaders[MESA_SHADER_COMPUTE]->bo);
if (unlikely(cmd_buffer->device->trace_bo))
radv_save_pipeline(cmd_buffer, pipeline, RING_COMPUTE);
cmd_buffer->state.prefetch_L2_mask |= RADV_PREFETCH_SHADERS;
radv_bind_dynamic_state(cmd_buffer, &pipeline->dynamic_state);
+ radv_bind_streamout_state(cmd_buffer, pipeline);
if (pipeline->graphics.esgs_ring_size > cmd_buffer->esgs_ring_size_needed)
cmd_buffer->esgs_ring_size_needed = pipeline->graphics.esgs_ring_size;
if (radv_pipeline_has_tess(pipeline))
cmd_buffer->tess_rings_needed = true;
-
- if (radv_pipeline_has_gs(pipeline)) {
- struct radv_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_GEOMETRY,
- AC_UD_SCRATCH_RING_OFFSETS);
- if (cmd_buffer->ring_offsets_idx == -1)
- cmd_buffer->ring_offsets_idx = loc->sgpr_idx;
- else if (loc->sgpr_idx != -1)
- assert(loc->sgpr_idx == cmd_buffer->ring_offsets_idx);
- }
break;
default:
assert(!"invalid bind point");
if (secondary->sample_positions_needed)
primary->sample_positions_needed = true;
- if (secondary->ring_offsets_idx != -1) {
- if (primary->ring_offsets_idx == -1)
- primary->ring_offsets_idx = secondary->ring_offsets_idx;
- else
- assert(secondary->ring_offsets_idx == primary->ring_offsets_idx);
- }
primary->device->ws->cs_execute_secondary(primary->cs, secondary->cs);
radv_cmd_buffer_clear_subpass(cmd_buffer);
}
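+/* VK_KHR_create_renderpass2 entry point: forwards to radv_CmdBeginRenderPass. */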
+void radv_CmdBeginRenderPass2KHR(
+ VkCommandBuffer commandBuffer,
+ const VkRenderPassBeginInfo* pRenderPassBeginInfo,
+ const VkSubpassBeginInfoKHR* pSubpassBeginInfo)
+{
+ radv_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
+ pSubpassBeginInfo->contents);
+}
+
void radv_CmdNextSubpass(
VkCommandBuffer commandBuffer,
VkSubpassContents contents)
radv_cmd_buffer_clear_subpass(cmd_buffer);
}
+void radv_CmdNextSubpass2KHR(
+ VkCommandBuffer commandBuffer,
+ const VkSubpassBeginInfoKHR* pSubpassBeginInfo,
+ const VkSubpassEndInfoKHR* pSubpassEndInfo)
+{
+ radv_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
+}
+
static void radv_emit_view_index(struct radv_cmd_buffer *cmd_buffer, unsigned index)
{
struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
for (unsigned stage = 0; stage < MESA_SHADER_STAGES; ++stage) {
- if (!pipeline->shaders[stage])
+ if (!radv_get_shader(pipeline, stage))
continue;
+
struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, AC_UD_VIEW_INDEX);
if (loc->sgpr_idx == -1)
continue;
static void
radv_cs_emit_draw_packet(struct radv_cmd_buffer *cmd_buffer,
- uint32_t vertex_count)
+ uint32_t vertex_count,
+ bool use_opaque)
{
radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, cmd_buffer->state.predicating));
radeon_emit(cmd_buffer->cs, vertex_count);
radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
- S_0287F0_USE_OPAQUE(0));
+ S_0287F0_USE_OPAQUE(use_opaque));
}
static void
uint64_t index_va,
uint32_t index_count)
{
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_2, 4, false));
+ radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_2, 4, cmd_buffer->state.predicating));
radeon_emit(cmd_buffer->cs, cmd_buffer->state.max_index_count);
radeon_emit(cmd_buffer->cs, index_va);
radeon_emit(cmd_buffer->cs, index_va >> 32);
uint64_t count_va,
uint32_t stride)
{
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
unsigned di_src_sel = indexed ? V_0287F0_DI_SRC_SEL_DMA
: V_0287F0_DI_SRC_SEL_AUTO_INDEX;
bool draw_id_enable = radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.info.vs.needs_draw_id;
uint32_t base_reg = cmd_buffer->state.pipeline->graphics.vtx_base_sgpr;
+ bool predicating = cmd_buffer->state.predicating;
assert(base_reg);
/* just reset draw state for vertex data */
if (draw_count == 1 && !count_va && !draw_id_enable) {
radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT :
- PKT3_DRAW_INDIRECT, 3, false));
+ PKT3_DRAW_INDIRECT, 3, predicating));
radeon_emit(cs, 0);
radeon_emit(cs, (base_reg - SI_SH_REG_OFFSET) >> 2);
radeon_emit(cs, ((base_reg + 4) - SI_SH_REG_OFFSET) >> 2);
} else {
radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
PKT3_DRAW_INDIRECT_MULTI,
- 8, false));
+ 8, predicating));
radeon_emit(cs, 0);
radeon_emit(cs, (base_reg - SI_SH_REG_OFFSET) >> 2);
radeon_emit(cs, ((base_reg + 4) - SI_SH_REG_OFFSET) >> 2);
*/
struct radv_buffer *count_buffer;
uint64_t count_buffer_offset;
+
+ /**
+ * Stream output parameters resource.
+ */
+ struct radv_buffer *strmout_buffer;
+ uint64_t strmout_buffer_offset;
};
static void
{
struct radv_cmd_state *state = &cmd_buffer->state;
struct radeon_winsys *ws = cmd_buffer->device->ws;
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
+
+ if (info->strmout_buffer) {
+ uint64_t va = radv_buffer_get_va(info->strmout_buffer->bo);
+
+ va += info->strmout_buffer->offset +
+ info->strmout_buffer_offset;
+
+ radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
+ info->stride);
+
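+		/* Copy the byte count written by streamout (at 'va') into
+		 * VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE; the USE_OPAQUE
+		 * draw below derives its vertex count from this value and the
+		 * stride programmed above.
+		 */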
+ radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
+ radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
+ COPY_DATA_DST_SEL(COPY_DATA_REG) |
+ COPY_DATA_WR_CONFIRM);
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
+ radeon_emit(cs, 0); /* unused */
+
+ radv_cs_add_buffer(ws, cs, info->strmout_buffer->bo);
+ }
if (info->indirect) {
uint64_t va = radv_buffer_get_va(info->indirect->bo);
va += info->indirect->offset + info->indirect_offset;
- radv_cs_add_buffer(ws, cs, info->indirect->bo, 8);
+ radv_cs_add_buffer(ws, cs, info->indirect->bo);
radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
radeon_emit(cs, 1);
count_va += info->count_buffer->offset +
info->count_buffer_offset;
- radv_cs_add_buffer(ws, cs, info->count_buffer->bo, 8);
+ radv_cs_add_buffer(ws, cs, info->count_buffer->bo);
}
if (!state->subpass->view_mask) {
}
} else {
if (!state->subpass->view_mask) {
- radv_cs_emit_draw_packet(cmd_buffer, info->count);
+ radv_cs_emit_draw_packet(cmd_buffer,
+ info->count,
+ !!info->strmout_buffer);
} else {
unsigned i;
for_each_bit(i, state->subpass->view_mask) {
radv_emit_view_index(cmd_buffer, i);
radv_cs_emit_draw_packet(cmd_buffer,
- info->count);
+ info->count,
+ !!info->strmout_buffer);
}
}
}
uint32_t used_states = cmd_buffer->state.pipeline->graphics.needed_dynamic_state | ~RADV_CMD_DIRTY_DYNAMIC_ALL;
- /* Index & Vertex buffer don't change context regs, and pipeline is handled later. */
- used_states &= ~(RADV_CMD_DIRTY_INDEX_BUFFER | RADV_CMD_DIRTY_VERTEX_BUFFER | RADV_CMD_DIRTY_PIPELINE);
+ /* Index, vertex and streamout buffers don't change context regs, and
+ * pipeline is handled later.
+ */
+ used_states &= ~(RADV_CMD_DIRTY_INDEX_BUFFER |
+ RADV_CMD_DIRTY_VERTEX_BUFFER |
+ RADV_CMD_DIRTY_STREAMOUT_BUFFER |
+ RADV_CMD_DIRTY_PIPELINE);
/* Assume all state changes except these two can imply context rolls. */
if (cmd_buffer->state.dirty & used_states)
radv_draw(struct radv_cmd_buffer *cmd_buffer,
const struct radv_draw_info *info)
{
+ struct radeon_info *rad_info =
+ &cmd_buffer->device->physical_device->rad_info;
bool has_prefetch =
cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
bool pipeline_is_dirty =
(cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) &&
- cmd_buffer->state.pipeline &&
cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline;
MAYBE_UNUSED unsigned cdw_max =
}
}
+ /* Workaround for a VGT hang when streamout is enabled.
+ * It must be done after drawing.
+ */
+ if (cmd_buffer->state.streamout.streamout_enabled &&
+ (rad_info->family == CHIP_HAWAII ||
+ rad_info->family == CHIP_TONGA ||
+ rad_info->family == CHIP_FIJI)) {
+ cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VGT_STREAMOUT_SYNC;
+ }
+
assert(cmd_buffer->cs->cdw <= cdw_max);
radv_cmd_buffer_after_draw(cmd_buffer, RADV_CMD_FLAG_PS_PARTIAL_FLUSH);
}
struct radv_shader_variant *compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
unsigned dispatch_initiator = cmd_buffer->device->dispatch_initiator;
struct radeon_winsys *ws = cmd_buffer->device->ws;
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ bool predicating = cmd_buffer->state.predicating;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
struct radv_userdata_info *loc;
loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_COMPUTE,
va += info->indirect->offset + info->indirect_offset;
- radv_cs_add_buffer(ws, cs, info->indirect->bo, 8);
+ radv_cs_add_buffer(ws, cs, info->indirect->bo);
if (loc->sgpr_idx != -1) {
for (unsigned i = 0; i < 3; ++i) {
radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
- radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
+ radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
COPY_DATA_DST_SEL(COPY_DATA_REG));
radeon_emit(cs, (va + 4 * i));
radeon_emit(cs, (va + 4 * i) >> 32);
}
if (radv_cmd_buffer_uses_mec(cmd_buffer)) {
- radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 2, 0) |
+ radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 2, predicating) |
PKT3_SHADER_TYPE_S(1));
radeon_emit(cs, va);
radeon_emit(cs, va >> 32);
radeon_emit(cs, va);
radeon_emit(cs, va >> 32);
- radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, 0) |
+ radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, predicating) |
PKT3_SHADER_TYPE_S(1));
radeon_emit(cs, 0);
radeon_emit(cs, dispatch_initiator);
dispatch_initiator |= S_00B800_FORCE_START_AT_000(1);
}
- radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, 0) |
+ radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, predicating) |
PKT3_SHADER_TYPE_S(1));
radeon_emit(cs, blocks[0]);
radeon_emit(cs, blocks[1]);
for (unsigned i = 0; i < cmd_buffer->state.framebuffer->attachment_count; ++i) {
VkImageLayout layout = cmd_buffer->state.pass->attachments[i].final_layout;
radv_handle_subpass_image_transition(cmd_buffer,
- (VkAttachmentReference){i, layout});
+ (struct radv_subpass_attachment){i, layout});
}
vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
cmd_buffer->state.framebuffer = NULL;
}
+void radv_CmdEndRenderPass2KHR(
+ VkCommandBuffer commandBuffer,
+ const VkSubpassEndInfoKHR* pSubpassEndInfo)
+{
+ radv_CmdEndRenderPass(commandBuffer);
+}
+
/*
* For HTILE we have the following interesting clear words:
* 0xfffff30f: Uncompressed, full depth range, for depth+stencil HTILE
assert(range->levelCount == 1 || range->levelCount == VK_REMAINING_ARRAY_LAYERS);
unsigned layer_count = radv_get_layerCount(image, range);
uint64_t size = image->surface.htile_slice_size * layer_count;
+ VkImageAspectFlags aspects = VK_IMAGE_ASPECT_DEPTH_BIT;
uint64_t offset = image->offset + image->htile_offset +
image->surface.htile_slice_size * range->baseArrayLayer;
struct radv_cmd_state *state = &cmd_buffer->state;
+ VkClearDepthStencilValue value = {};
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
- /* Initialize the depth clear registers and update the ZRANGE_PRECISION
- * value for the TC-compat bug (because ZRANGE_PRECISION is 1 by
- * default). This is only needed whean clearing Z to 0.0f.
- */
- if (radv_image_is_tc_compat_htile(image) && clear_word == 0) {
- VkImageAspectFlags aspects = VK_IMAGE_ASPECT_DEPTH_BIT;
- VkClearDepthStencilValue value = {};
+ if (vk_format_is_stencil(image->vk_format))
+ aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
- if (vk_format_is_stencil(image->vk_format))
- aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
+ radv_set_ds_clear_metadata(cmd_buffer, image, value, aspects);
- radv_set_ds_clear_metadata(cmd_buffer, image, value, aspects);
+ if (radv_image_is_tc_compat_htile(image)) {
+		/* Initialize the TC-compat metadata value to 0 because by
+		 * default DB_Z_INFO.ZRANGE_PRECISION is set to 1, and we only
+		 * have to conditionally update its value when performing a
+		 * fast depth clear.
+ */
+ radv_set_tc_compat_zrange_metadata(cmd_buffer, image, 0);
}
}
VkImageLayout dst_layout,
unsigned src_queue_mask,
unsigned dst_queue_mask,
- const VkImageSubresourceRange *range,
- VkImageAspectFlags pending_clears)
+ const VkImageSubresourceRange *range)
{
if (!radv_image_has_htile(image))
return;
- if (dst_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL &&
- (pending_clears & vk_format_aspects(image->vk_format)) == vk_format_aspects(image->vk_format) &&
- cmd_buffer->state.render_area.offset.x == 0 && cmd_buffer->state.render_area.offset.y == 0 &&
- cmd_buffer->state.render_area.extent.width == image->info.width &&
- cmd_buffer->state.render_area.extent.height == image->info.height) {
- /* The clear will initialize htile. */
- return;
- } else if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED &&
+ if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED &&
radv_layout_has_htile(image, dst_layout, dst_queue_mask)) {
/* TODO: merge with the clear if applicable */
radv_initialize_htile(cmd_buffer, image, range, 0);
if (radv_image_has_dcc(image)) {
uint32_t value = 0xffffffffu; /* Fully expanded mode. */
+ bool need_decompress_pass = false;
if (radv_layout_dcc_compressed(image, dst_layout,
dst_queue_mask)) {
value = 0x20202020u;
+ need_decompress_pass = true;
}
radv_initialize_dcc(cmd_buffer, image, value);
+
+ radv_update_fce_metadata(cmd_buffer, image,
+ need_decompress_pass);
+ }
+
+ if (radv_image_has_cmask(image) || radv_image_has_dcc(image)) {
+ uint32_t color_values[2] = {};
+ radv_set_color_clear_metadata(cmd_buffer, image, color_values);
}
}
VkImageLayout dst_layout,
uint32_t src_family,
uint32_t dst_family,
- const VkImageSubresourceRange *range,
- VkImageAspectFlags pending_clears)
+ const VkImageSubresourceRange *range)
{
if (image->exclusive && src_family != dst_family) {
/* This is an acquire or a release operation and there will be
radv_handle_depth_image_transition(cmd_buffer, image,
src_layout, dst_layout,
src_queue_mask, dst_queue_mask,
- range, pending_clears);
+ range);
} else {
radv_handle_color_image_transition(cmd_buffer, image,
src_layout, dst_layout,
}
}
-void radv_CmdPipelineBarrier(
- VkCommandBuffer commandBuffer,
- VkPipelineStageFlags srcStageMask,
- VkPipelineStageFlags destStageMask,
- VkBool32 byRegion,
- uint32_t memoryBarrierCount,
- const VkMemoryBarrier* pMemoryBarriers,
- uint32_t bufferMemoryBarrierCount,
- const VkBufferMemoryBarrier* pBufferMemoryBarriers,
- uint32_t imageMemoryBarrierCount,
- const VkImageMemoryBarrier* pImageMemoryBarriers)
-{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
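+/* Barrier parameters shared by CmdPipelineBarrier and CmdWaitEvents. */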
+struct radv_barrier_info {
+ uint32_t eventCount;
+ const VkEvent *pEvents;
+ VkPipelineStageFlags srcStageMask;
+};
+
+static void
+radv_barrier(struct radv_cmd_buffer *cmd_buffer,
+ uint32_t memoryBarrierCount,
+ const VkMemoryBarrier *pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount,
+ const VkBufferMemoryBarrier *pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount,
+ const VkImageMemoryBarrier *pImageMemoryBarriers,
+ const struct radv_barrier_info *info)
+{
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
enum radv_cmd_flush_bits src_flush_bits = 0;
enum radv_cmd_flush_bits dst_flush_bits = 0;
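+	/* For WaitEvents: make the CP wait until each event object has been
+	 * signaled (its value equals 1) before processing the barrier.
+	 */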
+ for (unsigned i = 0; i < info->eventCount; ++i) {
+ RADV_FROM_HANDLE(radv_event, event, info->pEvents[i]);
+ uint64_t va = radv_buffer_get_va(event->bo);
+
+ radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo);
+
+ MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
+
+ radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, va, 1, 0xffffffff);
+ assert(cmd_buffer->cs->cdw <= cdw_max);
+ }
+
for (uint32_t i = 0; i < memoryBarrierCount; i++) {
- src_flush_bits |= radv_src_access_flush(cmd_buffer, pMemoryBarriers[i].srcAccessMask);
+ src_flush_bits |= radv_src_access_flush(cmd_buffer, pMemoryBarriers[i].srcAccessMask,
+ NULL);
dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pMemoryBarriers[i].dstAccessMask,
NULL);
}
for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
- src_flush_bits |= radv_src_access_flush(cmd_buffer, pBufferMemoryBarriers[i].srcAccessMask);
+ src_flush_bits |= radv_src_access_flush(cmd_buffer, pBufferMemoryBarriers[i].srcAccessMask,
+ NULL);
dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pBufferMemoryBarriers[i].dstAccessMask,
NULL);
}
for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
- src_flush_bits |= radv_src_access_flush(cmd_buffer, pImageMemoryBarriers[i].srcAccessMask);
+
+ src_flush_bits |= radv_src_access_flush(cmd_buffer, pImageMemoryBarriers[i].srcAccessMask,
+ image);
dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pImageMemoryBarriers[i].dstAccessMask,
image);
}
- radv_stage_flush(cmd_buffer, srcStageMask);
+ radv_stage_flush(cmd_buffer, info->srcStageMask);
cmd_buffer->state.flush_bits |= src_flush_bits;
for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
pImageMemoryBarriers[i].newLayout,
pImageMemoryBarriers[i].srcQueueFamilyIndex,
pImageMemoryBarriers[i].dstQueueFamilyIndex,
- &pImageMemoryBarriers[i].subresourceRange,
- 0);
+ &pImageMemoryBarriers[i].subresourceRange);
}
+ /* Make sure CP DMA is idle because the driver might have performed a
+ * DMA operation for copying or filling buffers/images.
+ */
+ if (info->srcStageMask & (VK_PIPELINE_STAGE_TRANSFER_BIT |
+ VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT))
+ si_cp_dma_wait_for_idle(cmd_buffer);
+
cmd_buffer->state.flush_bits |= dst_flush_bits;
}
+void radv_CmdPipelineBarrier(
+ VkCommandBuffer commandBuffer,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags destStageMask,
+ VkBool32 byRegion,
+ uint32_t memoryBarrierCount,
+ const VkMemoryBarrier* pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount,
+ const VkBufferMemoryBarrier* pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount,
+ const VkImageMemoryBarrier* pImageMemoryBarriers)
+{
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ struct radv_barrier_info info;
+
+ info.eventCount = 0;
+ info.pEvents = NULL;
+ info.srcStageMask = srcStageMask;
+
+ radv_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
+ bufferMemoryBarrierCount, pBufferMemoryBarriers,
+ imageMemoryBarrierCount, pImageMemoryBarriers, &info);
+}
+
static void write_event(struct radv_cmd_buffer *cmd_buffer,
struct radv_event *event,
VkPipelineStageFlags stageMask,
unsigned value)
{
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint64_t va = radv_buffer_get_va(event->bo);
- radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo, 8);
+ si_emit_cache_flush(cmd_buffer);
+
+ radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo);
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 18);
- /* TODO: this is overkill. Probably should figure something out from
- * the stage mask. */
+ /* Flags that only require a top-of-pipe event. */
+ VkPipelineStageFlags top_of_pipe_flags =
+ VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+
+ /* Flags that only require a post-index-fetch event. */
+ VkPipelineStageFlags post_index_fetch_flags =
+ top_of_pipe_flags |
+ VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
+ VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
- si_cs_emit_write_event_eop(cs,
- cmd_buffer->state.predicating,
- cmd_buffer->device->physical_device->rad_info.chip_class,
- radv_cmd_buffer_uses_mec(cmd_buffer),
- V_028A90_BOTTOM_OF_PIPE_TS, 0,
- 1, va, 2, value);
+ /* Make sure CP DMA is idle because the driver might have performed a
+ * DMA operation for copying or filling buffers/images.
+ */
+ if (stageMask & (VK_PIPELINE_STAGE_TRANSFER_BIT |
+ VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT))
+ si_cp_dma_wait_for_idle(cmd_buffer);
+
+ /* TODO: Emit EOS events for syncing PS/CS stages. */
+
+ if (!(stageMask & ~top_of_pipe_flags)) {
+ /* Just need to sync the PFP engine. */
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
+ radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
+ S_370_WR_CONFIRM(1) |
+ S_370_ENGINE_SEL(V_370_PFP));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, value);
+ } else if (!(stageMask & ~post_index_fetch_flags)) {
+ /* Sync ME because PFP reads index and indirect buffers. */
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
+ radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
+ S_370_WR_CONFIRM(1) |
+ S_370_ENGINE_SEL(V_370_ME));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, value);
+ } else {
+ /* Otherwise, sync all prior GPU work using an EOP event. */
+ si_cs_emit_write_event_eop(cs,
+ cmd_buffer->device->physical_device->rad_info.chip_class,
+ radv_cmd_buffer_uses_mec(cmd_buffer),
+ V_028A90_BOTTOM_OF_PIPE_TS, 0,
+ EOP_DATA_SEL_VALUE_32BIT, va, 2, value,
+ cmd_buffer->gfx9_eop_bug_va);
+ }
assert(cmd_buffer->cs->cdw <= cdw_max);
}
const VkImageMemoryBarrier* pImageMemoryBarriers)
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radv_barrier_info info;
- for (unsigned i = 0; i < eventCount; ++i) {
- RADV_FROM_HANDLE(radv_event, event, pEvents[i]);
- uint64_t va = radv_buffer_get_va(event->bo);
+ info.eventCount = eventCount;
+ info.pEvents = pEvents;
+ info.srcStageMask = 0;
- radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo, 8);
+ radv_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
+ bufferMemoryBarrierCount, pBufferMemoryBarriers,
+ imageMemoryBarrierCount, pImageMemoryBarriers, &info);
+}
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
- si_emit_wait_fence(cs, false, va, 1, 0xffffffff);
- assert(cmd_buffer->cs->cdw <= cdw_max);
+void radv_CmdSetDeviceMask(VkCommandBuffer commandBuffer,
+ uint32_t deviceMask)
+{
+ /* No-op */
+}
+
+/* VK_EXT_conditional_rendering */
+void radv_CmdBeginConditionalRenderingEXT(
+ VkCommandBuffer commandBuffer,
+ const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin)
+{
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ RADV_FROM_HANDLE(radv_buffer, buffer, pConditionalRenderingBegin->buffer);
+ bool draw_visible = true;
+ uint64_t va;
+
+ va = radv_buffer_get_va(buffer->bo) + pConditionalRenderingBegin->offset;
+
+ /* By default, if the 32-bit value at offset in buffer memory is zero,
+ * then the rendering commands are discarded, otherwise they are
+ * executed as normal. If the inverted flag is set, all commands are
+ * discarded if the value is non-zero.
+ */
+ if (pConditionalRenderingBegin->flags &
+ VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT) {
+ draw_visible = false;
}
+ /* Enable predication for this command buffer. */
+ si_emit_set_predication_state(cmd_buffer, draw_visible, va);
+ cmd_buffer->state.predicating = true;
- for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
- RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
+ /* Store conditional rendering user info. */
+ cmd_buffer->state.predication_type = draw_visible;
+ cmd_buffer->state.predication_va = va;
+}
- radv_handle_image_transition(cmd_buffer, image,
- pImageMemoryBarriers[i].oldLayout,
- pImageMemoryBarriers[i].newLayout,
- pImageMemoryBarriers[i].srcQueueFamilyIndex,
- pImageMemoryBarriers[i].dstQueueFamilyIndex,
- &pImageMemoryBarriers[i].subresourceRange,
- 0);
+void radv_CmdEndConditionalRenderingEXT(
+ VkCommandBuffer commandBuffer)
+{
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+
+ /* Disable predication for this command buffer. */
+ si_emit_set_predication_state(cmd_buffer, false, 0);
+ cmd_buffer->state.predicating = false;
+
+ /* Reset conditional rendering user info. */
+ cmd_buffer->state.predication_type = -1;
+ cmd_buffer->state.predication_va = 0;
+}
+
+/* VK_EXT_transform_feedback */
+void radv_CmdBindTransformFeedbackBuffersEXT(
+ VkCommandBuffer commandBuffer,
+ uint32_t firstBinding,
+ uint32_t bindingCount,
+ const VkBuffer* pBuffers,
+ const VkDeviceSize* pOffsets,
+ const VkDeviceSize* pSizes)
+{
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ struct radv_streamout_binding *sb = cmd_buffer->streamout_bindings;
+ uint8_t enabled_mask = 0;
+
+ assert(firstBinding + bindingCount <= MAX_SO_BUFFERS);
+ for (uint32_t i = 0; i < bindingCount; i++) {
+ uint32_t idx = firstBinding + i;
+
+ sb[idx].buffer = radv_buffer_from_handle(pBuffers[i]);
+ sb[idx].offset = pOffsets[i];
+ sb[idx].size = pSizes[i];
+
+ radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
+ sb[idx].buffer->bo);
+
+ enabled_mask |= 1 << idx;
}
- /* TODO: figure out how to do memory barriers without waiting */
- cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER |
- RADV_CMD_FLAG_INV_GLOBAL_L2 |
- RADV_CMD_FLAG_INV_VMEM_L1 |
- RADV_CMD_FLAG_INV_SMEM_L1;
+ cmd_buffer->state.streamout.enabled_mask = enabled_mask;
+
+ cmd_buffer->state.dirty |= RADV_CMD_DIRTY_STREAMOUT_BUFFER;
}
+static void
+radv_emit_streamout_enable(struct radv_cmd_buffer *cmd_buffer)
+{
+ struct radv_streamout_state *so = &cmd_buffer->state.streamout;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
-void radv_CmdSetDeviceMask(VkCommandBuffer commandBuffer,
- uint32_t deviceMask)
+ radeon_set_context_reg_seq(cs, R_028B94_VGT_STRMOUT_CONFIG, 2);
+ radeon_emit(cs,
+ S_028B94_STREAMOUT_0_EN(so->streamout_enabled) |
+ S_028B94_RAST_STREAM(0) |
+ S_028B94_STREAMOUT_1_EN(so->streamout_enabled) |
+ S_028B94_STREAMOUT_2_EN(so->streamout_enabled) |
+ S_028B94_STREAMOUT_3_EN(so->streamout_enabled));
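+ /* VGT_STRMOUT_BUFFER_CONFIG: enable only the buffers that are both
+ * bound and written by the current streamout shader.
+ */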
+ radeon_emit(cs, so->hw_enabled_mask &
+ so->enabled_stream_buffers_mask);
+}
+
+static void
+radv_set_streamout_enable(struct radv_cmd_buffer *cmd_buffer, bool enable)
{
- /* No-op */
+ struct radv_streamout_state *so = &cmd_buffer->state.streamout;
+ bool old_streamout_enabled = so->streamout_enabled;
+ uint32_t old_hw_enabled_mask = so->hw_enabled_mask;
+
+ so->streamout_enabled = enable;
+
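+ /* Replicate the bound-buffer mask for each of the four GS streams;
+ * this is the layout that VGT_STRMOUT_BUFFER_CONFIG expects.
+ */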
+ so->hw_enabled_mask = so->enabled_mask |
+ (so->enabled_mask << 4) |
+ (so->enabled_mask << 8) |
+ (so->enabled_mask << 12);
+
+ if ((old_streamout_enabled != so->streamout_enabled) ||
+ (old_hw_enabled_mask != so->hw_enabled_mask))
+ radv_emit_streamout_enable(cmd_buffer);
+}
+
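+/* Flush VGT streamout and wait until the CP signals that the streamout
+ * buffer offsets have been written back (OFFSET_UPDATE_DONE).
+ */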
+static void radv_flush_vgt_streamout(struct radv_cmd_buffer *cmd_buffer)
+{
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
+ unsigned reg_strmout_cntl;
+
+ /* The register is at different places on different ASICs. */
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
+ reg_strmout_cntl = R_0300FC_CP_STRMOUT_CNTL;
+ radeon_set_uconfig_reg(cs, reg_strmout_cntl, 0);
+ } else {
+ reg_strmout_cntl = R_0084FC_CP_STRMOUT_CNTL;
+ radeon_set_config_reg(cs, reg_strmout_cntl, 0);
+ }
+
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0));
+
+ radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
+ radeon_emit(cs, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */
+ radeon_emit(cs, reg_strmout_cntl >> 2); /* register */
+ radeon_emit(cs, 0);
+ radeon_emit(cs, S_0084FC_OFFSET_UPDATE_DONE(1)); /* reference value */
+ radeon_emit(cs, S_0084FC_OFFSET_UPDATE_DONE(1)); /* mask */
+ radeon_emit(cs, 4); /* poll interval */
+}
+
+void radv_CmdBeginTransformFeedbackEXT(
+ VkCommandBuffer commandBuffer,
+ uint32_t firstCounterBuffer,
+ uint32_t counterBufferCount,
+ const VkBuffer* pCounterBuffers,
+ const VkDeviceSize* pCounterBufferOffsets)
+{
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ struct radv_streamout_binding *sb = cmd_buffer->streamout_bindings;
+ struct radv_streamout_state *so = &cmd_buffer->state.streamout;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
+ uint32_t i;
+
+ radv_flush_vgt_streamout(cmd_buffer);
+
+ assert(firstCounterBuffer + counterBufferCount <= MAX_SO_BUFFERS);
+ for_each_bit(i, so->enabled_mask) {
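+ /* pCounterBuffers[0] corresponds to binding firstCounterBuffer;
+ * bindings outside the provided range have no counter buffer.
+ */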
+ int32_t counter_buffer_idx = i - firstCounterBuffer;
+ if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount)
+ counter_buffer_idx = -1;
+
+ /* SI binds streamout buffers as shader resources.
+ * VGT only counts primitives and tells the shader through
+ * SGPRs what to do.
+ */
+ radeon_set_context_reg_seq(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 2);
+ radeon_emit(cs, sb[i].size >> 2); /* BUFFER_SIZE (in DW) */
+ radeon_emit(cs, so->stride_in_dw[i]); /* VTX_STRIDE (in DW) */
+
+ if (counter_buffer_idx >= 0 && pCounterBuffers && pCounterBuffers[counter_buffer_idx]) {
+ /* The array of counter buffers is optional. */
+ RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]);
+ uint64_t va = radv_buffer_get_va(buffer->bo);
+
+ va += buffer->offset + pCounterBufferOffsets[counter_buffer_idx];
+
+ /* Append: resume at the BufferFilledSize previously stored in the
+ * counter buffer.
+ */
+ radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
+ radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
+ STRMOUT_DATA_TYPE(1) | /* offset in bytes */
+ STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_MEM)); /* control */
+ radeon_emit(cs, 0); /* unused */
+ radeon_emit(cs, 0); /* unused */
+ radeon_emit(cs, va); /* src address lo */
+ radeon_emit(cs, va >> 32); /* src address hi */
+
+ radv_cs_add_buffer(cmd_buffer->device->ws, cs, buffer->bo);
+ } else {
+ /* Start from the beginning. */
+ radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
+ radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
+ STRMOUT_DATA_TYPE(1) | /* offset in bytes */
+ STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_PACKET)); /* control */
+ radeon_emit(cs, 0); /* unused */
+ radeon_emit(cs, 0); /* unused */
+ radeon_emit(cs, 0); /* unused */
+ radeon_emit(cs, 0); /* unused */
+ }
+ }
+
+ radv_set_streamout_enable(cmd_buffer, true);
+}
+
+void radv_CmdEndTransformFeedbackEXT(
+ VkCommandBuffer commandBuffer,
+ uint32_t firstCounterBuffer,
+ uint32_t counterBufferCount,
+ const VkBuffer* pCounterBuffers,
+ const VkDeviceSize* pCounterBufferOffsets)
+{
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ struct radv_streamout_state *so = &cmd_buffer->state.streamout;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
+ uint32_t i;
+
+ radv_flush_vgt_streamout(cmd_buffer);
+
+ assert(firstCounterBuffer + counterBufferCount <= MAX_SO_BUFFERS);
+ for_each_bit(i, so->enabled_mask) {
+ int32_t counter_buffer_idx = i - firstCounterBuffer;
+ if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount)
+ counter_buffer_idx = -1;
+
+ if (counter_buffer_idx >= 0 && pCounterBuffers && pCounterBuffers[counter_buffer_idx]) {
+ /* The array of counter buffers is optional. */
+ RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]);
+ uint64_t va = radv_buffer_get_va(buffer->bo);
+
+ va += buffer->offset + pCounterBufferOffsets[counter_buffer_idx];
+
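+ /* Store the current BufferFilledSize into the counter buffer so
+ * that a later resume can continue from it.
+ */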
+ radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
+ radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
+ STRMOUT_DATA_TYPE(1) | /* offset in bytes */
+ STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_NONE) |
+ STRMOUT_STORE_BUFFER_FILLED_SIZE); /* control */
+ radeon_emit(cs, va); /* dst address lo */
+ radeon_emit(cs, va >> 32); /* dst address hi */
+ radeon_emit(cs, 0); /* unused */
+ radeon_emit(cs, 0); /* unused */
+
+ radv_cs_add_buffer(cmd_buffer->device->ws, cs, buffer->bo);
+ }
+
+ /* Deactivate transform feedback by zeroing the buffer size.
+ * The counters (primitives generated, primitives emitted) may
+ * be enabled even if there is no buffer bound. This ensures
+ * that the primitives-emitted query won't increment.
+ */
+ radeon_set_context_reg(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 0);
+ }
+
+ radv_set_streamout_enable(cmd_buffer, false);
+}
+
+void radv_CmdDrawIndirectByteCountEXT(
+ VkCommandBuffer commandBuffer,
+ uint32_t instanceCount,
+ uint32_t firstInstance,
+ VkBuffer _counterBuffer,
+ VkDeviceSize counterBufferOffset,
+ uint32_t counterOffset,
+ uint32_t vertexStride)
+{
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ RADV_FROM_HANDLE(radv_buffer, counterBuffer, _counterBuffer);
+ struct radv_draw_info info = {};
+
+ info.instance_count = instanceCount;
+ info.first_instance = firstInstance;
+ info.strmout_buffer = counterBuffer;
+ info.strmout_buffer_offset = counterBufferOffset;
+ info.stride = vertexStride;
+
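+ /* The vertex count is not known by the CPU; it is derived at draw time
+ * from the byte count in the counter buffer divided by vertexStride.
+ */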
+ radv_draw(cmd_buffer, &info);
}