cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (cmd_buffer == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
cmd_buffer->device = device;
cmd_buffer->cs = device->ws->cs_create(device->ws, ring);
if (!cmd_buffer->cs) {
vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
*pCommandBuffer = radv_cmd_buffer_to_handle(cmd_buffer);
if (cmd_buffer->upload.upload_bo)
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
- cmd_buffer->upload.upload_bo, 8);
+ cmd_buffer->upload.upload_bo);
cmd_buffer->upload.offset = 0;
cmd_buffer->record_result = VK_SUCCESS;
}
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
+ unsigned num_db = cmd_buffer->device->physical_device->rad_info.num_render_backends;
+ unsigned eop_bug_offset;
void *fence_ptr;
+
radv_cmd_buffer_upload_alloc(cmd_buffer, 8, 0,
&cmd_buffer->gfx9_fence_offset,
&fence_ptr);
cmd_buffer->gfx9_fence_bo = cmd_buffer->upload.upload_bo;
+
+ /* Allocate a buffer for the EOP bug on GFX9. */
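+	/* Sizing assumption: the workaround lets the hardware write one
+	 * extra EOP event per render backend, hence 16 bytes per DB. */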
+ radv_cmd_buffer_upload_alloc(cmd_buffer, 16 * num_db, 0,
+ &eop_bug_offset, &fence_ptr);
+ cmd_buffer->gfx9_eop_bug_va =
+ radv_buffer_get_va(cmd_buffer->upload.upload_bo);
+ cmd_buffer->gfx9_eop_bug_va += eop_bug_offset;
}
cmd_buffer->status = RADV_CMD_BUFFER_STATUS_INITIAL;
return false;
}
- radv_cs_add_buffer(device->ws, cmd_buffer->cs, bo, 8);
+ radv_cs_add_buffer(device->ws, cmd_buffer->cs, bo);
if (cmd_buffer->upload.upload_bo) {
upload = malloc(sizeof(*upload));
}
static void
-radv_emit_write_data_packet(struct radeon_winsys_cs *cs, uint64_t va,
+radv_emit_write_data_packet(struct radeon_cmdbuf *cs, uint64_t va,
unsigned count, const uint32_t *data)
{
radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0));
void radv_cmd_buffer_trace_emit(struct radv_cmd_buffer *cmd_buffer)
{
struct radv_device *device = cmd_buffer->device;
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint64_t va;
va = radv_buffer_get_va(device->trace_bo);
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 7);
++cmd_buffer->state.trace_id;
- radv_cs_add_buffer(device->ws, cs, device->trace_bo, 8);
radv_emit_write_data_packet(cs, va, 1, &cmd_buffer->state.trace_id);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, AC_ENCODE_TRACE_POINT(cmd_buffer->state.trace_id));
cmd_buffer->device->physical_device->rad_info.chip_class,
ptr, va,
radv_cmd_buffer_uses_mec(cmd_buffer),
- flags);
+ flags, cmd_buffer->gfx9_eop_bug_va);
}
if (unlikely(cmd_buffer->device->trace_bo))
struct radv_pipeline *pipeline, enum ring_type ring)
{
struct radv_device *device = cmd_buffer->device;
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint32_t data[2];
uint64_t va;
data[0] = (uintptr_t)pipeline;
data[1] = (uintptr_t)pipeline >> 32;
- radv_cs_add_buffer(device->ws, cs, device->trace_bo, 8);
radv_emit_write_data_packet(cs, va, 2, data);
}
struct radv_descriptor_state *descriptors_state =
radv_get_descriptors_state(cmd_buffer, bind_point);
struct radv_device *device = cmd_buffer->device;
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint32_t data[MAX_SETS * 2] = {};
uint64_t va;
unsigned i;
data[i * 2 + 1] = (uintptr_t)set >> 32;
}
- radv_cs_add_buffer(device->ws, cs, device->trace_bo, 8);
radv_emit_write_data_packet(cs, va, MAX_SETS * 2, data);
}
gl_shader_stage stage,
int idx)
{
- if (stage == MESA_SHADER_VERTEX) {
- if (pipeline->shaders[MESA_SHADER_VERTEX])
- return &pipeline->shaders[MESA_SHADER_VERTEX]->info.user_sgprs_locs.shader_data[idx];
- if (pipeline->shaders[MESA_SHADER_TESS_CTRL])
- return &pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.user_sgprs_locs.shader_data[idx];
- if (pipeline->shaders[MESA_SHADER_GEOMETRY])
- return &pipeline->shaders[MESA_SHADER_GEOMETRY]->info.user_sgprs_locs.shader_data[idx];
- } else if (stage == MESA_SHADER_TESS_EVAL) {
- if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
- return &pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.user_sgprs_locs.shader_data[idx];
- if (pipeline->shaders[MESA_SHADER_GEOMETRY])
- return &pipeline->shaders[MESA_SHADER_GEOMETRY]->info.user_sgprs_locs.shader_data[idx];
- }
- return &pipeline->shaders[stage]->info.user_sgprs_locs.shader_data[idx];
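+	/* radv_get_shader() resolves merged stages (e.g. a vertex shader
+	 * merged into TCS or GS on GFX9+), matching the open-coded
+	 * fallbacks removed above. */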
+ struct radv_shader_variant *shader = radv_get_shader(pipeline, stage);
+ return &shader->info.user_sgprs_locs.shader_data[idx];
}
static void
base_reg + loc->sgpr_idx * 4, va, false);
}
+static void
+radv_emit_descriptor_pointers(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_pipeline *pipeline,
+ struct radv_descriptor_state *descriptors_state,
+ gl_shader_stage stage)
+{
+ struct radv_device *device = cmd_buffer->device;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
+ uint32_t sh_base = pipeline->user_data_0[stage];
+ struct radv_userdata_locations *locs =
+ &pipeline->shaders[stage]->info.user_sgprs_locs;
+ unsigned mask = locs->descriptor_sets_enabled;
+
+ mask &= descriptors_state->dirty & descriptors_state->valid;
+
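+	/* Only emit pointers for descriptor sets that the shader uses,
+	 * that are currently bound (valid) and that changed (dirty). */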
+ while (mask) {
+ int start, count;
+
+ u_bit_scan_consecutive_range(&mask, &start, &count);
+
+ struct radv_userdata_info *loc = &locs->descriptor_sets[start];
+ unsigned sh_offset = sh_base + loc->sgpr_idx * 4;
+
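+		/* Emit a single packet header for the whole consecutive
+		 * SGPR range, then one pointer body per set. */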
+ radv_emit_shader_pointer_head(cs, sh_offset, count,
+ HAVE_32BIT_POINTERS);
+ for (int i = 0; i < count; i++) {
+ struct radv_descriptor_set *set =
+ descriptors_state->sets[start + i];
+
+ radv_emit_shader_pointer_body(device, cs, set->va,
+ HAVE_32BIT_POINTERS);
+ }
+ }
+}
+
static void
radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer,
struct radv_pipeline *pipeline)
continue;
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
- pipeline->shaders[i]->bo, 8);
+ pipeline->shaders[i]->bo);
}
if (radv_pipeline_has_gs(pipeline))
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
- pipeline->gs_copy_shader->bo, 8);
+ pipeline->gs_copy_shader->bo);
if (unlikely(cmd_buffer->device->trace_bo))
radv_save_pipeline(cmd_buffer, pipeline, RING_GFX);
}
}
+static void
+radv_update_zrange_precision(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_ds_buffer_info *ds,
+ struct radv_image *image, VkImageLayout layout,
+ bool requires_cond_write)
+{
+ uint32_t db_z_info = ds->db_z_info;
+ uint32_t db_z_info_reg;
+
+ if (!radv_image_is_tc_compat_htile(image))
+ return;
+
+ if (!radv_layout_has_htile(image, layout,
+ radv_image_queue_family_mask(image,
+ cmd_buffer->queue_family_index,
+ cmd_buffer->queue_family_index))) {
+ db_z_info &= C_028040_TILE_SURFACE_ENABLE;
+ }
+
+ db_z_info &= C_028040_ZRANGE_PRECISION;
+
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
+ db_z_info_reg = R_028038_DB_Z_INFO;
+ } else {
+ db_z_info_reg = R_028040_DB_Z_INFO;
+ }
+
+	/* When we don't know the last fast clear value, we need to emit a
+	 * conditional packet; otherwise we can update DB_Z_INFO directly.
+	 */
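+	/* The conditional write polls the depth clear value in the image
+	 * metadata and only rewrites DB_Z_INFO when it equals 0.0f, the
+	 * only case that requires updating ZRANGE_PRECISION. */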
+ if (requires_cond_write) {
+ radeon_emit(cmd_buffer->cs, PKT3(PKT3_COND_WRITE, 7, 0));
+
+ const uint32_t write_space = 0 << 8; /* register */
+ const uint32_t poll_space = 1 << 4; /* memory */
+ const uint32_t function = 3 << 0; /* equal to the reference */
+ const uint32_t options = write_space | poll_space | function;
+ radeon_emit(cmd_buffer->cs, options);
+
+ /* poll address - location of the depth clear value */
+ uint64_t va = radv_buffer_get_va(image->bo);
+ va += image->offset + image->clear_value_offset;
+
+		/* In the presence of a stencil format, we have to adjust the
+		 * base address because the first value is the stencil clear
+		 * value.
+		 */
+ if (vk_format_is_stencil(image->vk_format))
+ va += 4;
+
+ radeon_emit(cmd_buffer->cs, va);
+ radeon_emit(cmd_buffer->cs, va >> 32);
+
+ radeon_emit(cmd_buffer->cs, fui(0.0f)); /* reference value */
+ radeon_emit(cmd_buffer->cs, (uint32_t)-1); /* comparison mask */
+ radeon_emit(cmd_buffer->cs, db_z_info_reg >> 2); /* write address low */
+ radeon_emit(cmd_buffer->cs, 0u); /* write address high */
+ radeon_emit(cmd_buffer->cs, db_z_info);
+ } else {
+ radeon_set_context_reg(cmd_buffer->cs, db_z_info_reg, db_z_info);
+ }
+}
+
static void
radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer,
struct radv_ds_buffer_info *ds,
}
+ /* Update the ZRANGE_PRECISION value for the TC-compat bug. */
+ radv_update_zrange_precision(cmd_buffer, ds, image, layout, true);
+
radeon_set_context_reg(cmd_buffer->cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
ds->pa_su_poly_offset_db_fmt_cntl);
}
-void
-radv_set_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer,
- struct radv_image *image,
- VkClearDepthStencilValue ds_clear_value,
- VkImageAspectFlags aspects)
+/**
+ * Update the fast clear depth/stencil values if the image is bound as a
+ * depth/stencil buffer.
+ */
+static void
+radv_update_bound_fast_clear_ds(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image,
+ VkClearDepthStencilValue ds_clear_value,
+ VkImageAspectFlags aspects)
+{
+ struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
+ const struct radv_subpass *subpass = cmd_buffer->state.subpass;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
+ struct radv_attachment_info *att;
+ uint32_t att_idx;
+
+ if (!framebuffer || !subpass)
+ return;
+
+ att_idx = subpass->depth_stencil_attachment.attachment;
+ if (att_idx == VK_ATTACHMENT_UNUSED)
+ return;
+
+ att = &framebuffer->attachments[att_idx];
+ if (att->attachment->image != image)
+ return;
+
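+	/* R_028028_DB_STENCIL_CLEAR and R_02802C_DB_DEPTH_CLEAR are
+	 * consecutive context registers, so both values fit in one packet. */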
+ radeon_set_context_reg_seq(cs, R_028028_DB_STENCIL_CLEAR, 2);
+ radeon_emit(cs, ds_clear_value.stencil);
+ radeon_emit(cs, fui(ds_clear_value.depth));
+
+ /* Update the ZRANGE_PRECISION value for the TC-compat bug. This is
+ * only needed when clearing Z to 0.0.
+ */
+ if ((aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
+ ds_clear_value.depth == 0.0) {
+ VkImageLayout layout = subpass->depth_stencil_attachment.layout;
+
+ radv_update_zrange_precision(cmd_buffer, &att->ds, image,
+ layout, false);
+ }
+}
+
+/**
+ * Set the clear depth/stencil values to the image's metadata.
+ */
+static void
+radv_set_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image,
+ VkClearDepthStencilValue ds_clear_value,
+ VkImageAspectFlags aspects)
{
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint64_t va = radv_buffer_get_va(image->bo);
- va += image->offset + image->clear_value_offset;
unsigned reg_offset = 0, reg_count = 0;
- assert(radv_image_has_htile(image));
+ va += image->offset + image->clear_value_offset;
if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
++reg_count;
if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
++reg_count;
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, 0));
- radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
- S_370_WR_CONFIRM(1) |
- S_370_ENGINE_SEL(V_370_PFP));
- radeon_emit(cmd_buffer->cs, va);
- radeon_emit(cmd_buffer->cs, va >> 32);
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, 0));
+ radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
+ S_370_WR_CONFIRM(1) |
+ S_370_ENGINE_SEL(V_370_PFP));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
- radeon_emit(cmd_buffer->cs, ds_clear_value.stencil);
+ radeon_emit(cs, ds_clear_value.stencil);
if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
- radeon_emit(cmd_buffer->cs, fui(ds_clear_value.depth));
+ radeon_emit(cs, fui(ds_clear_value.depth));
+}
- radeon_set_context_reg_seq(cmd_buffer->cs, R_028028_DB_STENCIL_CLEAR + 4 * reg_offset, reg_count);
- if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
- radeon_emit(cmd_buffer->cs, ds_clear_value.stencil); /* R_028028_DB_STENCIL_CLEAR */
- if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
- radeon_emit(cmd_buffer->cs, fui(ds_clear_value.depth)); /* R_02802C_DB_DEPTH_CLEAR */
+/**
+ * Update the clear depth/stencil values for this image.
+ */
+void
+radv_update_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image,
+ VkClearDepthStencilValue ds_clear_value,
+ VkImageAspectFlags aspects)
+{
+ assert(radv_image_has_htile(image));
+
+ radv_set_ds_clear_metadata(cmd_buffer, image, ds_clear_value, aspects);
+
+ radv_update_bound_fast_clear_ds(cmd_buffer, image, ds_clear_value,
+ aspects);
}
+/**
+ * Load the clear depth/stencil values from the image's metadata.
+ */
static void
-radv_load_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer,
- struct radv_image *image)
+radv_load_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image)
{
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
VkImageAspectFlags aspects = vk_format_aspects(image->vk_format);
uint64_t va = radv_buffer_get_va(image->bo);
- va += image->offset + image->clear_value_offset;
unsigned reg_offset = 0, reg_count = 0;
+ va += image->offset + image->clear_value_offset;
+
if (!radv_image_has_htile(image))
return;
if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
++reg_count;
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, 0));
- radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
- COPY_DATA_DST_SEL(COPY_DATA_REG) |
- (reg_count == 2 ? COPY_DATA_COUNT_SEL : 0));
- radeon_emit(cmd_buffer->cs, va);
- radeon_emit(cmd_buffer->cs, va >> 32);
- radeon_emit(cmd_buffer->cs, (R_028028_DB_STENCIL_CLEAR + 4 * reg_offset) >> 2);
- radeon_emit(cmd_buffer->cs, 0);
+ radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
+ radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
+ COPY_DATA_DST_SEL(COPY_DATA_REG) |
+ (reg_count == 2 ? COPY_DATA_COUNT_SEL : 0));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, (R_028028_DB_STENCIL_CLEAR + 4 * reg_offset) >> 2);
+ radeon_emit(cs, 0);
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
- radeon_emit(cmd_buffer->cs, 0);
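+	/* PFP_SYNC_ME stalls the PFP until the ME has executed the
+	 * COPY_DATA above, so following packets cannot race the register
+	 * update. */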
+ radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
+ radeon_emit(cs, 0);
}
/*
radeon_emit(cmd_buffer->cs, pred_val >> 32);
}
-void
-radv_set_color_clear_regs(struct radv_cmd_buffer *cmd_buffer,
- struct radv_image *image,
- int idx,
- uint32_t color_values[2])
+/**
+ * Update the fast clear color values if the image is bound as a color buffer.
+ */
+static void
+radv_update_bound_fast_clear_color(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image,
+ int cb_idx,
+ uint32_t color_values[2])
+{
+ struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
+ const struct radv_subpass *subpass = cmd_buffer->state.subpass;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
+ struct radv_attachment_info *att;
+ uint32_t att_idx;
+
+ if (!framebuffer || !subpass)
+ return;
+
+ att_idx = subpass->color_attachments[cb_idx].attachment;
+ if (att_idx == VK_ATTACHMENT_UNUSED)
+ return;
+
+ att = &framebuffer->attachments[att_idx];
+ if (att->attachment->image != image)
+ return;
+
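+	/* The CB_COLORn register blocks are 0x3c bytes apart, hence the
+	 * cb_idx * 0x3c offset. */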
+ radeon_set_context_reg_seq(cs, R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c, 2);
+ radeon_emit(cs, color_values[0]);
+ radeon_emit(cs, color_values[1]);
+}
+
+/**
+ * Set the clear color values to the image's metadata.
+ */
+static void
+radv_set_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image,
+ uint32_t color_values[2])
{
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint64_t va = radv_buffer_get_va(image->bo);
+
va += image->offset + image->clear_value_offset;
assert(radv_image_has_cmask(image) || radv_image_has_dcc(image));
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
- radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
- S_370_WR_CONFIRM(1) |
- S_370_ENGINE_SEL(V_370_PFP));
- radeon_emit(cmd_buffer->cs, va);
- radeon_emit(cmd_buffer->cs, va >> 32);
- radeon_emit(cmd_buffer->cs, color_values[0]);
- radeon_emit(cmd_buffer->cs, color_values[1]);
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 4, 0));
+ radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
+ S_370_WR_CONFIRM(1) |
+ S_370_ENGINE_SEL(V_370_PFP));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, color_values[0]);
+ radeon_emit(cs, color_values[1]);
+}
+
+/**
+ * Update the clear color values for this image.
+ */
+void
+radv_update_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image,
+ int cb_idx,
+ uint32_t color_values[2])
+{
+ assert(radv_image_has_cmask(image) || radv_image_has_dcc(image));
- radeon_set_context_reg_seq(cmd_buffer->cs, R_028C8C_CB_COLOR0_CLEAR_WORD0 + idx * 0x3c, 2);
- radeon_emit(cmd_buffer->cs, color_values[0]);
- radeon_emit(cmd_buffer->cs, color_values[1]);
+ radv_set_color_clear_metadata(cmd_buffer, image, color_values);
+
+ radv_update_bound_fast_clear_color(cmd_buffer, image, cb_idx,
+ color_values);
}
+/**
+ * Load the clear color values from the image's metadata.
+ */
static void
-radv_load_color_clear_regs(struct radv_cmd_buffer *cmd_buffer,
- struct radv_image *image,
- int idx)
+radv_load_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image,
+ int cb_idx)
{
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint64_t va = radv_buffer_get_va(image->bo);
+
va += image->offset + image->clear_value_offset;
if (!radv_image_has_cmask(image) && !radv_image_has_dcc(image))
return;
- uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + idx * 0x3c;
+ uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c;
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, cmd_buffer->state.predicating));
- radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
- COPY_DATA_DST_SEL(COPY_DATA_REG) |
- COPY_DATA_COUNT_SEL);
- radeon_emit(cmd_buffer->cs, va);
- radeon_emit(cmd_buffer->cs, va >> 32);
- radeon_emit(cmd_buffer->cs, reg >> 2);
- radeon_emit(cmd_buffer->cs, 0);
+ radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, cmd_buffer->state.predicating));
+ radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
+ COPY_DATA_DST_SEL(COPY_DATA_REG) |
+ COPY_DATA_COUNT_SEL);
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, reg >> 2);
+ radeon_emit(cs, 0);
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating));
- radeon_emit(cmd_buffer->cs, 0);
+ radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating));
+ radeon_emit(cs, 0);
}
static void
struct radv_image *image = att->attachment->image;
VkImageLayout layout = subpass->color_attachments[i].layout;
- radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo, 8);
+ radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo);
assert(att->attachment->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT);
radv_emit_fb_color_state(cmd_buffer, i, att, image, layout);
- radv_load_color_clear_regs(cmd_buffer, image, i);
+ radv_load_color_clear_metadata(cmd_buffer, image, i);
}
if(subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
VkImageLayout layout = subpass->depth_stencil_attachment.layout;
struct radv_attachment_info *att = &framebuffer->attachments[idx];
struct radv_image *image = att->attachment->image;
- radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo, 8);
+ radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo);
MAYBE_UNUSED uint32_t queue_mask = radv_image_queue_family_mask(image,
cmd_buffer->queue_family_index,
cmd_buffer->queue_family_index);
cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
cmd_buffer->state.offset_scale = att->ds.offset_scale;
}
- radv_load_depth_clear_regs(cmd_buffer, image);
+ radv_load_ds_clear_metadata(cmd_buffer, image);
} else {
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 2);
static void
radv_emit_index_buffer(struct radv_cmd_buffer *cmd_buffer)
{
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
struct radv_cmd_state *state = &cmd_buffer->state;
if (state->index_type != state->last_index_type) {
cmd_buffer->state.dirty &= ~states;
}
-static void
-emit_stage_descriptor_set_userdata(struct radv_cmd_buffer *cmd_buffer,
- struct radv_pipeline *pipeline,
- int idx,
- uint64_t va,
- gl_shader_stage stage)
-{
- struct radv_userdata_info *desc_set_loc = &pipeline->shaders[stage]->info.user_sgprs_locs.descriptor_sets[idx];
- uint32_t base_reg = pipeline->user_data_0[stage];
-
- if (desc_set_loc->sgpr_idx == -1 || desc_set_loc->indirect)
- return;
-
- assert(!desc_set_loc->indirect);
- assert(desc_set_loc->num_sgprs == (HAVE_32BIT_POINTERS ? 1 : 2));
-
- radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs,
- base_reg + desc_set_loc->sgpr_idx * 4, va, false);
-}
-
-static void
-radv_emit_descriptor_set_userdata(struct radv_cmd_buffer *cmd_buffer,
- VkShaderStageFlags stages,
- struct radv_descriptor_set *set,
- unsigned idx)
-{
- if (cmd_buffer->state.pipeline) {
- radv_foreach_stage(stage, stages) {
- if (cmd_buffer->state.pipeline->shaders[stage])
- emit_stage_descriptor_set_userdata(cmd_buffer, cmd_buffer->state.pipeline,
- idx, set->va,
- stage);
- }
- }
-
- if (cmd_buffer->state.compute_pipeline && (stages & VK_SHADER_STAGE_COMPUTE_BIT))
- emit_stage_descriptor_set_userdata(cmd_buffer, cmd_buffer->state.compute_pipeline,
- idx, set->va,
- MESA_SHADER_COMPUTE);
-}
-
static void
radv_flush_push_descriptors(struct radv_cmd_buffer *cmd_buffer,
VkPipelineBindPoint bind_point)
VK_PIPELINE_BIND_POINT_GRAPHICS;
struct radv_descriptor_state *descriptors_state =
radv_get_descriptors_state(cmd_buffer, bind_point);
- unsigned i;
if (!descriptors_state->dirty)
return;
cmd_buffer->cs,
MAX_SETS * MESA_SHADER_STAGES * 4);
- for_each_bit(i, descriptors_state->dirty) {
- struct radv_descriptor_set *set = descriptors_state->sets[i];
- if (!(descriptors_state->valid & (1u << i)))
- continue;
+ if (cmd_buffer->state.pipeline) {
+ radv_foreach_stage(stage, stages) {
+ if (!cmd_buffer->state.pipeline->shaders[stage])
+ continue;
- radv_emit_descriptor_set_userdata(cmd_buffer, stages, set, i);
+ radv_emit_descriptor_pointers(cmd_buffer,
+ cmd_buffer->state.pipeline,
+ descriptors_state, stage);
+ }
+ }
+
+ if (cmd_buffer->state.compute_pipeline &&
+ (stages & VK_SHADER_STAGE_COMPUTE_BIT)) {
+ radv_emit_descriptor_pointers(cmd_buffer,
+ cmd_buffer->state.compute_pipeline,
+ descriptors_state,
+ MESA_SHADER_COMPUTE);
}
+
descriptors_state->dirty = 0;
descriptors_state->push_dirty = false;
struct radv_pipeline *pipeline = stages & VK_SHADER_STAGE_COMPUTE_BIT
? cmd_buffer->state.compute_pipeline
: cmd_buffer->state.pipeline;
+ VkPipelineBindPoint bind_point = stages & VK_SHADER_STAGE_COMPUTE_BIT ?
+ VK_PIPELINE_BIND_POINT_COMPUTE :
+ VK_PIPELINE_BIND_POINT_GRAPHICS;
+ struct radv_descriptor_state *descriptors_state =
+ radv_get_descriptors_state(cmd_buffer, bind_point);
struct radv_pipeline_layout *layout = pipeline->layout;
+ struct radv_shader_variant *shader, *prev_shader;
unsigned offset;
void *ptr;
uint64_t va;
return;
memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size);
- memcpy((char*)ptr + layout->push_constant_size, cmd_buffer->dynamic_buffers,
+ memcpy((char*)ptr + layout->push_constant_size,
+ descriptors_state->dynamic_buffers,
16 * layout->dynamic_offset_count);
va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
cmd_buffer->cs, MESA_SHADER_STAGES * 4);
+ prev_shader = NULL;
radv_foreach_stage(stage, stages) {
- if (pipeline->shaders[stage]) {
+ shader = radv_get_shader(pipeline, stage);
+
+ /* Avoid redundantly emitting the address for merged stages. */
+ if (shader && shader != prev_shader) {
radv_emit_userdata_address(cmd_buffer, pipeline, stage,
AC_UD_PUSH_CONSTANTS, va);
+
+ prev_shader = shader;
}
}
if ((pipeline_is_dirty ||
(cmd_buffer->state.dirty & RADV_CMD_DIRTY_VERTEX_BUFFER)) &&
cmd_buffer->state.pipeline->vertex_elements.count &&
- radv_get_vertex_shader(cmd_buffer->state.pipeline)->info.info.vs.has_vertex_buffers) {
+ radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.info.vs.has_vertex_buffers) {
struct radv_vertex_elements_info *velems = &cmd_buffer->state.pipeline->vertex_elements;
unsigned vb_offset;
void *vb_ptr;
{
struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
struct radv_cmd_state *state = &cmd_buffer->state;
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint32_t ia_multi_vgt_param;
int32_t primitive_reset_en;
static enum radv_cmd_flush_bits
radv_src_access_flush(struct radv_cmd_buffer *cmd_buffer,
- VkAccessFlags src_flags)
+ VkAccessFlags src_flags,
+ struct radv_image *image)
{
enum radv_cmd_flush_bits flush_bits = 0;
uint32_t b;
flush_bits |= RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
break;
case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
- flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
- RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
+		if (!image || radv_image_has_CB_metadata(image)) {
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
+ }
break;
case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
- flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
- RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB;
+		if (!image || radv_image_has_htile(image)) {
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
+ }
break;
case VK_ACCESS_TRANSFER_WRITE_BIT:
flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
static void radv_subpass_barrier(struct radv_cmd_buffer *cmd_buffer, const struct radv_subpass_barrier *barrier)
{
- cmd_buffer->state.flush_bits |= radv_src_access_flush(cmd_buffer, barrier->src_access_mask);
+ cmd_buffer->state.flush_bits |= radv_src_access_flush(cmd_buffer, barrier->src_access_mask,
+ NULL);
radv_stage_flush(cmd_buffer, barrier->src_stage_mask);
cmd_buffer->state.flush_bits |= radv_dst_access_flush(cmd_buffer, barrier->dst_access_mask,
NULL);
}
static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buffer,
- VkAttachmentReference att)
+ struct radv_subpass_attachment att)
{
unsigned idx = att.attachment;
struct radv_image_view *view = cmd_buffer->state.framebuffer->attachments[idx].attachment;
struct radv_device *device = cmd_buffer->device;
if (device->gfx_init) {
uint64_t va = radv_buffer_get_va(device->gfx_init);
- radv_cs_add_buffer(device->ws, cmd_buffer->cs, device->gfx_init, 8);
+ radv_cs_add_buffer(device->ws, cmd_buffer->cs, device->gfx_init);
radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
radeon_emit(cmd_buffer->cs, va);
radeon_emit(cmd_buffer->cs, va >> 32);
}
}
- if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
+ if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
+ (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
assert(pBeginInfo->pInheritanceInfo);
cmd_buffer->state.framebuffer = radv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
cmd_buffer->state.pass = radv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
radv_cmd_buffer_set_subpass(cmd_buffer, subpass, false);
}
- if (unlikely(cmd_buffer->device->trace_bo))
+ if (unlikely(cmd_buffer->device->trace_bo)) {
+ struct radv_device *device = cmd_buffer->device;
+
+ radv_cs_add_buffer(device->ws, cmd_buffer->cs,
+ device->trace_bo);
+
radv_cmd_buffer_trace_emit(cmd_buffer);
+ }
cmd_buffer->status = RADV_CMD_BUFFER_STATUS_RECORDING;
vb[idx].offset = pOffsets[i];
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
- vb[idx].buffer->bo, 8);
+ vb[idx].buffer->bo);
}
if (!changed) {
int index_size_shift = cmd_buffer->state.index_type ? 2 : 1;
cmd_buffer->state.max_index_count = (index_buffer->size - offset) >> index_size_shift;
cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
- radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, index_buffer->bo, 8);
+ radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, index_buffer->bo);
}
if (!cmd_buffer->device->use_global_bo_list) {
for (unsigned j = 0; j < set->layout->buffer_count; ++j)
if (set->descriptors[j])
- radv_cs_add_buffer(ws, cmd_buffer->cs, set->descriptors[j], 7);
+ radv_cs_add_buffer(ws, cmd_buffer->cs, set->descriptors[j]);
}
if(set->bo)
- radv_cs_add_buffer(ws, cmd_buffer->cs, set->bo, 8);
+ radv_cs_add_buffer(ws, cmd_buffer->cs, set->bo);
}
void radv_CmdBindDescriptorSets(
unsigned dyn_idx = 0;
const bool no_dynamic_bounds = cmd_buffer->device->instance->debug_flags & RADV_DEBUG_NO_DYNAMIC_BOUNDS;
+ struct radv_descriptor_state *descriptors_state =
+ radv_get_descriptors_state(cmd_buffer, pipelineBindPoint);
for (unsigned i = 0; i < descriptorSetCount; ++i) {
unsigned idx = i + firstSet;
for(unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
unsigned idx = j + layout->set[i + firstSet].dynamic_offset_start;
- uint32_t *dst = cmd_buffer->dynamic_buffers + idx * 4;
+ uint32_t *dst = descriptors_state->dynamic_buffers + idx * 4;
assert(dyn_idx < dynamicOffsetCount);
struct radv_descriptor_range *range = set->dynamic_descriptors + j;
si_emit_cache_flush(cmd_buffer);
}
+ /* Make sure CP DMA is idle at the end of IBs because the kernel
+ * doesn't wait for it.
+ */
+ si_cp_dma_wait_for_idle(cmd_buffer);
+
vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs))
- return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ return vk_error(cmd_buffer->device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
cmd_buffer->status = RADV_CMD_BUFFER_STATUS_EXECUTABLE;
pipeline->max_waves * pipeline->scratch_bytes_per_wave);
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
- pipeline->shaders[MESA_SHADER_COMPUTE]->bo, 8);
+ pipeline->shaders[MESA_SHADER_COMPUTE]->bo);
if (unlikely(cmd_buffer->device->trace_bo))
radv_save_pipeline(cmd_buffer, pipeline, RING_COMPUTE);
pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pool == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (pAllocator)
pool->alloc = *pAllocator;
radv_cmd_buffer_clear_subpass(cmd_buffer);
}
+void radv_CmdBeginRenderPass2KHR(
+ VkCommandBuffer commandBuffer,
+ const VkRenderPassBeginInfo* pRenderPassBeginInfo,
+ const VkSubpassBeginInfoKHR* pSubpassBeginInfo)
+{
+ radv_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
+ pSubpassBeginInfo->contents);
+}
+
void radv_CmdNextSubpass(
VkCommandBuffer commandBuffer,
VkSubpassContents contents)
radv_cmd_buffer_clear_subpass(cmd_buffer);
}
+void radv_CmdNextSubpass2KHR(
+ VkCommandBuffer commandBuffer,
+ const VkSubpassBeginInfoKHR* pSubpassBeginInfo,
+ const VkSubpassEndInfoKHR* pSubpassEndInfo)
+{
+ radv_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
+}
+
static void radv_emit_view_index(struct radv_cmd_buffer *cmd_buffer, unsigned index)
{
struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
for (unsigned stage = 0; stage < MESA_SHADER_STAGES; ++stage) {
- if (!pipeline->shaders[stage])
+ if (!radv_get_shader(pipeline, stage))
continue;
+
struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, AC_UD_VIEW_INDEX);
if (loc->sgpr_idx == -1)
continue;
uint64_t count_va,
uint32_t stride)
{
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
unsigned di_src_sel = indexed ? V_0287F0_DI_SRC_SEL_DMA
: V_0287F0_DI_SRC_SEL_AUTO_INDEX;
- bool draw_id_enable = radv_get_vertex_shader(cmd_buffer->state.pipeline)->info.info.vs.needs_draw_id;
+ bool draw_id_enable = radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.info.vs.needs_draw_id;
uint32_t base_reg = cmd_buffer->state.pipeline->graphics.vtx_base_sgpr;
assert(base_reg);
{
struct radv_cmd_state *state = &cmd_buffer->state;
struct radeon_winsys *ws = cmd_buffer->device->ws;
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
if (info->indirect) {
uint64_t va = radv_buffer_get_va(info->indirect->bo);
va += info->indirect->offset + info->indirect_offset;
- radv_cs_add_buffer(ws, cs, info->indirect->bo, 8);
+ radv_cs_add_buffer(ws, cs, info->indirect->bo);
radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
radeon_emit(cs, 1);
count_va += info->count_buffer->offset +
info->count_buffer_offset;
- radv_cs_add_buffer(ws, cs, info->count_buffer->bo, 8);
+ radv_cs_add_buffer(ws, cs, info->count_buffer->bo);
}
if (!state->subpass->view_mask) {
if (!cmd_buffer->device->physical_device->has_scissor_bug)
return false;
+ uint32_t used_states = cmd_buffer->state.pipeline->graphics.needed_dynamic_state | ~RADV_CMD_DIRTY_DYNAMIC_ALL;
+
+	/* Index & vertex buffers don't change context regs, and the pipeline is handled later. */
+ used_states &= ~(RADV_CMD_DIRTY_INDEX_BUFFER | RADV_CMD_DIRTY_VERTEX_BUFFER | RADV_CMD_DIRTY_PIPELINE);
+
/* Assume all state changes except these two can imply context rolls. */
- if (cmd_buffer->state.dirty & ~(RADV_CMD_DIRTY_INDEX_BUFFER |
- RADV_CMD_DIRTY_VERTEX_BUFFER |
- RADV_CMD_DIRTY_PIPELINE))
+ if (cmd_buffer->state.dirty & used_states)
return true;
if (cmd_buffer->state.emitted_pipeline != cmd_buffer->state.pipeline)
cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
bool pipeline_is_dirty =
(cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) &&
- cmd_buffer->state.pipeline &&
cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline;
MAYBE_UNUSED unsigned cdw_max =
struct radv_shader_variant *compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
unsigned dispatch_initiator = cmd_buffer->device->dispatch_initiator;
struct radeon_winsys *ws = cmd_buffer->device->ws;
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
struct radv_userdata_info *loc;
loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_COMPUTE,
va += info->indirect->offset + info->indirect_offset;
- radv_cs_add_buffer(ws, cs, info->indirect->bo, 8);
+ radv_cs_add_buffer(ws, cs, info->indirect->bo);
if (loc->sgpr_idx != -1) {
for (unsigned i = 0; i < 3; ++i) {
for (unsigned i = 0; i < cmd_buffer->state.framebuffer->attachment_count; ++i) {
VkImageLayout layout = cmd_buffer->state.pass->attachments[i].final_layout;
radv_handle_subpass_image_transition(cmd_buffer,
- (VkAttachmentReference){i, layout});
+ (struct radv_subpass_attachment){i, layout});
}
vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
cmd_buffer->state.framebuffer = NULL;
}
+void radv_CmdEndRenderPass2KHR(
+ VkCommandBuffer commandBuffer,
+ const VkSubpassEndInfoKHR* pSubpassEndInfo)
+{
+ radv_CmdEndRenderPass(commandBuffer);
+}
+
/*
* For HTILE we have the following interesting clear words:
* 0xfffff30f: Uncompressed, full depth range, for depth+stencil HTILE
assert(range->levelCount == 1 || range->levelCount == VK_REMAINING_MIP_LEVELS);
unsigned layer_count = radv_get_layerCount(image, range);
uint64_t size = image->surface.htile_slice_size * layer_count;
+ VkImageAspectFlags aspects = VK_IMAGE_ASPECT_DEPTH_BIT;
uint64_t offset = image->offset + image->htile_offset +
image->surface.htile_slice_size * range->baseArrayLayer;
struct radv_cmd_state *state = &cmd_buffer->state;
+ VkClearDepthStencilValue value = {};
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
size, clear_word);
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
+
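+	/* Record matching clear values (zero) in the image metadata so
+	 * they are consistent with the freshly initialized HTILE. */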
+ if (vk_format_is_stencil(image->vk_format))
+ aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
+
+ radv_set_ds_clear_metadata(cmd_buffer, image, value, aspects);
}
static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffer,
if (!radv_image_has_htile(image))
return;
- if (dst_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL &&
- (pending_clears & vk_format_aspects(image->vk_format)) == vk_format_aspects(image->vk_format) &&
- cmd_buffer->state.render_area.offset.x == 0 && cmd_buffer->state.render_area.offset.y == 0 &&
- cmd_buffer->state.render_area.extent.width == image->info.width &&
- cmd_buffer->state.render_area.extent.height == image->info.height) {
- /* The clear will initialize htile. */
- return;
- } else if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED &&
+ if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED &&
radv_layout_has_htile(image, dst_layout, dst_queue_mask)) {
/* TODO: merge with the clear if applicable */
radv_initialize_htile(cmd_buffer, image, range, 0);
}
radv_initialize_dcc(cmd_buffer, image, value);
+
+ radv_set_dcc_need_cmask_elim_pred(cmd_buffer, image, false);
+ }
+
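+	/* Also initialize the fast clear colors to zero so the metadata
+	 * is never left uninitialized. */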
+ if (radv_image_has_cmask(image) || radv_image_has_dcc(image)) {
+ uint32_t color_values[2] = {};
+ radv_set_color_clear_metadata(cmd_buffer, image, color_values);
}
}
}
}
-void radv_CmdPipelineBarrier(
- VkCommandBuffer commandBuffer,
- VkPipelineStageFlags srcStageMask,
- VkPipelineStageFlags destStageMask,
- VkBool32 byRegion,
- uint32_t memoryBarrierCount,
- const VkMemoryBarrier* pMemoryBarriers,
- uint32_t bufferMemoryBarrierCount,
- const VkBufferMemoryBarrier* pBufferMemoryBarriers,
- uint32_t imageMemoryBarrierCount,
- const VkImageMemoryBarrier* pImageMemoryBarriers)
-{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+struct radv_barrier_info {
+ uint32_t eventCount;
+ const VkEvent *pEvents;
+ VkPipelineStageFlags srcStageMask;
+};
+
+static void
+radv_barrier(struct radv_cmd_buffer *cmd_buffer,
+ uint32_t memoryBarrierCount,
+ const VkMemoryBarrier *pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount,
+ const VkBufferMemoryBarrier *pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount,
+ const VkImageMemoryBarrier *pImageMemoryBarriers,
+ const struct radv_barrier_info *info)
+{
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
enum radv_cmd_flush_bits src_flush_bits = 0;
enum radv_cmd_flush_bits dst_flush_bits = 0;
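+	/* For vkCmdWaitEvents, wait until each event has been signaled by
+	 * polling the fence value written by write_event(). */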
+ for (unsigned i = 0; i < info->eventCount; ++i) {
+ RADV_FROM_HANDLE(radv_event, event, info->pEvents[i]);
+ uint64_t va = radv_buffer_get_va(event->bo);
+
+ radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo);
+
+ MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
+
+ si_emit_wait_fence(cs, va, 1, 0xffffffff);
+ assert(cmd_buffer->cs->cdw <= cdw_max);
+ }
+
for (uint32_t i = 0; i < memoryBarrierCount; i++) {
- src_flush_bits |= radv_src_access_flush(cmd_buffer, pMemoryBarriers[i].srcAccessMask);
+ src_flush_bits |= radv_src_access_flush(cmd_buffer, pMemoryBarriers[i].srcAccessMask,
+ NULL);
dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pMemoryBarriers[i].dstAccessMask,
NULL);
}
for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
- src_flush_bits |= radv_src_access_flush(cmd_buffer, pBufferMemoryBarriers[i].srcAccessMask);
+ src_flush_bits |= radv_src_access_flush(cmd_buffer, pBufferMemoryBarriers[i].srcAccessMask,
+ NULL);
dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pBufferMemoryBarriers[i].dstAccessMask,
NULL);
}
for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
- src_flush_bits |= radv_src_access_flush(cmd_buffer, pImageMemoryBarriers[i].srcAccessMask);
+
+ src_flush_bits |= radv_src_access_flush(cmd_buffer, pImageMemoryBarriers[i].srcAccessMask,
+ image);
dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pImageMemoryBarriers[i].dstAccessMask,
image);
}
- radv_stage_flush(cmd_buffer, srcStageMask);
+ radv_stage_flush(cmd_buffer, info->srcStageMask);
cmd_buffer->state.flush_bits |= src_flush_bits;
for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
0);
}
+ /* Make sure CP DMA is idle because the driver might have performed a
+ * DMA operation for copying or filling buffers/images.
+ */
+ si_cp_dma_wait_for_idle(cmd_buffer);
+
cmd_buffer->state.flush_bits |= dst_flush_bits;
}
+void radv_CmdPipelineBarrier(
+ VkCommandBuffer commandBuffer,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags destStageMask,
+ VkBool32 byRegion,
+ uint32_t memoryBarrierCount,
+ const VkMemoryBarrier* pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount,
+ const VkBufferMemoryBarrier* pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount,
+ const VkImageMemoryBarrier* pImageMemoryBarriers)
+{
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ struct radv_barrier_info info;
+
+ info.eventCount = 0;
+ info.pEvents = NULL;
+ info.srcStageMask = srcStageMask;
+
+ radv_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
+ bufferMemoryBarrierCount, pBufferMemoryBarriers,
+ imageMemoryBarrierCount, pImageMemoryBarriers, &info);
+}
+
static void write_event(struct radv_cmd_buffer *cmd_buffer,
struct radv_event *event,
VkPipelineStageFlags stageMask,
unsigned value)
{
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint64_t va = radv_buffer_get_va(event->bo);
- radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo, 8);
+ radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo);
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 18);
- /* TODO: this is overkill. Probably should figure something out from
- * the stage mask. */
+ /* Flags that only require a top-of-pipe event. */
+ VkPipelineStageFlags top_of_pipe_flags =
+ VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+
+ /* Flags that only require a post-index-fetch event. */
+ VkPipelineStageFlags post_index_fetch_flags =
+ top_of_pipe_flags |
+ VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
+ VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
+
+ /* Make sure CP DMA is idle because the driver might have performed a
+ * DMA operation for copying or filling buffers/images.
+ */
+ si_cp_dma_wait_for_idle(cmd_buffer);
+
+ /* TODO: Emit EOS events for syncing PS/CS stages. */
- si_cs_emit_write_event_eop(cs,
- cmd_buffer->state.predicating,
- cmd_buffer->device->physical_device->rad_info.chip_class,
- radv_cmd_buffer_uses_mec(cmd_buffer),
- V_028A90_BOTTOM_OF_PIPE_TS, 0,
- 1, va, 2, value);
+ if (!(stageMask & ~top_of_pipe_flags)) {
+ /* Just need to sync the PFP engine. */
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
+ radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
+ S_370_WR_CONFIRM(1) |
+ S_370_ENGINE_SEL(V_370_PFP));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, value);
+ } else if (!(stageMask & ~post_index_fetch_flags)) {
+ /* Sync ME because PFP reads index and indirect buffers. */
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
+ radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
+ S_370_WR_CONFIRM(1) |
+ S_370_ENGINE_SEL(V_370_ME));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, value);
+ } else {
+ /* Otherwise, sync all prior GPU work using an EOP event. */
+ si_cs_emit_write_event_eop(cs,
+ cmd_buffer->device->physical_device->rad_info.chip_class,
+ radv_cmd_buffer_uses_mec(cmd_buffer),
+ V_028A90_BOTTOM_OF_PIPE_TS, 0,
+ EOP_DATA_SEL_VALUE_32BIT, va, 2, value,
+ cmd_buffer->gfx9_eop_bug_va);
+ }
assert(cmd_buffer->cs->cdw <= cdw_max);
}
const VkImageMemoryBarrier* pImageMemoryBarriers)
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
-
- for (unsigned i = 0; i < eventCount; ++i) {
- RADV_FROM_HANDLE(radv_event, event, pEvents[i]);
- uint64_t va = radv_buffer_get_va(event->bo);
+ struct radv_barrier_info info;
- radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo, 8);
-
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
-
- si_emit_wait_fence(cs, false, va, 1, 0xffffffff);
- assert(cmd_buffer->cs->cdw <= cdw_max);
- }
-
-
- for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
- RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
-
- radv_handle_image_transition(cmd_buffer, image,
- pImageMemoryBarriers[i].oldLayout,
- pImageMemoryBarriers[i].newLayout,
- pImageMemoryBarriers[i].srcQueueFamilyIndex,
- pImageMemoryBarriers[i].dstQueueFamilyIndex,
- &pImageMemoryBarriers[i].subresourceRange,
- 0);
- }
+ info.eventCount = eventCount;
+ info.pEvents = pEvents;
+ info.srcStageMask = 0;
- /* TODO: figure out how to do memory barriers without waiting */
- cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER |
- RADV_CMD_FLAG_INV_GLOBAL_L2 |
- RADV_CMD_FLAG_INV_VMEM_L1 |
- RADV_CMD_FLAG_INV_SMEM_L1;
+ radv_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
+ bufferMemoryBarrierCount, pBufferMemoryBarriers,
+ imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}