cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (cmd_buffer == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
cmd_buffer->device = device;
cmd_buffer->queue_family_index = pool->queue_family_index;
} else {
- /* Init the pool_link so we can safefly call list_del when we destroy
+ /* Init the pool_link so we can safely call list_del when we destroy
* the command buffer
*/
list_inithead(&cmd_buffer->pool_link);
cmd_buffer->cs = device->ws->cs_create(device->ws, ring);
if (!cmd_buffer->cs) {
vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
*pCommandBuffer = radv_cmd_buffer_to_handle(cmd_buffer);
new_size, 4096,
RADEON_DOMAIN_GTT,
RADEON_FLAG_CPU_ACCESS|
- RADEON_FLAG_NO_INTERPROCESS_SHARING);
+ RADEON_FLAG_NO_INTERPROCESS_SHARING |
+ RADEON_FLAG_32BIT);
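+ /* The upload BO is allocated with RADEON_FLAG_32BIT so that shader
+ * pointers into it (descriptor sets, push constants) can be emitted
+ * as a single user SGPR when HAVE_32BIT_POINTERS is set. */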
if (!bo) {
cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
static void
-radv_emit_write_data_packet(struct radeon_winsys_cs *cs, uint64_t va,
+radv_emit_write_data_packet(struct radeon_cmdbuf *cs, uint64_t va,
unsigned count, const uint32_t *data)
{
radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0));
void radv_cmd_buffer_trace_emit(struct radv_cmd_buffer *cmd_buffer)
{
struct radv_device *device = cmd_buffer->device;
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint64_t va;
va = radv_buffer_get_va(device->trace_bo);
struct radv_pipeline *pipeline, enum ring_type ring)
{
struct radv_device *device = cmd_buffer->device;
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint32_t data[2];
uint64_t va;
struct radv_descriptor_state *descriptors_state =
radv_get_descriptors_state(cmd_buffer, bind_point);
struct radv_device *device = cmd_buffer->device;
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint32_t data[MAX_SETS * 2] = {};
uint64_t va;
unsigned i;
gl_shader_stage stage,
int idx)
{
- if (stage == MESA_SHADER_VERTEX) {
- if (pipeline->shaders[MESA_SHADER_VERTEX])
- return &pipeline->shaders[MESA_SHADER_VERTEX]->info.user_sgprs_locs.shader_data[idx];
- if (pipeline->shaders[MESA_SHADER_TESS_CTRL])
- return &pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.user_sgprs_locs.shader_data[idx];
- if (pipeline->shaders[MESA_SHADER_GEOMETRY])
- return &pipeline->shaders[MESA_SHADER_GEOMETRY]->info.user_sgprs_locs.shader_data[idx];
- } else if (stage == MESA_SHADER_TESS_EVAL) {
- if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
- return &pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.user_sgprs_locs.shader_data[idx];
- if (pipeline->shaders[MESA_SHADER_GEOMETRY])
- return &pipeline->shaders[MESA_SHADER_GEOMETRY]->info.user_sgprs_locs.shader_data[idx];
- }
- return &pipeline->shaders[stage]->info.user_sgprs_locs.shader_data[idx];
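+ /* radv_get_shader() resolves merged stages (on GFX9 the VS may be
+ * merged into the TCS or GS, and the TES into the GS) to the variant
+ * that actually runs, replacing the open-coded fallbacks above. */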
+ struct radv_shader_variant *shader = radv_get_shader(pipeline, stage);
+ return &shader->info.user_sgprs_locs.shader_data[idx];
}
static void
uint32_t base_reg = pipeline->user_data_0[stage];
if (loc->sgpr_idx == -1)
return;
- assert(loc->num_sgprs == 2);
+
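+ /* With 32-bit pointers the address fits in a single user SGPR; a full
+ * 64-bit VA needs two. */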
+ assert(loc->num_sgprs == (HAVE_32BIT_POINTERS ? 1 : 2));
assert(!loc->indirect);
- radeon_set_sh_reg_seq(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, 2);
- radeon_emit(cmd_buffer->cs, va);
- radeon_emit(cmd_buffer->cs, va >> 32);
+
+ radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs,
+ base_reg + loc->sgpr_idx * 4, va, false);
+}
+
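+/**
+ * Emit the user SGPR pointers for all dirty, valid descriptor sets of the
+ * given stage. Sets with an indirect or missing location are skipped;
+ * runs of consecutive sets are emitted in a single packet, as their user
+ * SGPRs are allocated back to back.
+ */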
+static void
+radv_emit_descriptor_pointers(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_pipeline *pipeline,
+ struct radv_descriptor_state *descriptors_state,
+ gl_shader_stage stage)
+{
+ struct radv_device *device = cmd_buffer->device;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
+ uint32_t sh_base = pipeline->user_data_0[stage];
+ struct radv_userdata_locations *locs =
+ &pipeline->shaders[stage]->info.user_sgprs_locs;
+ unsigned mask;
+
+ mask = descriptors_state->dirty & descriptors_state->valid;
+
+ for (int i = 0; i < MAX_SETS; i++) {
+ struct radv_userdata_info *loc = &locs->descriptor_sets[i];
+ if (loc->sgpr_idx != -1 && !loc->indirect)
+ continue;
+ mask &= ~(1 << i);
+ }
+
+ while (mask) {
+ int start, count;
+
+ u_bit_scan_consecutive_range(&mask, &start, &count);
+
+ struct radv_userdata_info *loc = &locs->descriptor_sets[start];
+ unsigned sh_offset = sh_base + loc->sgpr_idx * 4;
+
+ radv_emit_shader_pointer_head(cs, sh_offset, count,
+ HAVE_32BIT_POINTERS);
+ for (int i = 0; i < count; i++) {
+ struct radv_descriptor_set *set =
+ descriptors_state->sets[start + i];
+
+ radv_emit_shader_pointer_body(device, cs, set->va,
+ HAVE_32BIT_POINTERS);
+ }
+ }
}
static void
{
uint32_t count = cmd_buffer->state.dynamic.scissor.count;
- /* Vega10/Raven scissor bug workaround. This must be done before VPORT
- * scissor registers are changed. There is also a more efficient but
- * more involved alternative workaround.
- */
- if (cmd_buffer->device->physical_device->has_scissor_bug) {
- cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
- si_emit_cache_flush(cmd_buffer);
- }
si_write_scissors(cmd_buffer->cs, 0, count,
cmd_buffer->state.dynamic.scissor.scissors,
cmd_buffer->state.dynamic.viewport.viewports,
}
}
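+/**
+ * Update DB_Z_INFO.ZRANGE_PRECISION for the TC-compat HTILE bug. The field
+ * has to be cleared when the depth buffer was last fast-cleared with 0.0f.
+ * With requires_cond_write, the stored clear value is compared to 0.0f on
+ * the GPU using a COND_WRITE packet; otherwise the register is written
+ * directly.
+ */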
+static void
+radv_update_zrange_precision(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_ds_buffer_info *ds,
+ struct radv_image *image, VkImageLayout layout,
+ bool requires_cond_write)
+{
+ uint32_t db_z_info = ds->db_z_info;
+ uint32_t db_z_info_reg;
+
+ if (!radv_image_is_tc_compat_htile(image))
+ return;
+
+ if (!radv_layout_has_htile(image, layout,
+ radv_image_queue_family_mask(image,
+ cmd_buffer->queue_family_index,
+ cmd_buffer->queue_family_index))) {
+ db_z_info &= C_028040_TILE_SURFACE_ENABLE;
+ }
+
+ db_z_info &= C_028040_ZRANGE_PRECISION;
+
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
+ db_z_info_reg = R_028038_DB_Z_INFO;
+ } else {
+ db_z_info_reg = R_028040_DB_Z_INFO;
+ }
+
+ /* When we don't know the last fast clear value, we need to emit a
+ * conditional packet; otherwise we can update DB_Z_INFO directly.
+ */
+ if (requires_cond_write) {
+ radeon_emit(cmd_buffer->cs, PKT3(PKT3_COND_WRITE, 7, 0));
+
+ const uint32_t write_space = 0 << 8; /* register */
+ const uint32_t poll_space = 1 << 4; /* memory */
+ const uint32_t function = 3 << 0; /* equal to the reference */
+ const uint32_t options = write_space | poll_space | function;
+ radeon_emit(cmd_buffer->cs, options);
+
+ /* poll address - location of the depth clear value */
+ uint64_t va = radv_buffer_get_va(image->bo);
+ va += image->offset + image->clear_value_offset;
+
+ /* In the presence of a stencil format, we have to adjust the base
+ * address because the first value is the stencil clear value.
+ */
+ if (vk_format_is_stencil(image->vk_format))
+ va += 4;
+
+ radeon_emit(cmd_buffer->cs, va);
+ radeon_emit(cmd_buffer->cs, va >> 32);
+
+ radeon_emit(cmd_buffer->cs, fui(0.0f)); /* reference value */
+ radeon_emit(cmd_buffer->cs, (uint32_t)-1); /* comparison mask */
+ radeon_emit(cmd_buffer->cs, db_z_info_reg >> 2); /* write address low */
+ radeon_emit(cmd_buffer->cs, 0u); /* write address high */
+ radeon_emit(cmd_buffer->cs, db_z_info);
+ } else {
+ radeon_set_context_reg(cmd_buffer->cs, db_z_info_reg, db_z_info);
+ }
+}
+
static void
radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer,
struct radv_ds_buffer_info *ds,
}
+ /* Update the ZRANGE_PRECISION value for the TC-compat bug. */
+ radv_update_zrange_precision(cmd_buffer, ds, image, layout, true);
+
radeon_set_context_reg(cmd_buffer->cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
ds->pa_su_poly_offset_db_fmt_cntl);
}
+/**
+ * Update the fast clear depth/stencil values if the image is bound as a
+ * depth/stencil buffer.
+ */
+static void
+radv_update_bound_fast_clear_ds(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image,
+ VkClearDepthStencilValue ds_clear_value,
+ VkImageAspectFlags aspects)
+{
+ struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
+ const struct radv_subpass *subpass = cmd_buffer->state.subpass;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
+ struct radv_attachment_info *att;
+ uint32_t att_idx;
+
+ if (!framebuffer || !subpass)
+ return;
+
+ att_idx = subpass->depth_stencil_attachment.attachment;
+ if (att_idx == VK_ATTACHMENT_UNUSED)
+ return;
+
+ att = &framebuffer->attachments[att_idx];
+ if (att->attachment->image != image)
+ return;
+
+ radeon_set_context_reg_seq(cs, R_028028_DB_STENCIL_CLEAR, 2);
+ radeon_emit(cs, ds_clear_value.stencil);
+ radeon_emit(cs, fui(ds_clear_value.depth));
+
+ /* Update the ZRANGE_PRECISION value for the TC-compat bug. This is
+ * only needed when clearing Z to 0.0.
+ */
+ if ((aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
+ ds_clear_value.depth == 0.0) {
+ VkImageLayout layout = subpass->depth_stencil_attachment.layout;
+
+ radv_update_zrange_precision(cmd_buffer, &att->ds, image,
+ layout, false);
+ }
+}
+
+/**
+ * Set the clear depth/stencil values to the image's metadata.
+ */
void
-radv_set_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer,
- struct radv_image *image,
- VkClearDepthStencilValue ds_clear_value,
- VkImageAspectFlags aspects)
+radv_set_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image,
+ VkClearDepthStencilValue ds_clear_value,
+ VkImageAspectFlags aspects)
{
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint64_t va = radv_buffer_get_va(image->bo);
- va += image->offset + image->clear_value_offset;
unsigned reg_offset = 0, reg_count = 0;
+ va += image->offset + image->clear_value_offset;
+
assert(radv_image_has_htile(image));
if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
++reg_count;
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, 0));
- radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
- S_370_WR_CONFIRM(1) |
- S_370_ENGINE_SEL(V_370_PFP));
- radeon_emit(cmd_buffer->cs, va);
- radeon_emit(cmd_buffer->cs, va >> 32);
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, 0));
+ radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
+ S_370_WR_CONFIRM(1) |
+ S_370_ENGINE_SEL(V_370_PFP));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
- radeon_emit(cmd_buffer->cs, ds_clear_value.stencil);
+ radeon_emit(cs, ds_clear_value.stencil);
if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
- radeon_emit(cmd_buffer->cs, fui(ds_clear_value.depth));
+ radeon_emit(cs, fui(ds_clear_value.depth));
- radeon_set_context_reg_seq(cmd_buffer->cs, R_028028_DB_STENCIL_CLEAR + 4 * reg_offset, reg_count);
- if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
- radeon_emit(cmd_buffer->cs, ds_clear_value.stencil); /* R_028028_DB_STENCIL_CLEAR */
- if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
- radeon_emit(cmd_buffer->cs, fui(ds_clear_value.depth)); /* R_02802C_DB_DEPTH_CLEAR */
+ radv_update_bound_fast_clear_ds(cmd_buffer, image, ds_clear_value,
+ aspects);
}
+/**
+ * Load the clear depth/stencil values from the image's metadata.
+ */
static void
-radv_load_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer,
- struct radv_image *image)
+radv_load_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image)
{
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
VkImageAspectFlags aspects = vk_format_aspects(image->vk_format);
uint64_t va = radv_buffer_get_va(image->bo);
- va += image->offset + image->clear_value_offset;
unsigned reg_offset = 0, reg_count = 0;
+ va += image->offset + image->clear_value_offset;
+
if (!radv_image_has_htile(image))
return;
if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
++reg_count;
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, 0));
- radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
- COPY_DATA_DST_SEL(COPY_DATA_REG) |
- (reg_count == 2 ? COPY_DATA_COUNT_SEL : 0));
- radeon_emit(cmd_buffer->cs, va);
- radeon_emit(cmd_buffer->cs, va >> 32);
- radeon_emit(cmd_buffer->cs, (R_028028_DB_STENCIL_CLEAR + 4 * reg_offset) >> 2);
- radeon_emit(cmd_buffer->cs, 0);
+ radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
+ radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
+ COPY_DATA_DST_SEL(COPY_DATA_REG) |
+ (reg_count == 2 ? COPY_DATA_COUNT_SEL : 0));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, (R_028028_DB_STENCIL_CLEAR + 4 * reg_offset) >> 2);
+ radeon_emit(cs, 0);
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
- radeon_emit(cmd_buffer->cs, 0);
+ radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
+ radeon_emit(cs, 0);
}
/*
- *with DCC some colors don't require CMASK elimiation before being
+ * With DCC some colors don't require CMASK elimination before being
* used as a texture. This sets a predicate value to determine if the
* CMASK eliminate is required.
*/
radeon_emit(cmd_buffer->cs, pred_val >> 32);
}
+/**
+ * Update the fast clear color values if the image is bound as a color buffer.
+ */
+static void
+radv_update_bound_fast_clear_color(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image,
+ int cb_idx,
+ uint32_t color_values[2])
+{
+ struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
+ const struct radv_subpass *subpass = cmd_buffer->state.subpass;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
+ struct radv_attachment_info *att;
+ uint32_t att_idx;
+
+ if (!framebuffer || !subpass)
+ return;
+
+ att_idx = subpass->color_attachments[cb_idx].attachment;
+ if (att_idx == VK_ATTACHMENT_UNUSED)
+ return;
+
+ att = &framebuffer->attachments[att_idx];
+ if (att->attachment->image != image)
+ return;
+
+ radeon_set_context_reg_seq(cs, R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c, 2);
+ radeon_emit(cs, color_values[0]);
+ radeon_emit(cs, color_values[1]);
+}
+
+/**
+ * Set the clear color values to the image's metadata.
+ */
void
-radv_set_color_clear_regs(struct radv_cmd_buffer *cmd_buffer,
- struct radv_image *image,
- int idx,
- uint32_t color_values[2])
+radv_set_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image,
+ int cb_idx,
+ uint32_t color_values[2])
{
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint64_t va = radv_buffer_get_va(image->bo);
+
va += image->offset + image->clear_value_offset;
assert(radv_image_has_cmask(image) || radv_image_has_dcc(image));
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
- radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
- S_370_WR_CONFIRM(1) |
- S_370_ENGINE_SEL(V_370_PFP));
- radeon_emit(cmd_buffer->cs, va);
- radeon_emit(cmd_buffer->cs, va >> 32);
- radeon_emit(cmd_buffer->cs, color_values[0]);
- radeon_emit(cmd_buffer->cs, color_values[1]);
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 4, 0));
+ radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
+ S_370_WR_CONFIRM(1) |
+ S_370_ENGINE_SEL(V_370_PFP));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, color_values[0]);
+ radeon_emit(cs, color_values[1]);
- radeon_set_context_reg_seq(cmd_buffer->cs, R_028C8C_CB_COLOR0_CLEAR_WORD0 + idx * 0x3c, 2);
- radeon_emit(cmd_buffer->cs, color_values[0]);
- radeon_emit(cmd_buffer->cs, color_values[1]);
+ radv_update_bound_fast_clear_color(cmd_buffer, image, cb_idx,
+ color_values);
}
+/**
+ * Load the clear color values from the image's metadata.
+ */
static void
-radv_load_color_clear_regs(struct radv_cmd_buffer *cmd_buffer,
- struct radv_image *image,
- int idx)
+radv_load_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image,
+ int cb_idx)
{
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint64_t va = radv_buffer_get_va(image->bo);
+
va += image->offset + image->clear_value_offset;
if (!radv_image_has_cmask(image) && !radv_image_has_dcc(image))
return;
- uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + idx * 0x3c;
+ uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c;
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, cmd_buffer->state.predicating));
- radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
- COPY_DATA_DST_SEL(COPY_DATA_REG) |
- COPY_DATA_COUNT_SEL);
- radeon_emit(cmd_buffer->cs, va);
- radeon_emit(cmd_buffer->cs, va >> 32);
- radeon_emit(cmd_buffer->cs, reg >> 2);
- radeon_emit(cmd_buffer->cs, 0);
+ radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, cmd_buffer->state.predicating));
+ radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
+ COPY_DATA_DST_SEL(COPY_DATA_REG) |
+ COPY_DATA_COUNT_SEL);
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, reg >> 2);
+ radeon_emit(cs, 0);
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating));
- radeon_emit(cmd_buffer->cs, 0);
+ radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating));
+ radeon_emit(cs, 0);
}
static void
assert(att->attachment->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT);
radv_emit_fb_color_state(cmd_buffer, i, att, image, layout);
- radv_load_color_clear_regs(cmd_buffer, image, i);
+ radv_load_color_clear_metadata(cmd_buffer, image, i);
}
if(subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
cmd_buffer->state.offset_scale = att->ds.offset_scale;
}
- radv_load_depth_clear_regs(cmd_buffer, image);
+ radv_load_ds_clear_metadata(cmd_buffer, image);
} else {
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 2);
static void
radv_emit_index_buffer(struct radv_cmd_buffer *cmd_buffer)
{
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
struct radv_cmd_state *state = &cmd_buffer->state;
if (state->index_type != state->last_index_type) {
void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer)
{
+ bool has_perfect_queries = cmd_buffer->state.perfect_occlusion_queries_enabled;
struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
uint32_t pa_sc_mode_cntl_1 =
pipeline ? pipeline->graphics.ms.pa_sc_mode_cntl_1 : 0;
if(!cmd_buffer->state.active_occlusion_queries) {
if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
if (G_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(pa_sc_mode_cntl_1) &&
- pipeline->graphics.disable_out_of_order_rast_for_occlusion) {
+ pipeline->graphics.disable_out_of_order_rast_for_occlusion &&
+ has_perfect_queries) {
/* Re-enable out-of-order rasterization if the
* bound pipeline supports it and if it has
- * been disabled before starting occlusion
- * queries.
+ * been disabled before starting any perfect
+ * occlusion queries.
*/
radeon_set_context_reg(cmd_buffer->cs,
R_028A4C_PA_SC_MODE_CNTL_1,
} else {
const struct radv_subpass *subpass = cmd_buffer->state.subpass;
uint32_t sample_rate = subpass ? util_logbase2(subpass->max_sample_count) : 0;
- bool perfect = cmd_buffer->state.perfect_occlusion_queries_enabled;
if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
db_count_control =
- S_028004_PERFECT_ZPASS_COUNTS(perfect) |
+ S_028004_PERFECT_ZPASS_COUNTS(has_perfect_queries) |
S_028004_SAMPLE_RATE(sample_rate) |
S_028004_ZPASS_ENABLE(1) |
S_028004_SLICE_EVEN_ENABLE(1) |
S_028004_SLICE_ODD_ENABLE(1);
if (G_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(pa_sc_mode_cntl_1) &&
- pipeline->graphics.disable_out_of_order_rast_for_occlusion) {
+ pipeline->graphics.disable_out_of_order_rast_for_occlusion &&
+ has_perfect_queries) {
/* If the bound pipeline has enabled
* out-of-order rasterization, we should
- * disable it before starting occlusion
- * queries.
+ * disable it before starting any perfect
+ * occlusion queries.
*/
pa_sc_mode_cntl_1 &= C_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE;
if (states & (RADV_CMD_DIRTY_DYNAMIC_VIEWPORT))
radv_emit_viewport(cmd_buffer);
- if (states & (RADV_CMD_DIRTY_DYNAMIC_SCISSOR | RADV_CMD_DIRTY_DYNAMIC_VIEWPORT))
+ if (states & (RADV_CMD_DIRTY_DYNAMIC_SCISSOR | RADV_CMD_DIRTY_DYNAMIC_VIEWPORT) &&
+ !cmd_buffer->device->physical_device->has_scissor_bug)
radv_emit_scissor(cmd_buffer);
if (states & RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH)
cmd_buffer->state.dirty &= ~states;
}
-static void
-emit_stage_descriptor_set_userdata(struct radv_cmd_buffer *cmd_buffer,
- struct radv_pipeline *pipeline,
- int idx,
- uint64_t va,
- gl_shader_stage stage)
-{
- struct radv_userdata_info *desc_set_loc = &pipeline->shaders[stage]->info.user_sgprs_locs.descriptor_sets[idx];
- uint32_t base_reg = pipeline->user_data_0[stage];
-
- if (desc_set_loc->sgpr_idx == -1 || desc_set_loc->indirect)
- return;
-
- assert(!desc_set_loc->indirect);
- assert(desc_set_loc->num_sgprs == 2);
- radeon_set_sh_reg_seq(cmd_buffer->cs,
- base_reg + desc_set_loc->sgpr_idx * 4, 2);
- radeon_emit(cmd_buffer->cs, va);
- radeon_emit(cmd_buffer->cs, va >> 32);
-}
-
-static void
-radv_emit_descriptor_set_userdata(struct radv_cmd_buffer *cmd_buffer,
- VkShaderStageFlags stages,
- struct radv_descriptor_set *set,
- unsigned idx)
-{
- if (cmd_buffer->state.pipeline) {
- radv_foreach_stage(stage, stages) {
- if (cmd_buffer->state.pipeline->shaders[stage])
- emit_stage_descriptor_set_userdata(cmd_buffer, cmd_buffer->state.pipeline,
- idx, set->va,
- stage);
- }
- }
-
- if (cmd_buffer->state.compute_pipeline && (stages & VK_SHADER_STAGE_COMPUTE_BIT))
- emit_stage_descriptor_set_userdata(cmd_buffer, cmd_buffer->state.compute_pipeline,
- idx, set->va,
- MESA_SHADER_COMPUTE);
-}
-
static void
radv_flush_push_descriptors(struct radv_cmd_buffer *cmd_buffer,
VkPipelineBindPoint bind_point)
VK_PIPELINE_BIND_POINT_GRAPHICS;
struct radv_descriptor_state *descriptors_state =
radv_get_descriptors_state(cmd_buffer, bind_point);
- unsigned i;
if (!descriptors_state->dirty)
return;
cmd_buffer->cs,
MAX_SETS * MESA_SHADER_STAGES * 4);
- for_each_bit(i, descriptors_state->dirty) {
- struct radv_descriptor_set *set = descriptors_state->sets[i];
- if (!(descriptors_state->valid & (1u << i)))
- continue;
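+ /* Emit the pointers for every dirty descriptor set used by the bound
+ * graphics stages and, when requested, the compute shader. */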
+ if (cmd_buffer->state.pipeline) {
+ radv_foreach_stage(stage, stages) {
+ if (!cmd_buffer->state.pipeline->shaders[stage])
+ continue;
+
+ radv_emit_descriptor_pointers(cmd_buffer,
+ cmd_buffer->state.pipeline,
+ descriptors_state, stage);
+ }
+ }
- radv_emit_descriptor_set_userdata(cmd_buffer, stages, set, i);
+ if (cmd_buffer->state.compute_pipeline &&
+ (stages & VK_SHADER_STAGE_COMPUTE_BIT)) {
+ radv_emit_descriptor_pointers(cmd_buffer,
+ cmd_buffer->state.compute_pipeline,
+ descriptors_state,
+ MESA_SHADER_COMPUTE);
}
+
descriptors_state->dirty = 0;
descriptors_state->push_dirty = false;
? cmd_buffer->state.compute_pipeline
: cmd_buffer->state.pipeline;
struct radv_pipeline_layout *layout = pipeline->layout;
+ struct radv_shader_variant *shader, *prev_shader;
unsigned offset;
void *ptr;
uint64_t va;
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
cmd_buffer->cs, MESA_SHADER_STAGES * 4);
+ prev_shader = NULL;
radv_foreach_stage(stage, stages) {
- if (pipeline->shaders[stage]) {
+ shader = radv_get_shader(pipeline, stage);
+
+ /* Avoid redundantly emitting the address for merged stages. */
+ if (shader && shader != prev_shader) {
radv_emit_userdata_address(cmd_buffer, pipeline, stage,
AC_UD_PUSH_CONSTANTS, va);
+
+ prev_shader = shader;
}
}
if ((pipeline_is_dirty ||
(cmd_buffer->state.dirty & RADV_CMD_DIRTY_VERTEX_BUFFER)) &&
cmd_buffer->state.pipeline->vertex_elements.count &&
- radv_get_vertex_shader(cmd_buffer->state.pipeline)->info.info.vs.has_vertex_buffers) {
+ radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.info.vs.has_vertex_buffers) {
struct radv_vertex_elements_info *velems = &cmd_buffer->state.pipeline->vertex_elements;
unsigned vb_offset;
void *vb_ptr;
{
struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
struct radv_cmd_state *state = &cmd_buffer->state;
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint32_t ia_multi_vgt_param;
int32_t primitive_reset_en;
assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
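+ /* With the global BO list, every buffer is already resident for all
+ * submissions, so the descriptor buffers only have to be added to the
+ * command stream's list when that path is disabled. */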
+ if (!cmd_buffer->device->use_global_bo_list) {
+ for (unsigned j = 0; j < set->layout->buffer_count; ++j)
+ if (set->descriptors[j])
+ radv_cs_add_buffer(ws, cmd_buffer->cs, set->descriptors[j], 7);
+ }
+
if(set->bo)
radv_cs_add_buffer(ws, cmd_buffer->cs, set->bo, 8);
}
vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs))
- return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ return vk_error(cmd_buffer->device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
cmd_buffer->status = RADV_CMD_BUFFER_STATUS_EXECUTABLE;
assert(firstViewport < MAX_VIEWPORTS);
assert(total_count >= 1 && total_count <= MAX_VIEWPORTS);
- if (cmd_buffer->device->physical_device->has_scissor_bug) {
- /* Try to skip unnecessary PS partial flushes when the viewports
- * don't change.
- */
- if (!(state->dirty & (RADV_CMD_DIRTY_DYNAMIC_VIEWPORT |
- RADV_CMD_DIRTY_DYNAMIC_SCISSOR)) &&
- !memcmp(state->dynamic.viewport.viewports + firstViewport,
- pViewports, viewportCount * sizeof(*pViewports))) {
- return;
- }
- }
-
memcpy(state->dynamic.viewport.viewports + firstViewport, pViewports,
viewportCount * sizeof(*pViewports));
assert(firstScissor < MAX_SCISSORS);
assert(total_count >= 1 && total_count <= MAX_SCISSORS);
- if (cmd_buffer->device->physical_device->has_scissor_bug) {
- /* Try to skip unnecessary PS partial flushes when the scissors
- * don't change.
- */
- if (!(state->dirty & (RADV_CMD_DIRTY_DYNAMIC_VIEWPORT |
- RADV_CMD_DIRTY_DYNAMIC_SCISSOR)) &&
- !memcmp(state->dynamic.scissor.scissors + firstScissor,
- pScissors, scissorCount * sizeof(*pScissors))) {
- return;
- }
- }
-
memcpy(state->dynamic.scissor.scissors + firstScissor, pScissors,
scissorCount * sizeof(*pScissors));
pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pool == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (pAllocator)
pool->alloc = *pAllocator;
uint64_t count_va,
uint32_t stride)
{
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
unsigned di_src_sel = indexed ? V_0287F0_DI_SRC_SEL_DMA
: V_0287F0_DI_SRC_SEL_AUTO_INDEX;
- bool draw_id_enable = radv_get_vertex_shader(cmd_buffer->state.pipeline)->info.info.vs.needs_draw_id;
+ bool draw_id_enable = radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.info.vs.needs_draw_id;
uint32_t base_reg = cmd_buffer->state.pipeline->graphics.vtx_base_sgpr;
assert(base_reg);
{
struct radv_cmd_state *state = &cmd_buffer->state;
struct radeon_winsys *ws = cmd_buffer->device->ws;
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
if (info->indirect) {
uint64_t va = radv_buffer_get_va(info->indirect->bo);
}
}
+/*
+ * Vega and Raven have a bug that triggers if multiple hardware register
+ * contexts are active at the same time with different scissor values.
+ *
+ * There are two possible workarounds:
+ * 1) Wait for PS_PARTIAL_FLUSH every time the scissor is changed. That way
+ * there is only ever one active set of scissor values at the same time.
+ *
+ * 2) Whenever the hardware switches contexts we have to set the scissor
+ * registers again even if it is a noop. That way the new context gets
+ * the correct scissor values.
+ *
+ * This implements option 2. radv_need_late_scissor_emission needs to
+ * return true on affected HW if radv_emit_all_graphics_states sets
+ * any context registers.
+ */
+static bool radv_need_late_scissor_emission(struct radv_cmd_buffer *cmd_buffer,
+ bool indexed_draw)
+{
+ struct radv_cmd_state *state = &cmd_buffer->state;
+
+ if (!cmd_buffer->device->physical_device->has_scissor_bug)
+ return false;
+
+ uint32_t used_states = cmd_buffer->state.pipeline->graphics.needed_dynamic_state | ~RADV_CMD_DIRTY_DYNAMIC_ALL;
+
+ /* Index & Vertex buffer don't change context regs, and pipeline is handled later. */
+ used_states &= ~(RADV_CMD_DIRTY_INDEX_BUFFER | RADV_CMD_DIRTY_VERTEX_BUFFER | RADV_CMD_DIRTY_PIPELINE);
+
+ /* Assume any state change not masked off above can imply a context roll. */
+ if (cmd_buffer->state.dirty & used_states)
+ return true;
+
+ if (cmd_buffer->state.emitted_pipeline != cmd_buffer->state.pipeline)
+ return true;
+
+ if (indexed_draw && state->pipeline->graphics.prim_restart_enable &&
+ (state->index_type ? 0xffffffffu : 0xffffu) != state->last_primitive_reset_index)
+ return true;
+
+ return false;
+}
+
static void
radv_emit_all_graphics_states(struct radv_cmd_buffer *cmd_buffer,
const struct radv_draw_info *info)
{
+ bool late_scissor_emission = radv_need_late_scissor_emission(cmd_buffer, info->indexed);
+
if ((cmd_buffer->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER) ||
cmd_buffer->state.emitted_pipeline != cmd_buffer->state.pipeline)
radv_emit_rbplus_state(cmd_buffer);
radv_emit_draw_registers(cmd_buffer, info->indexed,
info->instance_count > 1, info->indirect,
info->indirect ? 0 : info->count);
+
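+ /* Workaround option 2 for the Vega/Raven scissor bug: re-emit the
+ * scissor state after any state that may have rolled the context. */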
+ if (late_scissor_emission)
+ radv_emit_scissor(cmd_buffer);
}
static void
cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
bool pipeline_is_dirty =
(cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) &&
- cmd_buffer->state.pipeline &&
cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline;
MAYBE_UNUSED unsigned cdw_max =
radv_draw(cmd_buffer, &info);
}
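+/**
+ * VK_KHR_draw_indirect_count entrypoints: the effective draw count is read
+ * by the GPU from count_buffer at execution time and clamped to
+ * maxDrawCount.
+ */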
+void radv_CmdDrawIndirectCountKHR(
+ VkCommandBuffer commandBuffer,
+ VkBuffer _buffer,
+ VkDeviceSize offset,
+ VkBuffer _countBuffer,
+ VkDeviceSize countBufferOffset,
+ uint32_t maxDrawCount,
+ uint32_t stride)
+{
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
+ RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
+ struct radv_draw_info info = {};
+
+ info.count = maxDrawCount;
+ info.indirect = buffer;
+ info.indirect_offset = offset;
+ info.count_buffer = count_buffer;
+ info.count_buffer_offset = countBufferOffset;
+ info.stride = stride;
+
+ radv_draw(cmd_buffer, &info);
+}
+
+void radv_CmdDrawIndexedIndirectCountKHR(
+ VkCommandBuffer commandBuffer,
+ VkBuffer _buffer,
+ VkDeviceSize offset,
+ VkBuffer _countBuffer,
+ VkDeviceSize countBufferOffset,
+ uint32_t maxDrawCount,
+ uint32_t stride)
+{
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
+ RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
+ struct radv_draw_info info = {};
+
+ info.indexed = true;
+ info.count = maxDrawCount;
+ info.indirect = buffer;
+ info.indirect_offset = offset;
+ info.count_buffer = count_buffer;
+ info.count_buffer_offset = countBufferOffset;
+ info.stride = stride;
+
+ radv_draw(cmd_buffer, &info);
+}
+
struct radv_dispatch_info {
/**
* Determine the layout of the grid (in block units) to be used.
struct radv_shader_variant *compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
unsigned dispatch_initiator = cmd_buffer->device->dispatch_initiator;
struct radeon_winsys *ws = cmd_buffer->device->ws;
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
struct radv_userdata_info *loc;
loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_COMPUTE,
size, clear_word);
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
+
+ /* Initialize the depth clear registers and update the ZRANGE_PRECISION
+ * value for the TC-compat bug (because ZRANGE_PRECISION is 1 by
+ * default). This is only needed when clearing Z to 0.0f.
+ */
+ if (radv_image_is_tc_compat_htile(image) && clear_word == 0) {
+ VkImageAspectFlags aspects = VK_IMAGE_ASPECT_DEPTH_BIT;
+ VkClearDepthStencilValue value = {};
+
+ if (vk_format_is_stencil(image->vk_format))
+ aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
+
+ radv_set_ds_clear_metadata(cmd_buffer, image, value, aspects);
+ }
}
static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffer,
VkPipelineStageFlags stageMask,
unsigned value)
{
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint64_t va = radv_buffer_get_va(event->bo);
radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo, 8);
const VkImageMemoryBarrier* pImageMemoryBarriers)
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
for (unsigned i = 0; i < eventCount; ++i) {
RADV_FROM_HANDLE(radv_event, event, pEvents[i]);