static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
struct radv_image *image,
VkImageLayout src_layout,
+ bool src_render_loop,
VkImageLayout dst_layout,
+ bool dst_render_loop,
uint32_t src_family,
uint32_t dst_family,
const VkImageSubresourceRange *range,
.front = 0u,
.back = 0u,
},
+ .line_stipple = {
+ .factor = 0u,
+ .pattern = 0u,
+ },
};
static void
}
}
+ if (copy_mask & RADV_DYNAMIC_LINE_STIPPLE) {
+ if (memcmp(&dest->line_stipple, &src->line_stipple,
+ sizeof(src->line_stipple))) {
+ dest->line_stipple = src->line_stipple;
+ dest_mask |= RADV_DYNAMIC_LINE_STIPPLE;
+ }
+ }
+
cmd_buffer->state.dirty |= dest_mask;
}
struct radv_streamout_state *so = &cmd_buffer->state.streamout;
struct radv_shader_info *info;
- if (!pipeline->streamout_shader)
+ if (!pipeline->streamout_shader ||
+ cmd_buffer->device->physical_device->use_ngg_streamout)
return;
- info = &pipeline->streamout_shader->info.info;
+ info = &pipeline->streamout_shader->info;
for (int i = 0; i < MAX_SO_BUFFERS; i++)
so->stride_in_dw[i] = info->so.strides[i];
if (cmd_buffer == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
- cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+ vk_object_base_init(&device->vk, &cmd_buffer->base,
+ VK_OBJECT_TYPE_COMMAND_BUFFER);
+
cmd_buffer->device = device;
cmd_buffer->pool = pool;
cmd_buffer->level = level;
- if (pool) {
- list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
- cmd_buffer->queue_family_index = pool->queue_family_index;
-
- } else {
- /* Init the pool_link so we can safely call list_del when we destroy
- * the command buffer
- */
- list_inithead(&cmd_buffer->pool_link);
- cmd_buffer->queue_family_index = RADV_QUEUE_GENERAL;
- }
+ list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
+ cmd_buffer->queue_family_index = pool->queue_family_index;
ring = radv_queue_family_to_ring(cmd_buffer->queue_family_index);
cmd_buffer->device->ws->buffer_destroy(cmd_buffer->upload.upload_bo);
cmd_buffer->device->ws->cs_destroy(cmd_buffer->cs);
- for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
+ for (unsigned i = 0; i < MAX_BIND_POINTS; i++)
free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);
+ vk_object_base_finish(&cmd_buffer->base);
+
vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
}
cmd_buffer->push_constant_stages = 0;
- cmd_buffer->scratch_size_needed = 0;
- cmd_buffer->compute_scratch_size_needed = 0;
+ cmd_buffer->scratch_size_per_wave_needed = 0;
+ cmd_buffer->scratch_waves_wanted = 0;
+ cmd_buffer->compute_scratch_size_per_wave_needed = 0;
+ cmd_buffer->compute_scratch_waves_wanted = 0;
cmd_buffer->esgs_ring_size_needed = 0;
cmd_buffer->gsvs_ring_size_needed = 0;
cmd_buffer->tess_rings_needed = false;
+ cmd_buffer->gds_needed = false;
+ cmd_buffer->gds_oa_needed = false;
cmd_buffer->sample_positions_needed = false;
if (cmd_buffer->upload.upload_bo)
memset(cmd_buffer->vertex_bindings, 0, sizeof(cmd_buffer->vertex_bindings));
- for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
+ for (unsigned i = 0; i < MAX_BIND_POINTS; i++) {
cmd_buffer->descriptors[i].dirty = 0;
cmd_buffer->descriptors[i].valid = 0;
cmd_buffer->descriptors[i].push_dirty = false;
radv_cmd_buffer_after_draw(struct radv_cmd_buffer *cmd_buffer,
enum radv_cmd_flush_bits flags)
{
+ if (unlikely(cmd_buffer->device->thread_trace_bo)) {
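+ /* Emit a marker so SQTT (RGP) traces can delimit individual draws. */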
+ radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_THREAD_TRACE_MARKER) | EVENT_INDEX(0));
+ }
+
if (cmd_buffer->device->instance->debug_flags & RADV_DEBUG_SYNC_SHADERS) {
assert(flags & (RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
RADV_CMD_FLAG_CS_PARTIAL_FLUSH));
assert(!"invalid ring type");
}
- data[0] = (uintptr_t)pipeline;
- data[1] = (uintptr_t)pipeline >> 32;
+ uint64_t pipeline_address = (uintptr_t)pipeline;
+ data[0] = pipeline_address;
+ data[1] = pipeline_address >> 32;
radv_emit_write_data_packet(cmd_buffer, va, 2, data);
}
float shifted_pos_x = user_locs[i].x - 0.5;
float shifted_pos_y = user_locs[i].y - 0.5;
- int32_t scaled_pos_x = floor(shifted_pos_x * 16);
- int32_t scaled_pos_y = floor(shifted_pos_y * 16);
+ int32_t scaled_pos_x = floorf(shifted_pos_x * 16);
+ int32_t scaled_pos_y = floorf(shifted_pos_y * 16);
sample_locs[i].x = CLAMP(scaled_pos_x, -8, 7);
sample_locs[i].y = CLAMP(scaled_pos_y, -8, 7);
static void
radv_emit_sample_locations(struct radv_cmd_buffer *cmd_buffer)
{
- struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
- struct radv_multisample_state *ms = &pipeline->graphics.ms;
struct radv_sample_locations_state *sample_location =
&cmd_buffer->state.dynamic.sample_location;
uint32_t num_samples = (uint32_t)sample_location->per_pixel;
num_samples);
/* Compute the maximum sample distance from the specified locations. */
- for (uint32_t i = 0; i < num_samples; i++) {
- VkOffset2D offset = sample_locs[0][i];
- max_sample_dist = MAX2(max_sample_dist,
- MAX2(abs(offset.x), abs(offset.y)));
+ for (unsigned i = 0; i < 4; ++i) {
+ for (uint32_t j = 0; j < num_samples; j++) {
+ VkOffset2D offset = sample_locs[i][j];
+ max_sample_dist = MAX2(max_sample_dist,
+ MAX2(abs(offset.x), abs(offset.y)));
+ }
}
/* Emit the specified user sample locations. */
}
/* Emit the maximum sample distance and the centroid priority. */
- uint32_t pa_sc_aa_config = ms->pa_sc_aa_config;
-
- pa_sc_aa_config &= C_028BE0_MAX_SAMPLE_DIST;
- pa_sc_aa_config |= S_028BE0_MAX_SAMPLE_DIST(max_sample_dist);
-
- radeon_set_context_reg_seq(cs, R_028BE0_PA_SC_AA_CONFIG, 1);
- radeon_emit(cs, pa_sc_aa_config);
+ radeon_set_context_reg_rmw(cs, R_028BE0_PA_SC_AA_CONFIG,
+ S_028BE0_MAX_SAMPLE_DIST(max_sample_dist),
+ ~C_028BE0_MAX_SAMPLE_DIST);
radeon_set_context_reg_seq(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2);
radeon_emit(cs, centroid_priority);
struct radv_pipeline *pipeline)
{
int num_samples = pipeline->graphics.ms.num_samples;
- struct radv_multisample_state *ms = &pipeline->graphics.ms;
struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline;
- if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.needs_sample_positions)
+ if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.needs_sample_positions)
cmd_buffer->sample_positions_needed = true;
if (old_pipeline && num_samples == old_pipeline->graphics.ms.num_samples)
return;
- radeon_set_context_reg_seq(cmd_buffer->cs, R_028BDC_PA_SC_LINE_CNTL, 2);
- radeon_emit(cmd_buffer->cs, ms->pa_sc_line_cntl);
- radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_config);
+ radv_emit_default_sample_locations(cmd_buffer->cs, num_samples);
- radeon_set_context_reg(cmd_buffer->cs, R_028A48_PA_SC_MODE_CNTL_0, ms->pa_sc_mode_cntl_0);
+ cmd_buffer->state.context_roll_without_scissor_emitted = true;
+}
- radv_emit_default_sample_locations(cmd_buffer->cs, num_samples);
+static void
+radv_update_binning_state(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_pipeline *pipeline)
+{
+ const struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline;
- /* GFX9: Flush DFSM when the AA mode changes. */
- if (cmd_buffer->device->dfsm_allowed) {
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
- radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
+
+ if (pipeline->device->physical_device->rad_info.chip_class < GFX9)
+ return;
+
+ if (old_pipeline &&
+ old_pipeline->graphics.binning.pa_sc_binner_cntl_0 == pipeline->graphics.binning.pa_sc_binner_cntl_0 &&
+ old_pipeline->graphics.binning.db_dfsm_control == pipeline->graphics.binning.db_dfsm_control)
+ return;
+
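+ /* Vega12/20, Raven2 and GFX10+ require a flush when the
+ * BINNING_MODE changes.
+ */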
+ bool binning_flush = false;
+ if (cmd_buffer->device->physical_device->rad_info.family == CHIP_VEGA12 ||
+ cmd_buffer->device->physical_device->rad_info.family == CHIP_VEGA20 ||
+ cmd_buffer->device->physical_device->rad_info.family == CHIP_RAVEN2 ||
+ cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
+ binning_flush = !old_pipeline ||
+ G_028C44_BINNING_MODE(old_pipeline->graphics.binning.pa_sc_binner_cntl_0) !=
+ G_028C44_BINNING_MODE(pipeline->graphics.binning.pa_sc_binner_cntl_0);
+ }
+
+ radeon_set_context_reg(cmd_buffer->cs, R_028C44_PA_SC_BINNER_CNTL_0,
+ pipeline->graphics.binning.pa_sc_binner_cntl_0 |
+ S_028C44_FLUSH_ON_BINNING_TRANSITION(!!binning_flush));
+
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
+ radeon_set_context_reg(cmd_buffer->cs, R_028038_DB_DFSM_CONTROL,
+ pipeline->graphics.binning.db_dfsm_control);
+ } else {
+ radeon_set_context_reg(cmd_buffer->cs, R_028060_DB_DFSM_CONTROL,
+ pipeline->graphics.binning.db_dfsm_control);
}
cmd_buffer->state.context_roll_without_scissor_emitted = true;
}
+
static void
radv_emit_shader_prefetch(struct radv_cmd_buffer *cmd_buffer,
struct radv_shader_variant *shader)
static void
radv_emit_rbplus_state(struct radv_cmd_buffer *cmd_buffer)
{
- if (!cmd_buffer->device->physical_device->rbplus_allowed)
+ if (!cmd_buffer->device->physical_device->rad_info.rbplus_allowed)
return;
struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
- struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
const struct radv_subpass *subpass = cmd_buffer->state.subpass;
unsigned sx_ps_downconvert = 0;
unsigned sx_blend_opt_epsilon = 0;
unsigned sx_blend_opt_control = 0;
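+ /* The RB+ state depends on the bound attachments; skip when no
+ * render pass state is available. */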
+ if (!cmd_buffer->state.attachments || !subpass)
+ return;
+
for (unsigned i = 0; i < subpass->color_count; ++i) {
if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) {
- sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
- sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4);
+ /* We don't set the DISABLE bits, because the HW can't have holes
+ * between enabled MRTs, so the SPI color format for this target is
+ * set to 32-bit 1-component instead. */
+ sx_ps_downconvert |= V_028754_SX_RT_EXPORT_32_R << (i * 4);
continue;
}
int idx = subpass->color_attachments[i].attachment;
- struct radv_color_buffer_info *cb = &framebuffer->attachments[idx].cb;
+ struct radv_color_buffer_info *cb = &cmd_buffer->state.attachments[idx].cb;
unsigned format = G_028C70_FORMAT(cb->cb_color_info);
unsigned swap = G_028C70_COMP_SWAP(cb->cb_color_info);
}
}
- for (unsigned i = subpass->color_count; i < 8; ++i) {
- sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
- sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4);
- }
- /* TODO: avoid redundantly setting context registers */
+ /* Do not set the DISABLE bits for the unused attachments, as that
+ * breaks dual source blending in SkQP and does not seem to improve
+ * performance. */
+
+ if (sx_ps_downconvert == cmd_buffer->state.last_sx_ps_downconvert &&
+ sx_blend_opt_epsilon == cmd_buffer->state.last_sx_blend_opt_epsilon &&
+ sx_blend_opt_control == cmd_buffer->state.last_sx_blend_opt_control)
+ return;
+
radeon_set_context_reg_seq(cmd_buffer->cs, R_028754_SX_PS_DOWNCONVERT, 3);
radeon_emit(cmd_buffer->cs, sx_ps_downconvert);
radeon_emit(cmd_buffer->cs, sx_blend_opt_epsilon);
radeon_emit(cmd_buffer->cs, sx_blend_opt_control);
cmd_buffer->state.context_roll_without_scissor_emitted = true;
+
+ cmd_buffer->state.last_sx_ps_downconvert = sx_ps_downconvert;
+ cmd_buffer->state.last_sx_blend_opt_epsilon = sx_blend_opt_epsilon;
+ cmd_buffer->state.last_sx_blend_opt_control = sx_blend_opt_control;
+}
+
+static void
+radv_emit_batch_break_on_new_ps(struct radv_cmd_buffer *cmd_buffer)
+{
+ if (!cmd_buffer->device->pbb_allowed)
+ return;
+
+ struct radv_binning_settings settings =
+ radv_get_binning_settings(cmd_buffer->device->physical_device);
+ bool break_for_new_ps =
+ (!cmd_buffer->state.emitted_pipeline ||
+ cmd_buffer->state.emitted_pipeline->shaders[MESA_SHADER_FRAGMENT] !=
+ cmd_buffer->state.pipeline->shaders[MESA_SHADER_FRAGMENT]) &&
+ (settings.context_states_per_bin > 1 ||
+ settings.persistent_states_per_bin > 1);
+ bool break_for_new_cb_target_mask =
+ (!cmd_buffer->state.emitted_pipeline ||
+ cmd_buffer->state.emitted_pipeline->graphics.cb_target_mask !=
+ cmd_buffer->state.pipeline->graphics.cb_target_mask) &&
+ settings.context_states_per_bin > 1;
+
+ if (!break_for_new_ps && !break_for_new_cb_target_mask)
+ return;
+
+ radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0));
}
static void
return;
radv_update_multisample_state(cmd_buffer, pipeline);
+ radv_update_binning_state(cmd_buffer, pipeline);
- cmd_buffer->scratch_size_needed =
- MAX2(cmd_buffer->scratch_size_needed,
- pipeline->max_waves * pipeline->scratch_bytes_per_wave);
+ cmd_buffer->scratch_size_per_wave_needed = MAX2(cmd_buffer->scratch_size_per_wave_needed,
+ pipeline->scratch_bytes_per_wave);
+ cmd_buffer->scratch_waves_wanted = MAX2(cmd_buffer->scratch_waves_wanted,
+ pipeline->max_waves);
if (!cmd_buffer->state.emitted_pipeline ||
cmd_buffer->state.emitted_pipeline->graphics.can_use_guardband !=
cmd_buffer->state.context_roll_without_scissor_emitted = true;
}
+ radv_emit_batch_break_on_new_ps(cmd_buffer);
+
for (unsigned i = 0; i < MESA_SHADER_COMPUTE; i++) {
if (!pipeline->shaders[i])
continue;
unsigned width = cmd_buffer->state.dynamic.line_width * 8;
radeon_set_context_reg(cmd_buffer->cs, R_028A08_PA_SU_LINE_CNTL,
- S_028A08_WIDTH(CLAMP(width, 0, 0xFFF)));
+ S_028A08_WIDTH(CLAMP(width, 0, 0xFFFF)));
}
static void
radeon_emit(cmd_buffer->cs, bias); /* BACK OFFSET */
}
+static void
+radv_emit_line_stipple(struct radv_cmd_buffer *cmd_buffer)
+{
+ struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
+ struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
+ uint32_t auto_reset_cntl = 1;
+
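+ /* AUTO_RESET_CNTL = 1 restarts the stipple pattern for every new
+ * line, while 2 only restarts it per packet so the pattern
+ * continues across all segments of a strip.
+ */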
+ if (pipeline->graphics.topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP)
+ auto_reset_cntl = 2;
+
+ radeon_set_context_reg(cmd_buffer->cs, R_028A0C_PA_SC_LINE_STIPPLE,
+ S_028A0C_LINE_PATTERN(d->line_stipple.pattern) |
+ S_028A0C_REPEAT_COUNT(d->line_stipple.factor - 1) |
+ S_028A0C_AUTO_RESET_CNTL(auto_reset_cntl));
+}
+
static void
radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer,
int index,
- struct radv_attachment_info *att,
+ struct radv_color_buffer_info *cb,
struct radv_image_view *iview,
- VkImageLayout layout)
+ VkImageLayout layout,
+ bool in_render_loop)
{
bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8;
- struct radv_color_buffer_info *cb = &att->cb;
uint32_t cb_color_info = cb->cb_color_info;
struct radv_image *image = iview->image;
- if (!radv_layout_dcc_compressed(image, layout,
+ if (!radv_layout_dcc_compressed(cmd_buffer->device, image, layout, in_render_loop,
radv_image_queue_family_mask(image,
cmd_buffer->queue_family_index,
cmd_buffer->queue_family_index))) {
cb_color_info &= C_028C70_DCC_ENABLE;
}
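+ /* Disable CMASK/FMASK compression when the layout doesn't allow
+ * fast clears. */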
+ if (!radv_layout_can_fast_clear(image, layout, in_render_loop,
+ radv_image_queue_family_mask(image,
+ cmd_buffer->queue_family_index,
+ cmd_buffer->queue_family_index))) {
+ cb_color_info &= C_028C70_COMPRESSION;
+ }
+
if (radv_image_is_tc_compat_cmask(image) &&
(radv_is_fmask_decompress_pipeline(cmd_buffer) ||
radv_is_dcc_decompress_pipeline(cmd_buffer))) {
cb_color_info &= C_028C70_FMASK_COMPRESS_1FRAG_ONLY;
}
+ if (radv_image_has_fmask(image) &&
+ (radv_is_fmask_decompress_pipeline(cmd_buffer) ||
+ radv_is_hw_resolve_pipeline(cmd_buffer))) {
+ /* Make sure FMASK is enabled if it has been cleared because:
+ *
+ * 1) it's required for FMASK_DECOMPRESS operations to avoid
+ * GPU hangs
+ * 2) it's necessary for CB_RESOLVE which can read compressed
+ * FMASK data anyway.
+ */
+ cb_color_info |= S_028C70_COMPRESSION(1);
+ }
+
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
radeon_emit(cmd_buffer->cs, cb->cb_color_base);
static void
radv_update_zrange_precision(struct radv_cmd_buffer *cmd_buffer,
struct radv_ds_buffer_info *ds,
- struct radv_image *image, VkImageLayout layout,
- bool requires_cond_exec)
+ const struct radv_image_view *iview,
+ VkImageLayout layout,
+ bool in_render_loop, bool requires_cond_exec)
{
+ const struct radv_image *image = iview->image;
uint32_t db_z_info = ds->db_z_info;
uint32_t db_z_info_reg;
- if (!cmd_buffer->device->physical_device->has_tc_compat_zrange_bug ||
+ if (!cmd_buffer->device->physical_device->rad_info.has_tc_compat_zrange_bug ||
!radv_image_is_tc_compat_htile(image))
return;
- if (!radv_layout_has_htile(image, layout,
- radv_image_queue_family_mask(image,
- cmd_buffer->queue_family_index,
- cmd_buffer->queue_family_index))) {
+ if (!radv_layout_is_htile_compressed(image, layout, in_render_loop,
+ radv_image_queue_family_mask(image,
+ cmd_buffer->queue_family_index,
+ cmd_buffer->queue_family_index))) {
db_z_info &= C_028040_TILE_SURFACE_ENABLE;
}
* SET_CONTEXT_REG packet.
*/
if (requires_cond_exec) {
- uint64_t va = radv_buffer_get_va(image->bo);
- va += image->offset + image->tc_compat_zrange_offset;
+ uint64_t va = radv_get_tc_compat_zrange_va(image, iview->base_mip);
radeon_emit(cmd_buffer->cs, PKT3(PKT3_COND_EXEC, 3, 0));
radeon_emit(cmd_buffer->cs, va);
static void
radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer,
struct radv_ds_buffer_info *ds,
- struct radv_image *image,
- VkImageLayout layout)
+ struct radv_image_view *iview,
+ VkImageLayout layout,
+ bool in_render_loop)
{
+ const struct radv_image *image = iview->image;
uint32_t db_z_info = ds->db_z_info;
uint32_t db_stencil_info = ds->db_stencil_info;
- if (!radv_layout_has_htile(image, layout,
- radv_image_queue_family_mask(image,
- cmd_buffer->queue_family_index,
- cmd_buffer->queue_family_index))) {
+ if (!radv_layout_is_htile_compressed(image, layout, in_render_loop,
+ radv_image_queue_family_mask(image,
+ cmd_buffer->queue_family_index,
+ cmd_buffer->queue_family_index))) {
db_z_info &= C_028040_TILE_SURFACE_ENABLE;
db_stencil_info |= S_028044_TILE_STENCIL_DISABLE(1);
}
}
/* Update the ZRANGE_PRECISION value for the TC-compat bug. */
- radv_update_zrange_precision(cmd_buffer, ds, image, layout, true);
+ radv_update_zrange_precision(cmd_buffer, ds, iview, layout,
+ in_render_loop, true);
radeon_set_context_reg(cmd_buffer->cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
ds->pa_su_poly_offset_db_fmt_cntl);
*/
static void
radv_update_bound_fast_clear_ds(struct radv_cmd_buffer *cmd_buffer,
- struct radv_image *image,
+ const struct radv_image_view *iview,
VkClearDepthStencilValue ds_clear_value,
VkImageAspectFlags aspects)
{
- struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
const struct radv_subpass *subpass = cmd_buffer->state.subpass;
+ const struct radv_image *image = iview->image;
struct radeon_cmdbuf *cs = cmd_buffer->cs;
- struct radv_attachment_info *att;
uint32_t att_idx;
- if (!framebuffer || !subpass)
+ if (!cmd_buffer->state.attachments || !subpass)
return;
if (!subpass->depth_stencil_attachment)
return;
att_idx = subpass->depth_stencil_attachment->attachment;
- att = &framebuffer->attachments[att_idx];
- if (att->attachment->image != image)
+ if (cmd_buffer->state.attachments[att_idx].iview->image != image)
return;
- radeon_set_context_reg_seq(cs, R_028028_DB_STENCIL_CLEAR, 2);
- radeon_emit(cs, ds_clear_value.stencil);
- radeon_emit(cs, fui(ds_clear_value.depth));
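+ /* Only write the clear registers for the aspects that are being
+ * cleared. */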
+ if (aspects == (VK_IMAGE_ASPECT_DEPTH_BIT |
+ VK_IMAGE_ASPECT_STENCIL_BIT)) {
+ radeon_set_context_reg_seq(cs, R_028028_DB_STENCIL_CLEAR, 2);
+ radeon_emit(cs, ds_clear_value.stencil);
+ radeon_emit(cs, fui(ds_clear_value.depth));
+ } else if (aspects == VK_IMAGE_ASPECT_DEPTH_BIT) {
+ radeon_set_context_reg_seq(cs, R_02802C_DB_DEPTH_CLEAR, 1);
+ radeon_emit(cs, fui(ds_clear_value.depth));
+ } else {
+ assert(aspects == VK_IMAGE_ASPECT_STENCIL_BIT);
+ radeon_set_context_reg_seq(cs, R_028028_DB_STENCIL_CLEAR, 1);
+ radeon_emit(cs, ds_clear_value.stencil);
+ }
/* Update the ZRANGE_PRECISION value for the TC-compat bug. This is
* only needed when clearing Z to 0.0.
if ((aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
ds_clear_value.depth == 0.0) {
VkImageLayout layout = subpass->depth_stencil_attachment->layout;
+ bool in_render_loop = subpass->depth_stencil_attachment->in_render_loop;
- radv_update_zrange_precision(cmd_buffer, &att->ds, image,
- layout, false);
+ radv_update_zrange_precision(cmd_buffer, &cmd_buffer->state.attachments[att_idx].ds,
+ iview, layout, in_render_loop, false);
}
cmd_buffer->state.context_roll_without_scissor_emitted = true;
static void
radv_set_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
struct radv_image *image,
+ const VkImageSubresourceRange *range,
VkClearDepthStencilValue ds_clear_value,
VkImageAspectFlags aspects)
{
struct radeon_cmdbuf *cs = cmd_buffer->cs;
- uint64_t va = radv_buffer_get_va(image->bo);
- unsigned reg_offset = 0, reg_count = 0;
+ uint64_t va = radv_get_ds_clear_value_va(image, range->baseMipLevel);
+ uint32_t level_count = radv_get_levelCount(image, range);
- va += image->offset + image->clear_value_offset;
+ if (aspects == (VK_IMAGE_ASPECT_DEPTH_BIT |
+ VK_IMAGE_ASPECT_STENCIL_BIT)) {
+ /* Use the fastest way when both aspects are used. */
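+ /* The metadata stores an 8-byte (stencil, depth) pair per mip
+ * level, so all levels can be written with a single packet. */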
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + 2 * level_count, cmd_buffer->state.predicating));
+ radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
+ S_370_WR_CONFIRM(1) |
+ S_370_ENGINE_SEL(V_370_PFP));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
- if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
- ++reg_count;
+ for (uint32_t l = 0; l < level_count; l++) {
+ radeon_emit(cs, ds_clear_value.stencil);
+ radeon_emit(cs, fui(ds_clear_value.depth));
+ }
} else {
- ++reg_offset;
- va += 4;
- }
- if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
- ++reg_count;
+ /* Otherwise we need one WRITE_DATA packet per level. */
+ for (uint32_t l = 0; l < level_count; l++) {
+ uint64_t va = radv_get_ds_clear_value_va(image, range->baseMipLevel + l);
+ unsigned value;
+
+ if (aspects == VK_IMAGE_ASPECT_DEPTH_BIT) {
+ value = fui(ds_clear_value.depth);
+ va += 4;
+ } else {
+ assert(aspects == VK_IMAGE_ASPECT_STENCIL_BIT);
+ value = ds_clear_value.stencil;
+ }
- radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, cmd_buffer->state.predicating));
- radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
- S_370_WR_CONFIRM(1) |
- S_370_ENGINE_SEL(V_370_PFP));
- radeon_emit(cs, va);
- radeon_emit(cs, va >> 32);
- if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
- radeon_emit(cs, ds_clear_value.stencil);
- if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
- radeon_emit(cs, fui(ds_clear_value.depth));
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, cmd_buffer->state.predicating));
+ radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
+ S_370_WR_CONFIRM(1) |
+ S_370_ENGINE_SEL(V_370_PFP));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, value);
+ }
+ }
}
/**
static void
radv_set_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer,
struct radv_image *image,
+ const VkImageSubresourceRange *range,
uint32_t value)
{
struct radeon_cmdbuf *cs = cmd_buffer->cs;
- uint64_t va = radv_buffer_get_va(image->bo);
- if (!cmd_buffer->device->physical_device->has_tc_compat_zrange_bug)
+ if (!cmd_buffer->device->physical_device->rad_info.has_tc_compat_zrange_bug)
return;
- va += image->offset + image->tc_compat_zrange_offset;
+ uint64_t va = radv_get_tc_compat_zrange_va(image, range->baseMipLevel);
+ uint32_t level_count = radv_get_levelCount(image, range);
- radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, cmd_buffer->state.predicating));
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + level_count, cmd_buffer->state.predicating));
radeon_emit(cs, S_370_DST_SEL(V_370_MEM) |
S_370_WR_CONFIRM(1) |
S_370_ENGINE_SEL(V_370_PFP));
radeon_emit(cs, va);
radeon_emit(cs, va >> 32);
- radeon_emit(cs, value);
+
+ for (uint32_t l = 0; l < level_count; l++)
+ radeon_emit(cs, value);
}
static void
radv_update_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer,
- struct radv_image *image,
+ const struct radv_image_view *iview,
VkClearDepthStencilValue ds_clear_value)
{
+ VkImageSubresourceRange range = {
+ .aspectMask = iview->aspect_mask,
+ .baseMipLevel = iview->base_mip,
+ .levelCount = iview->level_count,
+ .baseArrayLayer = iview->base_layer,
+ .layerCount = iview->layer_count,
+ };
uint32_t cond_val;
/* Conditionally set DB_Z_INFO.ZRANGE_PRECISION to 0 when the last
*/
cond_val = ds_clear_value.depth == 0.0f ? UINT_MAX : 0;
- radv_set_tc_compat_zrange_metadata(cmd_buffer, image, cond_val);
+ radv_set_tc_compat_zrange_metadata(cmd_buffer, iview->image, &range,
+ cond_val);
}
/**
*/
void
radv_update_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
- struct radv_image *image,
+ const struct radv_image_view *iview,
VkClearDepthStencilValue ds_clear_value,
VkImageAspectFlags aspects)
{
+ VkImageSubresourceRange range = {
+ .aspectMask = iview->aspect_mask,
+ .baseMipLevel = iview->base_mip,
+ .levelCount = iview->level_count,
+ .baseArrayLayer = iview->base_layer,
+ .layerCount = iview->layer_count,
+ };
+ struct radv_image *image = iview->image;
+
assert(radv_image_has_htile(image));
- radv_set_ds_clear_metadata(cmd_buffer, image, ds_clear_value, aspects);
+ radv_set_ds_clear_metadata(cmd_buffer, iview->image, &range,
+ ds_clear_value, aspects);
if (radv_image_is_tc_compat_htile(image) &&
(aspects & VK_IMAGE_ASPECT_DEPTH_BIT)) {
- radv_update_tc_compat_zrange_metadata(cmd_buffer, image,
+ radv_update_tc_compat_zrange_metadata(cmd_buffer, iview,
ds_clear_value);
}
- radv_update_bound_fast_clear_ds(cmd_buffer, image, ds_clear_value,
- aspects);
+ radv_update_bound_fast_clear_ds(cmd_buffer, iview, ds_clear_value,
+ aspects);
}
/**
*/
static void
radv_load_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
- struct radv_image *image)
+ const struct radv_image_view *iview)
{
struct radeon_cmdbuf *cs = cmd_buffer->cs;
+ const struct radv_image *image = iview->image;
VkImageAspectFlags aspects = vk_format_aspects(image->vk_format);
- uint64_t va = radv_buffer_get_va(image->bo);
+ uint64_t va = radv_get_ds_clear_value_va(image, iview->base_mip);
unsigned reg_offset = 0, reg_count = 0;
- va += image->offset + image->clear_value_offset;
-
if (!radv_image_has_htile(image))
return;
uint32_t reg = R_028028_DB_STENCIL_CLEAR + 4 * reg_offset;
- if (cmd_buffer->device->physical_device->has_load_ctx_reg_pkt) {
- radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, 0));
+ if (cmd_buffer->device->physical_device->rad_info.has_load_ctx_reg_pkt) {
+ radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG_INDEX, 3, 0));
radeon_emit(cs, va);
radeon_emit(cs, va >> 32);
radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2);
int cb_idx,
uint32_t color_values[2])
{
- struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
const struct radv_subpass *subpass = cmd_buffer->state.subpass;
struct radeon_cmdbuf *cs = cmd_buffer->cs;
- struct radv_attachment_info *att;
uint32_t att_idx;
- if (!framebuffer || !subpass)
+ if (!cmd_buffer->state.attachments || !subpass)
return;
att_idx = subpass->color_attachments[cb_idx].attachment;
if (att_idx == VK_ATTACHMENT_UNUSED)
return;
- att = &framebuffer->attachments[att_idx];
- if (att->attachment->image != image)
+ if (cmd_buffer->state.attachments[att_idx].iview->image != image)
return;
radeon_set_context_reg_seq(cs, R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c, 2);
uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c;
- if (cmd_buffer->device->physical_device->has_load_ctx_reg_pkt) {
- radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, cmd_buffer->state.predicating));
+ if (cmd_buffer->device->physical_device->rad_info.has_load_ctx_reg_pkt) {
+ radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG_INDEX, 3, cmd_buffer->state.predicating));
radeon_emit(cs, va);
radeon_emit(cs, va >> 32);
radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2);
}
int idx = subpass->color_attachments[i].attachment;
- struct radv_attachment_info *att = &framebuffer->attachments[idx];
- struct radv_image_view *iview = att->attachment;
+ struct radv_image_view *iview = cmd_buffer->state.attachments[idx].iview;
VkImageLayout layout = subpass->color_attachments[i].layout;
+ bool in_render_loop = subpass->color_attachments[i].in_render_loop;
- radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo);
+ radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, iview->bo);
- assert(att->attachment->aspect_mask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT |
+ assert(iview->aspect_mask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT |
VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT));
- radv_emit_fb_color_state(cmd_buffer, i, att, iview, layout);
+ radv_emit_fb_color_state(cmd_buffer, i, &cmd_buffer->state.attachments[idx].cb, iview, layout, in_render_loop);
radv_load_color_clear_metadata(cmd_buffer, iview, i);
}
if (subpass->depth_stencil_attachment) {
int idx = subpass->depth_stencil_attachment->attachment;
VkImageLayout layout = subpass->depth_stencil_attachment->layout;
- struct radv_attachment_info *att = &framebuffer->attachments[idx];
- struct radv_image *image = att->attachment->image;
- radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo);
- MAYBE_UNUSED uint32_t queue_mask = radv_image_queue_family_mask(image,
- cmd_buffer->queue_family_index,
- cmd_buffer->queue_family_index);
- /* We currently don't support writing decompressed HTILE */
- assert(radv_layout_has_htile(image, layout, queue_mask) ==
- radv_layout_is_htile_compressed(image, layout, queue_mask));
-
- radv_emit_fb_ds_state(cmd_buffer, &att->ds, image, layout);
-
- if (att->ds.offset_scale != cmd_buffer->state.offset_scale) {
+ bool in_render_loop = subpass->depth_stencil_attachment->in_render_loop;
+ struct radv_image_view *iview = cmd_buffer->state.attachments[idx].iview;
+ radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, cmd_buffer->state.attachments[idx].iview->bo);
+
+ radv_emit_fb_ds_state(cmd_buffer, &cmd_buffer->state.attachments[idx].ds, iview, layout, in_render_loop);
+
+ if (cmd_buffer->state.attachments[idx].ds.offset_scale != cmd_buffer->state.offset_scale) {
cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
- cmd_buffer->state.offset_scale = att->ds.offset_scale;
+ cmd_buffer->state.offset_scale = cmd_buffer->state.attachments[idx].ds.offset_scale;
}
- radv_load_ds_clear_metadata(cmd_buffer, image);
+ radv_load_ds_clear_metadata(cmd_buffer, iview);
} else {
if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9)
radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 2);
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8) {
bool disable_constant_encode =
- cmd_buffer->device->physical_device->has_dcc_constant_encode;
+ cmd_buffer->device->physical_device->rad_info.has_dcc_constant_encode;
enum chip_class chip_class =
cmd_buffer->device->physical_device->rad_info.chip_class;
uint8_t watermark = chip_class >= GFX10 ? 6 : 4;
S_028424_DISABLE_CONSTANT_ENCODE_REG(disable_constant_encode));
}
- if (cmd_buffer->device->pbb_allowed) {
+ if (cmd_buffer->device->dfsm_allowed) {
radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0));
}
}
static void
-radv_emit_index_buffer(struct radv_cmd_buffer *cmd_buffer)
+radv_emit_index_buffer(struct radv_cmd_buffer *cmd_buffer, bool indirect)
{
struct radeon_cmdbuf *cs = cmd_buffer->cs;
struct radv_cmd_state *state = &cmd_buffer->state;
state->last_index_type = state->index_type;
}
+ /* For direct indexed draws we use DRAW_INDEX_2, which already
+ * includes index_va and max_index_count. */
+ if (!indirect)
+ return;
+
radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
radeon_emit(cs, state->index_va);
radeon_emit(cs, state->index_va >> 32);
radv_emit_viewport(cmd_buffer);
if (states & (RADV_CMD_DIRTY_DYNAMIC_SCISSOR | RADV_CMD_DIRTY_DYNAMIC_VIEWPORT) &&
- !cmd_buffer->device->physical_device->has_scissor_bug)
+ !cmd_buffer->device->physical_device->rad_info.has_gfx9_scissor_bug)
radv_emit_scissor(cmd_buffer);
if (states & RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH)
if (states & RADV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS)
radv_emit_sample_locations(cmd_buffer);
+ if (states & RADV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE)
+ radv_emit_line_stipple(cmd_buffer);
+
cmd_buffer->state.dirty &= ~states;
}
if (flush_indirect_descriptors)
radv_flush_indirect_descriptor_sets(cmd_buffer, bind_point);
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
+ ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
cmd_buffer->cs,
MAX_SETS * MESA_SHADER_STAGES * 4);
return;
radv_foreach_stage(stage, stages) {
- if (!pipeline->shaders[stage])
+ shader = radv_get_shader(pipeline, stage);
+ if (!shader)
continue;
- need_push_constants |= pipeline->shaders[stage]->info.info.loads_push_constants;
- need_push_constants |= pipeline->shaders[stage]->info.info.loads_dynamic_offsets;
+ need_push_constants |= shader->info.loads_push_constants;
+ need_push_constants |= shader->info.loads_dynamic_offsets;
- uint8_t base = pipeline->shaders[stage]->info.info.base_inline_push_consts;
- uint8_t count = pipeline->shaders[stage]->info.info.num_inline_push_consts;
+ uint8_t base = shader->info.base_inline_push_consts;
+ uint8_t count = shader->info.num_inline_push_consts;
radv_emit_inline_push_consts(cmd_buffer, pipeline, stage,
AC_UD_INLINE_PUSH_CONSTANTS,
va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
va += offset;
- MAYBE_UNUSED unsigned cdw_max =
+ ASSERTED unsigned cdw_max =
radeon_check_space(cmd_buffer->device->ws,
cmd_buffer->cs, MESA_SHADER_STAGES * 4);
if ((pipeline_is_dirty ||
(cmd_buffer->state.dirty & RADV_CMD_DIRTY_VERTEX_BUFFER)) &&
cmd_buffer->state.pipeline->num_vertex_bindings &&
- radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.info.vs.has_vertex_buffers) {
- struct radv_vertex_elements_info *velems = &cmd_buffer->state.pipeline->vertex_elements;
+ radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.vs.has_vertex_buffers) {
unsigned vb_offset;
void *vb_ptr;
uint32_t i = 0;
uint32_t offset;
struct radv_buffer *buffer = cmd_buffer->vertex_bindings[i].buffer;
uint32_t stride = cmd_buffer->state.pipeline->binding_stride[i];
+ unsigned num_records;
if (!buffer)
continue;
offset = cmd_buffer->vertex_bindings[i].offset;
va += offset + buffer->offset;
+
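+ /* NUM_RECORDS stays in bytes on GFX8, while other generations
+ * count stride-sized records. */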
+ num_records = buffer->size - offset;
+ if (cmd_buffer->device->physical_device->rad_info.chip_class != GFX8 && stride)
+ num_records /= stride;
+
desc[0] = va;
desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride);
- if (cmd_buffer->device->physical_device->rad_info.chip_class <= GFX7 && stride)
- desc[2] = (buffer->size - offset - velems->format_size[i]) / stride + 1;
- else
- desc[2] = buffer->size - offset;
+ desc[2] = num_records;
desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
+ /* OOB_SELECT chooses the out-of-bounds check:
+ * - 1: index >= NUM_RECORDS (Structured)
+ * - 3: offset >= NUM_RECORDS (Raw)
+ */
+ int oob_select = stride ? V_008F0C_OOB_SELECT_STRUCTURED : V_008F0C_OOB_SELECT_RAW;
+
desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_UINT) |
- S_008F0C_OOB_SELECT(1) |
+ S_008F0C_OOB_SELECT(oob_select) |
S_008F0C_RESOURCE_LEVEL(1);
} else {
desc[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_UINT) |
* the buffer will be considered not bound and store
* instructions will be no-ops.
*/
+ uint32_t size = 0xffffffff;
+
+ /* Compute the correct buffer size for NGG streamout
+ * because it's used to determine the max emit per
+ * buffer.
+ */
+ if (cmd_buffer->device->physical_device->use_ngg_streamout)
+ size = buffer->size - sb[i].offset;
+
desc[0] = va;
desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
- desc[2] = 0xffffffff;
+ desc[2] = size;
desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
- S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
- S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
+ S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
+
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
+ desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
+ S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
+ S_008F0C_RESOURCE_LEVEL(1);
+ } else {
+ desc[3] |= S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
+ }
}
va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_STREAMOUT_BUFFER;
}
+static void
+radv_flush_ngg_gs_state(struct radv_cmd_buffer *cmd_buffer)
+{
+ struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
+ struct radv_userdata_info *loc;
+ uint32_t ngg_gs_state = 0;
+ uint32_t base_reg;
+
+ if (!radv_pipeline_has_gs(pipeline) ||
+ !radv_pipeline_has_ngg(pipeline))
+ return;
+
+ /* By default, NGG GS queries are disabled, but they are enabled if the
+ * command buffer has active GDS queries or if it's a secondary command
+ * buffer that inherits the number of generated primitives.
+ */
+ if (cmd_buffer->state.active_pipeline_gds_queries ||
+ (cmd_buffer->state.inherited_pipeline_statistics & VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT))
+ ngg_gs_state = 1;
+
+ loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_GEOMETRY,
+ AC_UD_NGG_GS_STATE);
+ base_reg = pipeline->user_data_0[MESA_SHADER_GEOMETRY];
+ assert(loc->sgpr_idx != -1);
+
+ radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4,
+ ngg_gs_state);
+}
+
static void
radv_upload_graphics_shader_descriptors(struct radv_cmd_buffer *cmd_buffer, bool pipeline_is_dirty)
{
radv_flush_streamout_descriptors(cmd_buffer);
radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
radv_flush_constants(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
+ radv_flush_ngg_gs_state(cmd_buffer);
}
struct radv_draw_info {
uint64_t strmout_buffer_offset;
};
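+/* The primitive restart index is the all-ones value for the bound index size. */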
+static uint32_t
+radv_get_primitive_reset_index(struct radv_cmd_buffer *cmd_buffer)
+{
+ switch (cmd_buffer->state.index_type) {
+ case V_028A7C_VGT_INDEX_8:
+ return 0xffu;
+ case V_028A7C_VGT_INDEX_16:
+ return 0xffffu;
+ case V_028A7C_VGT_INDEX_32:
+ return 0xffffffffu;
+ default:
+ unreachable("invalid index type");
+ }
+}
+
static void
si_emit_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer,
bool instanced_draw, bool indirect_draw,
if (primitive_reset_en) {
uint32_t primitive_reset_index =
- state->index_type ? 0xffffffffu : 0xffffu;
+ radv_get_primitive_reset_index(cmd_buffer);
if (primitive_reset_index != state->last_primitive_reset_index) {
radeon_set_context_reg(cs,
break;
case VK_ACCESS_SHADER_READ_BIT:
flush_bits |= RADV_CMD_FLAG_INV_VCACHE;
+ /* Unlike LLVM, ACO uses SMEM for SSBOs and we have to
+ * invalidate the scalar cache. */
+ if (!cmd_buffer->device->physical_device->use_llvm)
+ flush_bits |= RADV_CMD_FLAG_INV_SCACHE;
if (!image_is_coherent)
flush_bits |= RADV_CMD_FLAG_INV_L2;
{
struct radv_cmd_state *state = &cmd_buffer->state;
uint32_t subpass_id = radv_get_subpass_id(cmd_buffer);
- struct radv_image_view *view = state->framebuffer->attachments[att_idx].attachment;
+ struct radv_image_view *view = state->attachments[att_idx].iview;
if (view->image->info.samples == 1)
return NULL;
bool begin_subpass)
{
unsigned idx = att.attachment;
- struct radv_image_view *view = cmd_buffer->state.framebuffer->attachments[idx].attachment;
+ struct radv_image_view *view = cmd_buffer->state.attachments[idx].iview;
struct radv_sample_locations_state *sample_locs;
VkImageSubresourceRange range;
- range.aspectMask = 0;
+ range.aspectMask = view->aspect_mask;
range.baseMipLevel = view->base_mip;
range.levelCount = 1;
range.baseArrayLayer = view->base_layer;
sample_locs = radv_get_attachment_sample_locations(cmd_buffer, idx,
begin_subpass);
- radv_handle_image_transition(cmd_buffer,
- view->image,
- cmd_buffer->state.attachments[idx].current_layout,
- att.layout, 0, 0, &range, sample_locs);
+ /* Determine if the subpass uses separate depth/stencil layouts. */
+ bool uses_separate_depth_stencil_layouts = false;
+ if ((cmd_buffer->state.attachments[idx].current_layout !=
+ cmd_buffer->state.attachments[idx].current_stencil_layout) ||
+ (att.layout != att.stencil_layout)) {
+ uses_separate_depth_stencil_layouts = true;
+ }
+
+ /* For separate layouts, perform depth and stencil transitions
+ * separately.
+ */
+ if (uses_separate_depth_stencil_layouts &&
+ (range.aspectMask == (VK_IMAGE_ASPECT_DEPTH_BIT |
+ VK_IMAGE_ASPECT_STENCIL_BIT))) {
+ /* Depth-only transitions. */
+ range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
+ radv_handle_image_transition(cmd_buffer,
+ view->image,
+ cmd_buffer->state.attachments[idx].current_layout,
+ cmd_buffer->state.attachments[idx].current_in_render_loop,
+ att.layout, att.in_render_loop,
+ 0, 0, &range, sample_locs);
+
+ /* Stencil-only transitions. */
+ range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
+ radv_handle_image_transition(cmd_buffer,
+ view->image,
+ cmd_buffer->state.attachments[idx].current_stencil_layout,
+ cmd_buffer->state.attachments[idx].current_in_render_loop,
+ att.stencil_layout, att.in_render_loop,
+ 0, 0, &range, sample_locs);
+ } else {
+ radv_handle_image_transition(cmd_buffer,
+ view->image,
+ cmd_buffer->state.attachments[idx].current_layout,
+ cmd_buffer->state.attachments[idx].current_in_render_loop,
+ att.layout, att.in_render_loop,
+ 0, 0, &range, sample_locs);
+ }
cmd_buffer->state.attachments[idx].current_layout = att.layout;
+ cmd_buffer->state.attachments[idx].current_stencil_layout = att.stencil_layout;
+ cmd_buffer->state.attachments[idx].current_in_render_loop = att.in_render_loop;
}
vk_find_struct_const(info->pNext,
RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT);
struct radv_cmd_state *state = &cmd_buffer->state;
- struct radv_framebuffer *framebuffer = state->framebuffer;
if (!sample_locs) {
state->subpass_sample_locs = NULL;
const VkAttachmentSampleLocationsEXT *att_sample_locs =
&sample_locs->pAttachmentInitialSampleLocations[i];
uint32_t att_idx = att_sample_locs->attachmentIndex;
- struct radv_attachment_info *att = &framebuffer->attachments[att_idx];
- struct radv_image *image = att->attachment->image;
+ struct radv_image *image = cmd_buffer->state.attachments[att_idx].iview->image;
assert(vk_format_is_depth_or_stencil(image->vk_format));
const VkRenderPassBeginInfo *info)
{
struct radv_cmd_state *state = &cmd_buffer->state;
+ const struct VkRenderPassAttachmentBeginInfo *attachment_info = NULL;
+
+ if (info) {
+ attachment_info = vk_find_struct_const(info->pNext,
+ RENDER_PASS_ATTACHMENT_BEGIN_INFO);
+ }
+
if (pass->attachment_count == 0) {
state->attachments = NULL;
}
state->attachments[i].current_layout = att->initial_layout;
+ state->attachments[i].current_stencil_layout = att->stencil_initial_layout;
state->attachments[i].sample_location.count = 0;
+
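+ /* With imageless framebuffers, the attachment views come from
+ * VkRenderPassAttachmentBeginInfo instead of the framebuffer. */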
+ struct radv_image_view *iview;
+ if (attachment_info && attachment_info->attachmentCount > i) {
+ iview = radv_image_view_from_handle(attachment_info->pAttachments[i]);
+ } else {
+ iview = state->framebuffer->attachments[i];
+ }
+
+ state->attachments[i].iview = iview;
+ if (iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
+ radv_initialise_ds_surface(cmd_buffer->device, &state->attachments[i].ds, iview);
+ } else {
+ radv_initialise_color_surface(cmd_buffer->device, &state->attachments[i].cb, iview);
+ }
}
return VK_SUCCESS;
for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
- if (!list_empty(&pool->free_cmd_buffers)) {
+ if (!list_is_empty(&pool->free_cmd_buffers)) {
struct radv_cmd_buffer *cmd_buffer = list_first_entry(&pool->free_cmd_buffers, struct radv_cmd_buffer, pool_link);
list_del(&cmd_buffer->pool_link);
list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
result = radv_reset_cmd_buffer(cmd_buffer);
- cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
cmd_buffer->level = pAllocateInfo->level;
pCommandBuffers[i] = radv_cmd_buffer_to_handle(cmd_buffer);
cmd_buffer->state.last_vertex_offset = -1;
cmd_buffer->state.last_first_instance = -1;
cmd_buffer->state.predication_type = -1;
+ cmd_buffer->state.last_sx_ps_downconvert = -1;
+ cmd_buffer->state.last_sx_blend_opt_epsilon = -1;
+ cmd_buffer->state.last_sx_blend_opt_control = -1;
cmd_buffer->usage_flags = pBeginInfo->flags;
if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
struct radv_subpass *subpass =
&cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
- result = radv_cmd_state_setup_attachments(cmd_buffer, cmd_buffer->state.pass, NULL);
- if (result != VK_SUCCESS)
- return result;
+ if (cmd_buffer->state.framebuffer) {
+ result = radv_cmd_state_setup_attachments(cmd_buffer, cmd_buffer->state.pass, NULL);
+ if (result != VK_SUCCESS)
+ return result;
+ }
+
+ cmd_buffer->state.inherited_pipeline_statistics =
+ pBeginInfo->pInheritanceInfo->pipelineStatistics;
radv_cmd_buffer_set_subpass(cmd_buffer, subpass);
}
- if (unlikely(cmd_buffer->device->trace_bo)) {
- struct radv_device *device = cmd_buffer->device;
-
- radv_cs_add_buffer(device->ws, cmd_buffer->cs,
- device->trace_bo);
-
+ if (unlikely(cmd_buffer->device->trace_bo))
radv_cmd_buffer_trace_emit(cmd_buffer);
- }
+
+ radv_describe_begin_cmd_buffer(cmd_buffer);
cmd_buffer->status = RADV_CMD_BUFFER_STATUS_RECORDING;
assert(firstBinding + bindingCount <= MAX_VBS);
for (uint32_t i = 0; i < bindingCount; i++) {
+ RADV_FROM_HANDLE(radv_buffer, buffer, pBuffers[i]);
uint32_t idx = firstBinding + i;
if (!changed &&
- (vb[idx].buffer != radv_buffer_from_handle(pBuffers[i]) ||
+ (vb[idx].buffer != buffer ||
vb[idx].offset != pOffsets[i])) {
changed = true;
}
- vb[idx].buffer = radv_buffer_from_handle(pBuffers[i]);
+ vb[idx].buffer = buffer;
vb[idx].offset = pOffsets[i];
- radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
- vb[idx].buffer->bo);
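+ /* The buffer may be NULL (e.g. with nullDescriptor); only track
+ * a BO when one is actually bound. */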
+ if (buffer) {
+ radv_cs_add_buffer(cmd_buffer->device->ws,
+ cmd_buffer->cs, vb[idx].buffer->bo);
+ }
}
if (!changed) {
cmd_buffer->state.dirty |= RADV_CMD_DIRTY_VERTEX_BUFFER;
}
+static uint32_t
+vk_to_index_type(VkIndexType type)
+{
+ switch (type) {
+ case VK_INDEX_TYPE_UINT8_EXT:
+ return V_028A7C_VGT_INDEX_8;
+ case VK_INDEX_TYPE_UINT16:
+ return V_028A7C_VGT_INDEX_16;
+ case VK_INDEX_TYPE_UINT32:
+ return V_028A7C_VGT_INDEX_32;
+ default:
+ unreachable("invalid index type");
+ }
+}
+
+static uint32_t
+radv_get_vgt_index_size(uint32_t type)
+{
+ switch (type) {
+ case V_028A7C_VGT_INDEX_8:
+ return 1;
+ case V_028A7C_VGT_INDEX_16:
+ return 2;
+ case V_028A7C_VGT_INDEX_32:
+ return 4;
+ default:
+ unreachable("invalid index type");
+ }
+}
+
void radv_CmdBindIndexBuffer(
VkCommandBuffer commandBuffer,
VkBuffer buffer,
cmd_buffer->state.index_buffer = index_buffer;
cmd_buffer->state.index_offset = offset;
- cmd_buffer->state.index_type = indexType; /* vk matches hw */
+ cmd_buffer->state.index_type = vk_to_index_type(indexType);
cmd_buffer->state.index_va = radv_buffer_get_va(index_buffer->bo);
cmd_buffer->state.index_va += index_buffer->offset + offset;
- int index_size_shift = cmd_buffer->state.index_type ? 2 : 1;
- cmd_buffer->state.max_index_count = (index_buffer->size - offset) >> index_size_shift;
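+ /* max_index_count feeds the DRAW_INDEX_2 bounds check. */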
+ int index_size = radv_get_vgt_index_size(vk_to_index_type(indexType));
+ cmd_buffer->state.max_index_count = (index_buffer->size - offset) / index_size;
cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, index_buffer->bo);
}
assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
if (!cmd_buffer->device->use_global_bo_list) {
- for (unsigned j = 0; j < set->layout->buffer_count; ++j)
+ for (unsigned j = 0; j < set->buffer_count; ++j)
if (set->descriptors[j])
radv_cs_add_buffer(ws, cmd_buffer->cs, set->descriptors[j]);
}
for (unsigned i = 0; i < descriptorSetCount; ++i) {
unsigned idx = i + firstSet;
RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);
- radv_bind_descriptor_set(cmd_buffer, pipelineBindPoint, set, idx);
+
+ /* If the set is already bound, we only need to update the
+ * (potentially changed) dynamic offsets. */
+ if (descriptors_state->sets[idx] != set ||
+ !(descriptors_state->valid & (1u << idx))) {
+ radv_bind_descriptor_set(cmd_buffer, pipelineBindPoint, set, idx);
+ }
for(unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
unsigned idx = j + layout->set[i + firstSet].dynamic_offset_start;
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
dst[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
- S_008F0C_OOB_SELECT(3) |
+ S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
S_008F0C_RESOURCE_LEVEL(1);
} else {
dst[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
* because it is invalid, according to Vulkan spec.
*/
for (int i = 0; i < descriptorWriteCount; i++) {
- MAYBE_UNUSED const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i];
+ ASSERTED const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i];
assert(writeset->descriptorType != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT);
}
*/
cmd_buffer->state.flush_bits |= cmd_buffer->active_query_flush_bits;
+ /* Since NGG streamout uses GDS, we need to make GDS idle when
+ * we leave the IB, otherwise another process might overwrite
+ * it while our shaders are busy.
+ */
+ if (cmd_buffer->gds_needed)
+ cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
+
si_emit_cache_flush(cmd_buffer);
}
*/
si_cp_dma_wait_for_idle(cmd_buffer);
+ radv_describe_end_cmd_buffer(cmd_buffer);
+
vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.subpass_sample_locs);
- if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs))
- return vk_error(cmd_buffer->device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ VkResult result = cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs);
+ if (result != VK_SUCCESS)
+ return vk_error(cmd_buffer->device->instance, result);
cmd_buffer->status = RADV_CMD_BUFFER_STATUS_EXECUTABLE;
radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, pipeline->cs.cdw);
radeon_emit_array(cmd_buffer->cs, pipeline->cs.buf, pipeline->cs.cdw);
- cmd_buffer->compute_scratch_size_needed =
- MAX2(cmd_buffer->compute_scratch_size_needed,
- pipeline->max_waves * pipeline->scratch_bytes_per_wave);
+ cmd_buffer->compute_scratch_size_per_wave_needed = MAX2(cmd_buffer->compute_scratch_size_per_wave_needed,
+ pipeline->scratch_bytes_per_wave);
+ cmd_buffer->compute_scratch_waves_wanted = MAX2(cmd_buffer->compute_scratch_waves_wanted,
+ pipeline->max_waves);
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
pipeline->shaders[MESA_SHADER_COMPUTE]->bo);
/* Prefetch all pipeline shaders at first draw time. */
cmd_buffer->state.prefetch_L2_mask |= RADV_PREFETCH_SHADERS;
+ if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX10 &&
+ cmd_buffer->state.emitted_pipeline &&
+ radv_pipeline_has_ngg(cmd_buffer->state.emitted_pipeline) &&
+ !radv_pipeline_has_ngg(cmd_buffer->state.pipeline)) {
+ /* Transitioning from NGG to legacy GS requires
+ * VGT_FLUSH on Navi10-14. VGT_FLUSH is also emitted
+ * at the beginning of IBs when legacy GS ring pointers
+ * are set.
+ */
+ cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VGT_FLUSH;
+ }
+
radv_bind_dynamic_state(cmd_buffer, &pipeline->dynamic_state);
radv_bind_streamout_state(cmd_buffer, pipeline);
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
- MAYBE_UNUSED const uint32_t total_count = firstViewport + viewportCount;
+ ASSERTED const uint32_t total_count = firstViewport + viewportCount;
assert(firstViewport < MAX_VIEWPORTS);
assert(total_count >= 1 && total_count <= MAX_VIEWPORTS);
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
- MAYBE_UNUSED const uint32_t total_count = firstScissor + scissorCount;
+ ASSERTED const uint32_t total_count = firstScissor + scissorCount;
assert(firstScissor < MAX_SCISSORS);
assert(total_count >= 1 && total_count <= MAX_SCISSORS);
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
- MAYBE_UNUSED const uint32_t total_count = firstDiscardRectangle + discardRectangleCount;
+ ASSERTED const uint32_t total_count = firstDiscardRectangle + discardRectangleCount;
assert(firstDiscardRectangle < MAX_DISCARD_RECTANGLES);
assert(total_count >= 1 && total_count <= MAX_DISCARD_RECTANGLES);
state->dirty |= RADV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS;
}
+void radv_CmdSetLineStippleEXT(
+ VkCommandBuffer commandBuffer,
+ uint32_t lineStippleFactor,
+ uint16_t lineStipplePattern)
+{
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ struct radv_cmd_state *state = &cmd_buffer->state;
+
+ state->dynamic.line_stipple.factor = lineStippleFactor;
+ state->dynamic.line_stipple.pattern = lineStipplePattern;
+
+ state->dirty |= RADV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE;
+}
+
void radv_CmdExecuteCommands(
VkCommandBuffer commandBuffer,
uint32_t commandBufferCount,
for (uint32_t i = 0; i < commandBufferCount; i++) {
RADV_FROM_HANDLE(radv_cmd_buffer, secondary, pCmdBuffers[i]);
- primary->scratch_size_needed = MAX2(primary->scratch_size_needed,
- secondary->scratch_size_needed);
- primary->compute_scratch_size_needed = MAX2(primary->compute_scratch_size_needed,
- secondary->compute_scratch_size_needed);
+ primary->scratch_size_per_wave_needed = MAX2(primary->scratch_size_per_wave_needed,
+ secondary->scratch_size_per_wave_needed);
+ primary->scratch_waves_wanted = MAX2(primary->scratch_waves_wanted,
+ secondary->scratch_waves_wanted);
+ primary->compute_scratch_size_per_wave_needed = MAX2(primary->compute_scratch_size_per_wave_needed,
+ secondary->compute_scratch_size_per_wave_needed);
+ primary->compute_scratch_waves_wanted = MAX2(primary->compute_scratch_waves_wanted,
+ secondary->compute_scratch_waves_wanted);
if (secondary->esgs_ring_size_needed > primary->esgs_ring_size_needed)
primary->esgs_ring_size_needed = secondary->esgs_ring_size_needed;
primary->tess_rings_needed = true;
if (secondary->sample_positions_needed)
primary->sample_positions_needed = true;
+ if (secondary->gds_needed)
+ primary->gds_needed = true;
if (!secondary->state.framebuffer &&
(primary->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER)) {
primary->state.last_first_instance = secondary->state.last_first_instance;
primary->state.last_num_instances = secondary->state.last_num_instances;
primary->state.last_vertex_offset = secondary->state.last_vertex_offset;
+ primary->state.last_sx_ps_downconvert = secondary->state.last_sx_ps_downconvert;
+ primary->state.last_sx_blend_opt_epsilon = secondary->state.last_sx_blend_opt_epsilon;
+ primary->state.last_sx_blend_opt_control = secondary->state.last_sx_blend_opt_control;
if (secondary->state.last_index_type != -1) {
primary->state.last_index_type =
RADV_FROM_HANDLE(radv_device, device, _device);
struct radv_cmd_pool *pool;
- pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
+ pool = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*pool), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pool == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+ vk_object_base_init(&device->vk, &pool->base,
+ VK_OBJECT_TYPE_COMMAND_POOL);
+
if (pAllocator)
pool->alloc = *pAllocator;
else
- pool->alloc = device->alloc;
+ pool->alloc = device->vk.alloc;
list_inithead(&pool->cmd_buffers);
list_inithead(&pool->free_cmd_buffers);
radv_cmd_buffer_destroy(cmd_buffer);
}
- vk_free2(&device->alloc, pAllocator, pool);
+ vk_object_base_finish(&pool->base);
+ vk_free2(&device->vk.alloc, pAllocator, pool);
}
VkResult radv_ResetCommandPool(
struct radv_cmd_state *state = &cmd_buffer->state;
struct radv_subpass *subpass = &state->pass->subpasses[subpass_id];
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
+ ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
cmd_buffer->cs, 4096);
radv_subpass_barrier(cmd_buffer, &subpass->start_barrier);
radv_cmd_buffer_set_subpass(cmd_buffer, subpass);
+ radv_describe_barrier_start(cmd_buffer, RGP_BARRIER_EXTERNAL_RENDER_PASS_SYNC);
+
for (uint32_t i = 0; i < subpass->attachment_count; ++i) {
const uint32_t a = subpass->attachments[i].attachment;
if (a == VK_ATTACHMENT_UNUSED)
true);
}
+ radv_describe_barrier_end(cmd_buffer);
+
radv_cmd_buffer_clear_subpass(cmd_buffer);
assert(cmd_buffer->cs->cdw <= cdw_max);
radv_cmd_buffer_resolve_subpass(cmd_buffer);
+ radv_describe_barrier_start(cmd_buffer, RGP_BARRIER_EXTERNAL_RENDER_PASS_SYNC);
+
for (uint32_t i = 0; i < subpass->attachment_count; ++i) {
const uint32_t a = subpass->attachments[i].attachment;
if (a == VK_ATTACHMENT_UNUSED)
continue;
VkImageLayout layout = state->pass->attachments[a].final_layout;
- struct radv_subpass_attachment att = { a, layout };
+ VkImageLayout stencil_layout = state->pass->attachments[a].stencil_final_layout;
+ struct radv_subpass_attachment att = { a, layout, stencil_layout };
radv_handle_subpass_image_transition(cmd_buffer, att, false);
}
+
+ radv_describe_barrier_end(cmd_buffer);
}
-void radv_CmdBeginRenderPass(
- VkCommandBuffer commandBuffer,
- const VkRenderPassBeginInfo* pRenderPassBegin,
- VkSubpassContents contents)
+void
+radv_cmd_buffer_begin_render_pass(struct radv_cmd_buffer *cmd_buffer,
+ const VkRenderPassBeginInfo *pRenderPassBegin)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
RADV_FROM_HANDLE(radv_render_pass, pass, pRenderPassBegin->renderPass);
RADV_FROM_HANDLE(radv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
VkResult result;
result = radv_cmd_state_setup_sample_locations(cmd_buffer, pass, pRenderPassBegin);
if (result != VK_SUCCESS)
return;
+}
+
+void radv_CmdBeginRenderPass(
+ VkCommandBuffer commandBuffer,
+ const VkRenderPassBeginInfo* pRenderPassBegin,
+ VkSubpassContents contents)
+{
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+
+ radv_cmd_buffer_begin_render_pass(cmd_buffer, pRenderPassBegin);
radv_cmd_buffer_begin_subpass(cmd_buffer, 0);
}
-void radv_CmdBeginRenderPass2KHR(
+void radv_CmdBeginRenderPass2(
VkCommandBuffer commandBuffer,
const VkRenderPassBeginInfo* pRenderPassBeginInfo,
- const VkSubpassBeginInfoKHR* pSubpassBeginInfo)
+ const VkSubpassBeginInfo* pSubpassBeginInfo)
{
radv_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
pSubpassBeginInfo->contents);
radv_cmd_buffer_begin_subpass(cmd_buffer, prev_subpass + 1);
}
-void radv_CmdNextSubpass2KHR(
+void radv_CmdNextSubpass2(
VkCommandBuffer commandBuffer,
- const VkSubpassBeginInfoKHR* pSubpassBeginInfo,
- const VkSubpassEndInfoKHR* pSubpassEndInfo)
+ const VkSubpassBeginInfo* pSubpassBeginInfo,
+ const VkSubpassEndInfo* pSubpassEndInfo)
{
radv_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
}
struct radeon_cmdbuf *cs = cmd_buffer->cs;
unsigned di_src_sel = indexed ? V_0287F0_DI_SRC_SEL_DMA
: V_0287F0_DI_SRC_SEL_AUTO_INDEX;
- bool draw_id_enable = radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.info.vs.needs_draw_id;
+ bool draw_id_enable = radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.vs.needs_draw_id;
uint32_t base_reg = cmd_buffer->state.pipeline->graphics.vtx_base_sgpr;
bool predicating = cmd_buffer->state.predicating;
assert(base_reg);
}
if (info->indexed) {
- int index_size = state->index_type ? 4 : 2;
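+ /* With VK_EXT_index_type_uint8 an index can be 1, 2 or 4 bytes, so
+ * derive the size from the VGT index type instead of a 16/32-bit flag.
+ */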
+ int index_size = radv_get_vgt_index_size(state->index_type);
uint64_t index_va;
+ /* Skip draw calls with 0-sized index buffers. They
+ * cause a hang on some chips, like Navi10-14.
+ */
+ if (!cmd_buffer->state.max_index_count)
+ return;
+
index_va = state->index_va;
index_va += info->first_index * index_size;
{
struct radv_cmd_state *state = &cmd_buffer->state;
- if (!cmd_buffer->device->physical_device->has_scissor_bug)
+ if (!cmd_buffer->device->physical_device->rad_info.has_gfx9_scissor_bug)
return false;
if (cmd_buffer->state.context_roll_without_scissor_emitted || info->strmout_buffer)
if (cmd_buffer->state.dirty & used_states)
return true;
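+ /* The primitive restart index depends on the bound index type
+ * (0xff, 0xffff or 0xffffffff).
+ */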
+ uint32_t primitive_reset_index =
+ radv_get_primitive_reset_index(cmd_buffer);
+
if (info->indexed && state->pipeline->graphics.prim_restart_enable &&
- (state->index_type ? 0xffffffffu : 0xffffu) != state->last_primitive_reset_index)
+ primitive_reset_index != state->last_primitive_reset_index)
return true;
return false;
if (info->indexed) {
if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_INDEX_BUFFER)
- radv_emit_index_buffer(cmd_buffer);
+ radv_emit_index_buffer(cmd_buffer, info->indirect);
} else {
/* On GFX7 and later, non-indexed draws overwrite VGT_INDEX_TYPE,
* so the state must be re-emitted before the next indexed
(cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) &&
cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline;
- MAYBE_UNUSED unsigned cdw_max =
+ ASSERTED unsigned cdw_max =
radeon_check_space(cmd_buffer->device->ws,
cmd_buffer->cs, 4096);
return;
}
+ radv_describe_draw(cmd_buffer);
+
/* Use optimal packet order based on whether we need to sync the
* pipeline.
*/
radv_draw(cmd_buffer, &info);
}
-void radv_CmdDrawIndirectCountKHR(
+void radv_CmdDrawIndirectCount(
VkCommandBuffer commandBuffer,
VkBuffer _buffer,
VkDeviceSize offset,
radv_draw(cmd_buffer, &info);
}
-void radv_CmdDrawIndexedIndirectCountKHR(
+void radv_CmdDrawIndexedIndirectCount(
VkCommandBuffer commandBuffer,
VkBuffer _buffer,
VkDeviceSize offset,
loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_COMPUTE,
AC_UD_CS_GRID_SIZE);
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(ws, cs, 25);
+ ASSERTED unsigned cdw_max = radeon_check_space(ws, cs, 25);
+
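+ /* GFX10+ can run compute shaders in wave32 mode; select it in the
+ * dispatch initiator when the compiled shader uses it.
+ */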
+ if (compute_shader->info.wave_size == 32) {
+ assert(cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10);
+ dispatch_initiator |= S_00B800_CS_W32_EN(1);
+ }
if (info->indirect) {
uint64_t va = radv_buffer_get_va(info->indirect->bo);
bool pipeline_is_dirty = pipeline &&
pipeline != cmd_buffer->state.emitted_compute_pipeline;
+ radv_describe_dispatch(cmd_buffer, 8, 8, 8);
+
if (cmd_buffer->state.flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
RADV_CMD_FLAG_FLUSH_AND_INV_DB |
RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
radv_dispatch(cmd_buffer, &info);
}
-void radv_CmdEndRenderPass(
- VkCommandBuffer commandBuffer)
+void
+radv_cmd_buffer_end_render_pass(struct radv_cmd_buffer *cmd_buffer)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
-
- radv_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier);
-
- radv_cmd_buffer_end_subpass(cmd_buffer);
-
vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.subpass_sample_locs);
cmd_buffer->state.subpass_sample_locs = NULL;
}
-void radv_CmdEndRenderPass2KHR(
+void radv_CmdEndRenderPass(
+ VkCommandBuffer commandBuffer)
+{
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+
+ radv_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier);
+
+ radv_cmd_buffer_end_subpass(cmd_buffer);
+
+ radv_cmd_buffer_end_render_pass(cmd_buffer);
+}
+
+void radv_CmdEndRenderPass2(
VkCommandBuffer commandBuffer,
- const VkSubpassEndInfoKHR* pSubpassEndInfo)
+ const VkSubpassEndInfo* pSubpassEndInfo)
{
radv_CmdEndRenderPass(commandBuffer);
}
*/
static void radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer,
struct radv_image *image,
- const VkImageSubresourceRange *range,
- uint32_t clear_word)
+ const VkImageSubresourceRange *range)
{
assert(range->baseMipLevel == 0);
assert(range->levelCount == 1 || range->levelCount == VK_REMAINING_ARRAY_LAYERS);
VkImageAspectFlags aspects = VK_IMAGE_ASPECT_DEPTH_BIT;
struct radv_cmd_state *state = &cmd_buffer->state;
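+ /* Use the fully-expanded HTILE encoding, which differs between
+ * depth-only and depth+stencil formats.
+ */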
+ uint32_t htile_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f;
VkClearDepthStencilValue value = {};
+ struct radv_barrier_data barrier = {};
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
- state->flush_bits |= radv_clear_htile(cmd_buffer, image, range, clear_word);
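+ /* Report the mask-RAM initialization as a layout transition in the
+ * RGP trace.
+ */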
+ barrier.layout_transitions.init_mask_ram = 1;
+ radv_describe_layout_transition(cmd_buffer, &barrier);
+
+ state->flush_bits |= radv_clear_htile(cmd_buffer, image, range, htile_value);
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
if (vk_format_is_stencil(image->vk_format))
aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
- radv_set_ds_clear_metadata(cmd_buffer, image, value, aspects);
+ radv_set_ds_clear_metadata(cmd_buffer, image, range, value, aspects);
if (radv_image_is_tc_compat_htile(image)) {
/* Initialize the TC-compat metadata value to 0 because by
* default DB_Z_INFO.RANGE_PRECISION is set to 1, and we only
* need to conditionally update its value when performing
* a fast depth clear.
*/
- radv_set_tc_compat_zrange_metadata(cmd_buffer, image, 0);
+ radv_set_tc_compat_zrange_metadata(cmd_buffer, image, range, 0);
}
}
static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffer,
struct radv_image *image,
VkImageLayout src_layout,
+ bool src_render_loop,
VkImageLayout dst_layout,
+ bool dst_render_loop,
unsigned src_queue_mask,
unsigned dst_queue_mask,
const VkImageSubresourceRange *range,
return;
if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
- uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f;
-
- if (radv_layout_is_htile_compressed(image, dst_layout,
- dst_queue_mask)) {
- clear_value = 0;
- }
-
- radv_initialize_htile(cmd_buffer, image, range, clear_value);
- } else if (!radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) &&
- radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) {
- uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f;
- radv_initialize_htile(cmd_buffer, image, range, clear_value);
- } else if (radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) &&
- !radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) {
- VkImageSubresourceRange local_range = *range;
- local_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
- local_range.baseMipLevel = 0;
- local_range.levelCount = 1;
-
+ radv_initialize_htile(cmd_buffer, image, range);
+ } else if (!radv_layout_is_htile_compressed(image, src_layout, src_render_loop, src_queue_mask) &&
+ radv_layout_is_htile_compressed(image, dst_layout, dst_render_loop, dst_queue_mask)) {
+ radv_initialize_htile(cmd_buffer, image, range);
+ } else if (radv_layout_is_htile_compressed(image, src_layout, src_render_loop, src_queue_mask) &&
+ !radv_layout_is_htile_compressed(image, dst_layout, dst_render_loop, dst_queue_mask)) {
cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
- radv_decompress_depth_image_inplace(cmd_buffer, image,
- &local_range, sample_locs);
+ radv_decompress_depth_stencil(cmd_buffer, image, range,
+ sample_locs);
cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
uint32_t value)
{
struct radv_cmd_state *state = &cmd_buffer->state;
+ struct radv_barrier_data barrier = {};
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
+ barrier.layout_transitions.init_mask_ram = 1;
+ radv_describe_layout_transition(cmd_buffer, &barrier);
+
state->flush_bits |= radv_clear_cmask(cmd_buffer, image, range, value);
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
};
uint32_t log2_samples = util_logbase2(image->info.samples);
uint32_t value = fmask_clear_values[log2_samples];
+ struct radv_barrier_data barrier = {};
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
+ barrier.layout_transitions.init_mask_ram = 1;
+ radv_describe_layout_transition(cmd_buffer, &barrier);
+
state->flush_bits |= radv_clear_fmask(cmd_buffer, image, range, value);
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
const VkImageSubresourceRange *range, uint32_t value)
{
struct radv_cmd_state *state = &cmd_buffer->state;
+ struct radv_barrier_data barrier = {};
unsigned size = 0;
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
+ barrier.layout_transitions.init_mask_ram = 1;
+ radv_describe_layout_transition(cmd_buffer, &barrier);
+
state->flush_bits |= radv_clear_dcc(cmd_buffer, image, range, value);
if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX8) {
if (size != image->planes[0].surface.dcc_size) {
state->flush_bits |=
radv_fill_buffer(cmd_buffer, image->bo,
- image->offset + image->dcc_offset + size,
+ image->offset + image->planes[0].surface.dcc_offset + size,
image->planes[0].surface.dcc_size - size,
0xffffffff);
}
static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer,
struct radv_image *image,
VkImageLayout src_layout,
+ bool src_render_loop,
VkImageLayout dst_layout,
+ bool dst_render_loop,
unsigned src_queue_mask,
unsigned dst_queue_mask,
const VkImageSubresourceRange *range)
uint32_t value = 0xffffffffu; /* Fully expanded mode. */
bool need_decompress_pass = false;
- if (radv_layout_dcc_compressed(image, dst_layout,
+ if (radv_layout_dcc_compressed(cmd_buffer->device, image, dst_layout,
+ dst_render_loop,
dst_queue_mask)) {
value = 0x20202020u;
need_decompress_pass = true;
static void radv_handle_color_image_transition(struct radv_cmd_buffer *cmd_buffer,
struct radv_image *image,
VkImageLayout src_layout,
+ bool src_render_loop,
VkImageLayout dst_layout,
+ bool dst_render_loop,
unsigned src_queue_mask,
unsigned dst_queue_mask,
const VkImageSubresourceRange *range)
{
if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
radv_init_color_image_metadata(cmd_buffer, image,
- src_layout, dst_layout,
+ src_layout, src_render_loop,
+ dst_layout, dst_render_loop,
src_queue_mask, dst_queue_mask,
range);
return;
if (radv_dcc_enabled(image, range->baseMipLevel)) {
if (src_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
radv_initialize_dcc(cmd_buffer, image, range, 0xffffffffu);
- } else if (radv_layout_dcc_compressed(image, src_layout, src_queue_mask) &&
- !radv_layout_dcc_compressed(image, dst_layout, dst_queue_mask)) {
+ } else if (radv_layout_dcc_compressed(cmd_buffer->device, image, src_layout, src_render_loop, src_queue_mask) &&
+ !radv_layout_dcc_compressed(cmd_buffer->device, image, dst_layout, dst_render_loop, dst_queue_mask)) {
radv_decompress_dcc(cmd_buffer, image, range);
- } else if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
- !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
+ } else if (radv_layout_can_fast_clear(image, src_layout, src_render_loop, src_queue_mask) &&
+ !radv_layout_can_fast_clear(image, dst_layout, dst_render_loop, dst_queue_mask)) {
radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
}
} else if (radv_image_has_cmask(image) || radv_image_has_fmask(image)) {
bool fce_eliminate = false, fmask_expand = false;
- if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
- !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
+ if (radv_layout_can_fast_clear(image, src_layout, src_render_loop, src_queue_mask) &&
+ !radv_layout_can_fast_clear(image, dst_layout, dst_render_loop, dst_queue_mask)) {
fce_eliminate = true;
}
if (fce_eliminate || fmask_expand)
radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
- if (fmask_expand)
+ if (fmask_expand) {
+ struct radv_barrier_data barrier = {};
+ barrier.layout_transitions.fmask_color_expand = 1;
+ radv_describe_layout_transition(cmd_buffer, &barrier);
+
radv_expand_fmask_image_inplace(cmd_buffer, image, range);
+ }
}
}
static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
struct radv_image *image,
VkImageLayout src_layout,
+ bool src_render_loop,
VkImageLayout dst_layout,
+ bool dst_render_loop,
uint32_t src_family,
uint32_t dst_family,
const VkImageSubresourceRange *range,
if (vk_format_is_depth(image->vk_format)) {
radv_handle_depth_image_transition(cmd_buffer, image,
- src_layout, dst_layout,
+ src_layout, src_render_loop,
+ dst_layout, dst_render_loop,
src_queue_mask, dst_queue_mask,
range, sample_locs);
} else {
radv_handle_color_image_transition(cmd_buffer, image,
- src_layout, dst_layout,
+ src_layout, src_render_loop,
+ dst_layout, dst_render_loop,
src_queue_mask, dst_queue_mask,
range);
}
}
struct radv_barrier_info {
+ enum rgp_barrier_reason reason;
uint32_t eventCount;
const VkEvent *pEvents;
VkPipelineStageFlags srcStageMask;
enum radv_cmd_flush_bits src_flush_bits = 0;
enum radv_cmd_flush_bits dst_flush_bits = 0;
+ radv_describe_barrier_start(cmd_buffer, info->reason);
+
for (unsigned i = 0; i < info->eventCount; ++i) {
RADV_FROM_HANDLE(radv_event, event, info->pEvents[i]);
uint64_t va = radv_buffer_get_va(event->bo);
radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo);
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
+ ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, va, 1, 0xffffffff);
assert(cmd_buffer->cs->cdw <= cdw_max);
radv_handle_image_transition(cmd_buffer, image,
pImageMemoryBarriers[i].oldLayout,
+ false, /* Outside of a render pass we are never in a render loop. */
pImageMemoryBarriers[i].newLayout,
+ false, /* Outside of a render pass we are never in a render loop. */
pImageMemoryBarriers[i].srcQueueFamilyIndex,
pImageMemoryBarriers[i].dstQueueFamilyIndex,
&pImageMemoryBarriers[i].subresourceRange,
si_cp_dma_wait_for_idle(cmd_buffer);
cmd_buffer->state.flush_bits |= dst_flush_bits;
+
+ radv_describe_barrier_end(cmd_buffer);
}
void radv_CmdPipelineBarrier(
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_barrier_info info;
+ info.reason = RGP_BARRIER_EXTERNAL_CMD_PIPELINE_BARRIER;
info.eventCount = 0;
info.pEvents = NULL;
info.srcStageMask = srcStageMask;
radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo);
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 21);
+ ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 21);
/* Flags that only require a top-of-pipe event. */
VkPipelineStageFlags top_of_pipe_flags =
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_barrier_info info;
+ info.reason = RGP_BARRIER_EXTERNAL_CMD_WAIT_EVENTS;
info.eventCount = eventCount;
info.pEvents = pEvents;
info.srcStageMask = 0;
sb[idx].buffer = radv_buffer_from_handle(pBuffers[i]);
sb[idx].offset = pOffsets[i];
- sb[idx].size = pSizes[i];
+
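+ /* A NULL pSizes array or a VK_WHOLE_SIZE entry binds the rest of the
+ * buffer from the given offset onwards.
+ */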
+ if (!pSizes || pSizes[i] == VK_WHOLE_SIZE) {
+ sb[idx].size = sb[idx].buffer->size - sb[idx].offset;
+ } else {
+ sb[idx].size = pSizes[i];
+ }
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
sb[idx].buffer->bo);
(so->enabled_mask << 8) |
(so->enabled_mask << 12);
- if ((old_streamout_enabled != so->streamout_enabled) ||
- (old_hw_enabled_mask != so->hw_enabled_mask))
+ if (!cmd_buffer->device->physical_device->use_ngg_streamout &&
+ ((old_streamout_enabled != so->streamout_enabled) ||
+ (old_hw_enabled_mask != so->hw_enabled_mask)))
radv_emit_streamout_enable(cmd_buffer);
+
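+ /* NGG streamout keeps its counters in GDS and uses GDS ordered
+ * append, so both resources must be allocated for this command
+ * buffer.
+ */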
+ if (cmd_buffer->device->physical_device->use_ngg_streamout) {
+ cmd_buffer->gds_needed = true;
+ cmd_buffer->gds_oa_needed = true;
+ }
}
static void radv_flush_vgt_streamout(struct radv_cmd_buffer *cmd_buffer)
radv_set_streamout_enable(cmd_buffer, true);
}
+static void
+gfx10_emit_streamout_begin(struct radv_cmd_buffer *cmd_buffer,
+ uint32_t firstCounterBuffer,
+ uint32_t counterBufferCount,
+ const VkBuffer *pCounterBuffers,
+ const VkDeviceSize *pCounterBufferOffsets)
+{
+ struct radv_streamout_state *so = &cmd_buffer->state.streamout;
+ unsigned last_target = util_last_bit(so->enabled_mask) - 1;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
+ uint32_t i;
+
+ assert(cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10);
+ assert(firstCounterBuffer + counterBufferCount <= MAX_SO_BUFFERS);
+
+ /* Sync because the next streamout operation will overwrite GDS and we
+ * have to make sure it's idle.
+ * TODO: Improve by tracking if there is a streamout operation in
+ * flight.
+ */
+ cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VS_PARTIAL_FLUSH;
+ si_emit_cache_flush(cmd_buffer);
+
+ for_each_bit(i, so->enabled_mask) {
+ int32_t counter_buffer_idx = i - firstCounterBuffer;
+ if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount)
+ counter_buffer_idx = -1;
+
+ bool append = counter_buffer_idx >= 0 &&
+ pCounterBuffers && pCounterBuffers[counter_buffer_idx];
+ uint64_t va = 0;
+
+ if (append) {
+ RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]);
+
+ va += radv_buffer_get_va(buffer->bo);
+ va += buffer->offset + pCounterBufferOffsets[counter_buffer_idx];
+
+ radv_cs_add_buffer(cmd_buffer->device->ws, cs, buffer->bo);
+ }
+
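+ /* Prime the GDS counter for target i via CP DMA: load the saved
+ * counter value from memory, or an immediate 0 when no counter
+ * buffer was provided.
+ */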
+ radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
+ radeon_emit(cs, S_411_SRC_SEL(append ? V_411_SRC_ADDR_TC_L2 : V_411_DATA) |
+ S_411_DST_SEL(V_411_GDS) |
+ S_411_CP_SYNC(i == last_target));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, 4 * i); /* destination in GDS */
+ radeon_emit(cs, 0);
+ radeon_emit(cs, S_414_BYTE_COUNT_GFX9(4) |
+ S_414_DISABLE_WR_CONFIRM_GFX9(i != last_target));
+ }
+
+ radv_set_streamout_enable(cmd_buffer, true);
+}
+
void radv_CmdBeginTransformFeedbackEXT(
VkCommandBuffer commandBuffer,
uint32_t firstCounterBuffer,
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- radv_emit_streamout_begin(cmd_buffer,
- firstCounterBuffer, counterBufferCount,
- pCounterBuffers, pCounterBufferOffsets);
+ if (cmd_buffer->device->physical_device->use_ngg_streamout) {
+ gfx10_emit_streamout_begin(cmd_buffer,
+ firstCounterBuffer, counterBufferCount,
+ pCounterBuffers, pCounterBufferOffsets);
+ } else {
+ radv_emit_streamout_begin(cmd_buffer,
+ firstCounterBuffer, counterBufferCount,
+ pCounterBuffers, pCounterBufferOffsets);
+ }
}
static void
radv_set_streamout_enable(cmd_buffer, false);
}
+static void
+gfx10_emit_streamout_end(struct radv_cmd_buffer *cmd_buffer,
+ uint32_t firstCounterBuffer,
+ uint32_t counterBufferCount,
+ const VkBuffer *pCounterBuffers,
+ const VkDeviceSize *pCounterBufferOffsets)
+{
+ struct radv_streamout_state *so = &cmd_buffer->state.streamout;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
+ uint32_t i;
+
+ assert(cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10);
+ assert(firstCounterBuffer + counterBufferCount <= MAX_SO_BUFFERS);
+
+ for_each_bit(i, so->enabled_mask) {
+ int32_t counter_buffer_idx = i - firstCounterBuffer;
+ if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount)
+ counter_buffer_idx = -1;
+
+ if (counter_buffer_idx >= 0 && pCounterBuffers && pCounterBuffers[counter_buffer_idx]) {
+ /* The array of counter buffers is optional. */
+ RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]);
+ uint64_t va = radv_buffer_get_va(buffer->bo);
+
+ va += buffer->offset + pCounterBufferOffsets[counter_buffer_idx];
+
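+ /* Read the final GDS counter for target i back into the counter
+ * buffer with a PS_DONE EOP event.
+ */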
+ si_cs_emit_write_event_eop(cs,
+ cmd_buffer->device->physical_device->rad_info.chip_class,
+ radv_cmd_buffer_uses_mec(cmd_buffer),
+ V_028A90_PS_DONE, 0,
+ EOP_DST_SEL_TC_L2,
+ EOP_DATA_SEL_GDS,
+ va, EOP_DATA_GDS(i, 1), 0);
+
+ radv_cs_add_buffer(cmd_buffer->device->ws, cs, buffer->bo);
+ }
+ }
+
+ radv_set_streamout_enable(cmd_buffer, false);
+}
+
void radv_CmdEndTransformFeedbackEXT(
VkCommandBuffer commandBuffer,
uint32_t firstCounterBuffer,
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- radv_emit_streamout_end(cmd_buffer,
- firstCounterBuffer, counterBufferCount,
- pCounterBuffers, pCounterBufferOffsets);
+ if (cmd_buffer->device->physical_device->use_ngg_streamout) {
+ gfx10_emit_streamout_end(cmd_buffer,
+ firstCounterBuffer, counterBufferCount,
+ pCounterBuffers, pCounterBufferOffsets);
+ } else {
+ radv_emit_streamout_end(cmd_buffer,
+ firstCounterBuffer, counterBufferCount,
+ pCounterBuffers, pCounterBufferOffsets);
+ }
}
void radv_CmdDrawIndirectByteCountEXT(
si_emit_cache_flush(cmd_buffer);
+ ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 12);
+
if (!(pipelineStage & ~VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT)) {
radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) |
va, marker,
cmd_buffer->gfx9_eop_bug_va);
}
+
+ assert(cmd_buffer->cs->cdw <= cdw_max);
}