.front = 0u,
.back = 0u,
},
+ .line_stipple = {
+ .factor = 0u,
+ .pattern = 0u,
+ },
};
static void
}
}
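+ /* Copy the line stipple state and flag it dirty only if it changed. */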
+ if (copy_mask & RADV_DYNAMIC_LINE_STIPPLE) {
+ if (memcmp(&dest->line_stipple, &src->line_stipple,
+ sizeof(src->line_stipple))) {
+ dest->line_stipple = src->line_stipple;
+ dest_mask |= RADV_DYNAMIC_LINE_STIPPLE;
+ }
+ }
+
cmd_buffer->state.dirty |= dest_mask;
}
if (cmd_buffer == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
- cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
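+ /* vk_object_base_init() sets up the loader magic and the object type
+ * for the common Vulkan object code. */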
+ vk_object_base_init(&device->vk, &cmd_buffer->base,
+ VK_OBJECT_TYPE_COMMAND_BUFFER);
+
cmd_buffer->device = device;
cmd_buffer->pool = pool;
cmd_buffer->level = level;
- if (pool) {
- list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
- cmd_buffer->queue_family_index = pool->queue_family_index;
-
- } else {
- /* Init the pool_link so we can safely call list_del when we destroy
- * the command buffer
- */
- list_inithead(&cmd_buffer->pool_link);
- cmd_buffer->queue_family_index = RADV_QUEUE_GENERAL;
- }
+ list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
+ cmd_buffer->queue_family_index = pool->queue_family_index;
ring = radv_queue_family_to_ring(cmd_buffer->queue_family_index);
cmd_buffer->device->ws->buffer_destroy(cmd_buffer->upload.upload_bo);
cmd_buffer->device->ws->cs_destroy(cmd_buffer->cs);
- for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
+ for (unsigned i = 0; i < MAX_BIND_POINTS; i++)
free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);
+ vk_object_base_finish(&cmd_buffer->base);
+
vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
cmd_buffer->gsvs_ring_size_needed = 0;
cmd_buffer->tess_rings_needed = false;
cmd_buffer->gds_needed = false;
+ cmd_buffer->gds_oa_needed = false;
cmd_buffer->sample_positions_needed = false;
if (cmd_buffer->upload.upload_bo)
memset(cmd_buffer->vertex_bindings, 0, sizeof(cmd_buffer->vertex_bindings));
- for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
+ for (unsigned i = 0; i < MAX_BIND_POINTS; i++) {
cmd_buffer->descriptors[i].dirty = 0;
cmd_buffer->descriptors[i].valid = 0;
cmd_buffer->descriptors[i].push_dirty = false;
radv_cmd_buffer_after_draw(struct radv_cmd_buffer *cmd_buffer,
enum radv_cmd_flush_bits flags)
{
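+ /* When SQTT is enabled, emit a thread trace marker after every draw
+ * so RGP can delimit the draws. */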
+ if (unlikely(cmd_buffer->device->thread_trace_bo)) {
+ radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_THREAD_TRACE_MARKER) | EVENT_INDEX(0));
+ }
+
if (cmd_buffer->device->instance->debug_flags & RADV_DEBUG_SYNC_SHADERS) {
assert(flags & (RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
RADV_CMD_FLAG_CS_PARTIAL_FLUSH));
float shifted_pos_x = user_locs[i].x - 0.5;
float shifted_pos_y = user_locs[i].y - 0.5;
- int32_t scaled_pos_x = floor(shifted_pos_x * 16);
- int32_t scaled_pos_y = floor(shifted_pos_y * 16);
+ int32_t scaled_pos_x = floorf(shifted_pos_x * 16);
+ int32_t scaled_pos_y = floorf(shifted_pos_y * 16);
sample_locs[i].x = CLAMP(scaled_pos_x, -8, 7);
sample_locs[i].y = CLAMP(scaled_pos_y, -8, 7);
static void
radv_emit_sample_locations(struct radv_cmd_buffer *cmd_buffer)
{
- struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
- struct radv_multisample_state *ms = &pipeline->graphics.ms;
struct radv_sample_locations_state *sample_location =
&cmd_buffer->state.dynamic.sample_location;
uint32_t num_samples = (uint32_t)sample_location->per_pixel;
num_samples);
/* Compute the maximum sample distance from the specified locations. */
- for (uint32_t i = 0; i < num_samples; i++) {
- VkOffset2D offset = sample_locs[0][i];
- max_sample_dist = MAX2(max_sample_dist,
- MAX2(abs(offset.x), abs(offset.y)));
+ for (unsigned i = 0; i < 4; ++i) {
+ for (uint32_t j = 0; j < num_samples; j++) {
+ VkOffset2D offset = sample_locs[i][j];
+ max_sample_dist = MAX2(max_sample_dist,
+ MAX2(abs(offset.x), abs(offset.y)));
+ }
}
/* Emit the specified user sample locations. */
}
/* Emit the maximum sample distance and the centroid priority. */
- uint32_t pa_sc_aa_config = ms->pa_sc_aa_config;
-
- pa_sc_aa_config &= C_028BE0_MAX_SAMPLE_DIST;
- pa_sc_aa_config |= S_028BE0_MAX_SAMPLE_DIST(max_sample_dist);
-
- radeon_set_context_reg_seq(cs, R_028BE0_PA_SC_AA_CONFIG, 1);
- radeon_emit(cs, pa_sc_aa_config);
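+ /* A context reg RMW updates only the MAX_SAMPLE_DIST field and leaves
+ * the rest of PA_SC_AA_CONFIG untouched. */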
+ radeon_set_context_reg_rmw(cs, R_028BE0_PA_SC_AA_CONFIG,
+ S_028BE0_MAX_SAMPLE_DIST(max_sample_dist),
+ ~C_028BE0_MAX_SAMPLE_DIST);
radeon_set_context_reg_seq(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2);
radeon_emit(cs, centroid_priority);
struct radv_pipeline *pipeline)
{
int num_samples = pipeline->graphics.ms.num_samples;
- struct radv_multisample_state *ms = &pipeline->graphics.ms;
struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline;
if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.needs_sample_positions)
if (old_pipeline && num_samples == old_pipeline->graphics.ms.num_samples)
return;
- radeon_set_context_reg_seq(cmd_buffer->cs, R_028BE0_PA_SC_AA_CONFIG, 1);
- radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_config);
-
radv_emit_default_sample_locations(cmd_buffer->cs, num_samples);
- /* GFX9: Flush DFSM when the AA mode changes. */
- if (cmd_buffer->device->dfsm_allowed) {
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
- radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
- }
-
cmd_buffer->state.context_roll_without_scissor_emitted = true;
}
for (unsigned i = 0; i < subpass->color_count; ++i) {
if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) {
- sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
- sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4);
+ /* We don't set the DISABLE bits because the HW can't have holes,
+ * so the SPI color format is set to 32-bit 1-component. */
+ sx_ps_downconvert |= V_028754_SX_RT_EXPORT_32_R << (i * 4);
continue;
}
}
}
- for (unsigned i = subpass->color_count; i < 8; ++i) {
- sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
- sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4);
- }
- /* TODO: avoid redundantly setting context registers */
+ /* Do not set the DISABLE bits for the unused attachments, as that
+ * breaks dual source blending in SkQP and does not seem to improve
+ * performance. */
+
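+ /* These register writes roll the context; skip them if nothing changed. */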
+ if (sx_ps_downconvert == cmd_buffer->state.last_sx_ps_downconvert &&
+ sx_blend_opt_epsilon == cmd_buffer->state.last_sx_blend_opt_epsilon &&
+ sx_blend_opt_control == cmd_buffer->state.last_sx_blend_opt_control)
+ return;
+
radeon_set_context_reg_seq(cmd_buffer->cs, R_028754_SX_PS_DOWNCONVERT, 3);
radeon_emit(cmd_buffer->cs, sx_ps_downconvert);
radeon_emit(cmd_buffer->cs, sx_blend_opt_epsilon);
radeon_emit(cmd_buffer->cs, sx_blend_opt_control);
cmd_buffer->state.context_roll_without_scissor_emitted = true;
+
+ cmd_buffer->state.last_sx_ps_downconvert = sx_ps_downconvert;
+ cmd_buffer->state.last_sx_blend_opt_epsilon = sx_blend_opt_epsilon;
+ cmd_buffer->state.last_sx_blend_opt_control = sx_blend_opt_control;
+}
+
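+/* With primitive binning, break the current batch when a new fragment
+ * shader or a new CB_TARGET_MASK is bound and the binner keeps more than
+ * one context/persistent state per bin.
+ */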
+static void
+radv_emit_batch_break_on_new_ps(struct radv_cmd_buffer *cmd_buffer)
+{
+ if (!cmd_buffer->device->pbb_allowed)
+ return;
+
+ struct radv_binning_settings settings =
+ radv_get_binning_settings(cmd_buffer->device->physical_device);
+ bool break_for_new_ps =
+ (!cmd_buffer->state.emitted_pipeline ||
+ cmd_buffer->state.emitted_pipeline->shaders[MESA_SHADER_FRAGMENT] !=
+ cmd_buffer->state.pipeline->shaders[MESA_SHADER_FRAGMENT]) &&
+ (settings.context_states_per_bin > 1 ||
+ settings.persistent_states_per_bin > 1);
+ bool break_for_new_cb_target_mask =
+ (!cmd_buffer->state.emitted_pipeline ||
+ cmd_buffer->state.emitted_pipeline->graphics.cb_target_mask !=
+ cmd_buffer->state.pipeline->graphics.cb_target_mask) &&
+ settings.context_states_per_bin > 1;
+
+ if (!break_for_new_ps && !break_for_new_cb_target_mask)
+ return;
+
+ radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0));
}
static void
cmd_buffer->state.context_roll_without_scissor_emitted = true;
}
+ radv_emit_batch_break_on_new_ps(cmd_buffer);
+
for (unsigned i = 0; i < MESA_SHADER_COMPUTE; i++) {
if (!pipeline->shaders[i])
continue;
unsigned width = cmd_buffer->state.dynamic.line_width * 8;
radeon_set_context_reg(cmd_buffer->cs, R_028A08_PA_SU_LINE_CNTL,
- S_028A08_WIDTH(CLAMP(width, 0, 0xFFF)));
+ S_028A08_WIDTH(CLAMP(width, 0, 0xFFFF)));
}
static void
radeon_emit(cmd_buffer->cs, bias); /* BACK OFFSET */
}
+static void
+radv_emit_line_stipple(struct radv_cmd_buffer *cmd_buffer)
+{
+ struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
+ struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
+ uint32_t auto_reset_cntl = 1;
+
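+ /* Reset the stipple pattern per primitive for line lists, but per
+ * packet for line strips so the pattern continues across the strip
+ * segments. */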
+ if (pipeline->graphics.topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP)
+ auto_reset_cntl = 2;
+
+ radeon_set_context_reg(cmd_buffer->cs, R_028A0C_PA_SC_LINE_STIPPLE,
+ S_028A0C_LINE_PATTERN(d->line_stipple.pattern) |
+ S_028A0C_REPEAT_COUNT(d->line_stipple.factor - 1) |
+ S_028A0C_AUTO_RESET_CNTL(auto_reset_cntl));
+}
+
static void
radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer,
int index,
cb_color_info &= C_028C70_DCC_ENABLE;
}
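+ /* Disable CB compression when the layout doesn't allow fast clears. */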
+ if (!radv_layout_can_fast_clear(image, layout, in_render_loop,
+ radv_image_queue_family_mask(image,
+ cmd_buffer->queue_family_index,
+ cmd_buffer->queue_family_index))) {
+ cb_color_info &= C_028C70_COMPRESSION;
+ }
+
if (radv_image_is_tc_compat_cmask(image) &&
(radv_is_fmask_decompress_pipeline(cmd_buffer) ||
radv_is_dcc_decompress_pipeline(cmd_buffer))) {
cb_color_info &= C_028C70_FMASK_COMPRESS_1FRAG_ONLY;
}
+ if (radv_image_has_fmask(image) &&
+ (radv_is_fmask_decompress_pipeline(cmd_buffer) ||
+ radv_is_hw_resolve_pipeline(cmd_buffer))) {
+ /* Make sure FMASK is enabled if it has been cleared because:
+ *
+ * 1) it's required for FMASK_DECOMPRESS operations to avoid
+ * GPU hangs
+ * 2) it's necessary for CB_RESOLVE which can read compressed
+ * FMASK data anyway.
+ */
+ cb_color_info |= S_028C70_COMPRESSION(1);
+ }
+
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
radeon_emit(cmd_buffer->cs, cb->cb_color_base);
!radv_image_is_tc_compat_htile(image))
return;
- if (!radv_layout_has_htile(image, layout, in_render_loop,
- radv_image_queue_family_mask(image,
- cmd_buffer->queue_family_index,
- cmd_buffer->queue_family_index))) {
+ if (!radv_layout_is_htile_compressed(image, layout, in_render_loop,
+ radv_image_queue_family_mask(image,
+ cmd_buffer->queue_family_index,
+ cmd_buffer->queue_family_index))) {
db_z_info &= C_028040_TILE_SURFACE_ENABLE;
}
uint32_t db_z_info = ds->db_z_info;
uint32_t db_stencil_info = ds->db_stencil_info;
- if (!radv_layout_has_htile(image, layout, in_render_loop,
- radv_image_queue_family_mask(image,
- cmd_buffer->queue_family_index,
- cmd_buffer->queue_family_index))) {
+ if (!radv_layout_is_htile_compressed(image, layout, in_render_loop,
+ radv_image_queue_family_mask(image,
+ cmd_buffer->queue_family_index,
+ cmd_buffer->queue_family_index))) {
db_z_info &= C_028040_TILE_SURFACE_ENABLE;
db_stencil_info |= S_028044_TILE_STENCIL_DISABLE(1);
}
uint32_t reg = R_028028_DB_STENCIL_CLEAR + 4 * reg_offset;
if (cmd_buffer->device->physical_device->rad_info.has_load_ctx_reg_pkt) {
- radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, 0));
+ radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG_INDEX, 3, 0));
radeon_emit(cs, va);
radeon_emit(cs, va >> 32);
radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2);
uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c;
if (cmd_buffer->device->physical_device->rad_info.has_load_ctx_reg_pkt) {
- radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, cmd_buffer->state.predicating));
+ radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG_INDEX, 3, cmd_buffer->state.predicating));
radeon_emit(cs, va);
radeon_emit(cs, va >> 32);
radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2);
VkImageLayout layout = subpass->depth_stencil_attachment->layout;
bool in_render_loop = subpass->depth_stencil_attachment->in_render_loop;
struct radv_image_view *iview = cmd_buffer->state.attachments[idx].iview;
- struct radv_image *image = iview->image;
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, cmd_buffer->state.attachments[idx].iview->bo);
- ASSERTED uint32_t queue_mask = radv_image_queue_family_mask(image,
- cmd_buffer->queue_family_index,
- cmd_buffer->queue_family_index);
- /* We currently don't support writing decompressed HTILE */
- assert(radv_layout_has_htile(image, layout, in_render_loop, queue_mask) ==
- radv_layout_is_htile_compressed(image, layout, in_render_loop, queue_mask));
radv_emit_fb_ds_state(cmd_buffer, &cmd_buffer->state.attachments[idx].ds, iview, layout, in_render_loop);
}
static void
-radv_emit_index_buffer(struct radv_cmd_buffer *cmd_buffer)
+radv_emit_index_buffer(struct radv_cmd_buffer *cmd_buffer, bool indirect)
{
struct radeon_cmdbuf *cs = cmd_buffer->cs;
struct radv_cmd_state *state = &cmd_buffer->state;
state->last_index_type = state->index_type;
}
+ /* For direct indexed draws we use DRAW_INDEX_2, which already includes
+ * the index_va and max_index_count. */
+ if (!indirect)
+ return;
+
radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
radeon_emit(cs, state->index_va);
radeon_emit(cs, state->index_va >> 32);
if (states & RADV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS)
radv_emit_sample_locations(cmd_buffer);
+ if (states & RADV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE)
+ radv_emit_line_stipple(cmd_buffer);
+
cmd_buffer->state.dirty &= ~states;
}
S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
+ /* OOB_SELECT chooses the out-of-bounds check:
+ * - 1: index >= NUM_RECORDS (Structured)
+ * - 3: offset >= NUM_RECORDS (Raw)
+ */
+ int oob_select = stride ? V_008F0C_OOB_SELECT_STRUCTURED : V_008F0C_OOB_SELECT_RAW;
+
desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_UINT) |
- S_008F0C_OOB_SELECT(1) |
+ S_008F0C_OOB_SELECT(oob_select) |
S_008F0C_RESOURCE_LEVEL(1);
} else {
desc[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_UINT) |
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
- S_008F0C_OOB_SELECT(3) |
+ S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
S_008F0C_RESOURCE_LEVEL(1);
} else {
desc[3] |= S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_STREAMOUT_BUFFER;
}
+static void
+radv_flush_ngg_gs_state(struct radv_cmd_buffer *cmd_buffer)
+{
+ struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
+ struct radv_userdata_info *loc;
+ uint32_t ngg_gs_state = 0;
+ uint32_t base_reg;
+
+ if (!radv_pipeline_has_gs(pipeline) ||
+ !radv_pipeline_has_ngg(pipeline))
+ return;
+
+ /* By default NGG GS queries are disabled, but they are enabled if the
+ * command buffer has active GDS queries or if it's a secondary command
+ * buffer that inherits the number of generated primitives.
+ */
+ if (cmd_buffer->state.active_pipeline_gds_queries ||
+ (cmd_buffer->state.inherited_pipeline_statistics & VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT))
+ ngg_gs_state = 1;
+
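+ /* The flag is passed to the geometry shader through the
+ * AC_UD_NGG_GS_STATE user SGPR. */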
+ loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_GEOMETRY,
+ AC_UD_NGG_GS_STATE);
+ base_reg = pipeline->user_data_0[MESA_SHADER_GEOMETRY];
+ assert(loc->sgpr_idx != -1);
+
+ radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4,
+ ngg_gs_state);
+}
+
static void
radv_upload_graphics_shader_descriptors(struct radv_cmd_buffer *cmd_buffer, bool pipeline_is_dirty)
{
radv_flush_streamout_descriptors(cmd_buffer);
radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
radv_flush_constants(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
+ radv_flush_ngg_gs_state(cmd_buffer);
}
struct radv_draw_info {
flush_bits |= RADV_CMD_FLAG_INV_VCACHE;
/* Unlike LLVM, ACO uses SMEM for SSBOs and we have to
* invalidate the scalar cache. */
- if (cmd_buffer->device->physical_device->use_aco &&
- cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8)
+ if (!cmd_buffer->device->physical_device->use_llvm)
flush_bits |= RADV_CMD_FLAG_INV_SCACHE;
if (!image_is_coherent)
sample_locs = radv_get_attachment_sample_locations(cmd_buffer, idx,
begin_subpass);
- radv_handle_image_transition(cmd_buffer,
- view->image,
- cmd_buffer->state.attachments[idx].current_layout,
- cmd_buffer->state.attachments[idx].current_in_render_loop,
- att.layout, att.in_render_loop,
- 0, 0, &range, sample_locs);
+ /* Determine if the subpass uses separate depth/stencil layouts. */
+ bool uses_separate_depth_stencil_layouts = false;
+ if ((cmd_buffer->state.attachments[idx].current_layout !=
+ cmd_buffer->state.attachments[idx].current_stencil_layout) ||
+ (att.layout != att.stencil_layout)) {
+ uses_separate_depth_stencil_layouts = true;
+ }
+
+ /* For separate layouts, perform depth and stencil transitions
+ * separately.
+ */
+ if (uses_separate_depth_stencil_layouts &&
+ (range.aspectMask == (VK_IMAGE_ASPECT_DEPTH_BIT |
+ VK_IMAGE_ASPECT_STENCIL_BIT))) {
+ /* Depth-only transitions. */
+ range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
+ radv_handle_image_transition(cmd_buffer,
+ view->image,
+ cmd_buffer->state.attachments[idx].current_layout,
+ cmd_buffer->state.attachments[idx].current_in_render_loop,
+ att.layout, att.in_render_loop,
+ 0, 0, &range, sample_locs);
+
+ /* Stencil-only transitions. */
+ range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
+ radv_handle_image_transition(cmd_buffer,
+ view->image,
+ cmd_buffer->state.attachments[idx].current_stencil_layout,
+ cmd_buffer->state.attachments[idx].current_in_render_loop,
+ att.stencil_layout, att.in_render_loop,
+ 0, 0, &range, sample_locs);
+ } else {
+ radv_handle_image_transition(cmd_buffer,
+ view->image,
+ cmd_buffer->state.attachments[idx].current_layout,
+ cmd_buffer->state.attachments[idx].current_in_render_loop,
+ att.layout, att.in_render_loop,
+ 0, 0, &range, sample_locs);
+ }
cmd_buffer->state.attachments[idx].current_layout = att.layout;
+ cmd_buffer->state.attachments[idx].current_stencil_layout = att.stencil_layout;
cmd_buffer->state.attachments[idx].current_in_render_loop = att.in_render_loop;
const VkRenderPassBeginInfo *info)
{
struct radv_cmd_state *state = &cmd_buffer->state;
- const struct VkRenderPassAttachmentBeginInfoKHR *attachment_info = NULL;
+ const struct VkRenderPassAttachmentBeginInfo *attachment_info = NULL;
if (info) {
attachment_info = vk_find_struct_const(info->pNext,
- RENDER_PASS_ATTACHMENT_BEGIN_INFO_KHR);
+ RENDER_PASS_ATTACHMENT_BEGIN_INFO);
}
}
state->attachments[i].current_layout = att->initial_layout;
+ state->attachments[i].current_stencil_layout = att->stencil_initial_layout;
state->attachments[i].sample_location.count = 0;
struct radv_image_view *iview;
list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
result = radv_reset_cmd_buffer(cmd_buffer);
- cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
cmd_buffer->level = pAllocateInfo->level;
pCommandBuffers[i] = radv_cmd_buffer_to_handle(cmd_buffer);
cmd_buffer->state.last_vertex_offset = -1;
cmd_buffer->state.last_first_instance = -1;
cmd_buffer->state.predication_type = -1;
+ cmd_buffer->state.last_sx_ps_downconvert = -1;
+ cmd_buffer->state.last_sx_blend_opt_epsilon = -1;
+ cmd_buffer->state.last_sx_blend_opt_control = -1;
cmd_buffer->usage_flags = pBeginInfo->flags;
if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
return result;
}
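+ /* Stash the inherited statistics; radv_flush_ngg_gs_state() uses them
+ * to enable NGG GS queries in secondary command buffers. */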
+ cmd_buffer->state.inherited_pipeline_statistics =
+ pBeginInfo->pInheritanceInfo->pipelineStatistics;
+
radv_cmd_buffer_set_subpass(cmd_buffer, subpass);
}
- if (unlikely(cmd_buffer->device->trace_bo)) {
- struct radv_device *device = cmd_buffer->device;
-
- radv_cs_add_buffer(device->ws, cmd_buffer->cs,
- device->trace_bo);
-
+ if (unlikely(cmd_buffer->device->trace_bo))
radv_cmd_buffer_trace_emit(cmd_buffer);
- }
+
+ radv_describe_begin_cmd_buffer(cmd_buffer);
cmd_buffer->status = RADV_CMD_BUFFER_STATUS_RECORDING;
assert(firstBinding + bindingCount <= MAX_VBS);
for (uint32_t i = 0; i < bindingCount; i++) {
+ RADV_FROM_HANDLE(radv_buffer, buffer, pBuffers[i]);
uint32_t idx = firstBinding + i;
if (!changed &&
- (vb[idx].buffer != radv_buffer_from_handle(pBuffers[i]) ||
+ (vb[idx].buffer != buffer ||
vb[idx].offset != pOffsets[i])) {
changed = true;
}
- vb[idx].buffer = radv_buffer_from_handle(pBuffers[i]);
+ vb[idx].buffer = buffer;
vb[idx].offset = pOffsets[i];
- radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
- vb[idx].buffer->bo);
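+ /* The buffer handle may be NULL; only track the BO for real bindings. */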
+ if (buffer) {
+ radv_cs_add_buffer(cmd_buffer->device->ws,
+ cmd_buffer->cs, vb[idx].buffer->bo);
+ }
}
if (!changed) {
assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
if (!cmd_buffer->device->use_global_bo_list) {
- for (unsigned j = 0; j < set->layout->buffer_count; ++j)
+ for (unsigned j = 0; j < set->buffer_count; ++j)
if (set->descriptors[j])
radv_cs_add_buffer(ws, cmd_buffer->cs, set->descriptors[j]);
}
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
dst[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
- S_008F0C_OOB_SELECT(3) |
+ S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
S_008F0C_RESOURCE_LEVEL(1);
} else {
dst[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
*/
si_cp_dma_wait_for_idle(cmd_buffer);
+ radv_describe_end_cmd_buffer(cmd_buffer);
+
vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.subpass_sample_locs);
- if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs))
- return vk_error(cmd_buffer->device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
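+ /* cs_finalize() can fail for other reasons than OOM; propagate the
+ * exact error. */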
+ VkResult result = cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs);
+ if (result != VK_SUCCESS)
+ return vk_error(cmd_buffer->device->instance, result);
cmd_buffer->status = RADV_CMD_BUFFER_STATUS_EXECUTABLE;
/* Prefetch all pipeline shaders at first draw time. */
cmd_buffer->state.prefetch_L2_mask |= RADV_PREFETCH_SHADERS;
- if ((cmd_buffer->device->physical_device->rad_info.family == CHIP_NAVI10 ||
- cmd_buffer->device->physical_device->rad_info.family == CHIP_NAVI12 ||
- cmd_buffer->device->physical_device->rad_info.family == CHIP_NAVI14) &&
+ if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX10 &&
cmd_buffer->state.emitted_pipeline &&
radv_pipeline_has_ngg(cmd_buffer->state.emitted_pipeline) &&
!radv_pipeline_has_ngg(cmd_buffer->state.pipeline)) {
state->dirty |= RADV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS;
}
+void radv_CmdSetLineStippleEXT(
+ VkCommandBuffer commandBuffer,
+ uint32_t lineStippleFactor,
+ uint16_t lineStipplePattern)
+{
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ struct radv_cmd_state *state = &cmd_buffer->state;
+
+ state->dynamic.line_stipple.factor = lineStippleFactor;
+ state->dynamic.line_stipple.pattern = lineStipplePattern;
+
+ state->dirty |= RADV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE;
+}
+
void radv_CmdExecuteCommands(
VkCommandBuffer commandBuffer,
uint32_t commandBufferCount,
primary->tess_rings_needed = true;
if (secondary->sample_positions_needed)
primary->sample_positions_needed = true;
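+ /* Propagate GDS usage (e.g. NGG streamout) to the primary. */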
+ if (secondary->gds_needed)
+ primary->gds_needed = true;
if (!secondary->state.framebuffer &&
(primary->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER)) {
primary->state.last_first_instance = secondary->state.last_first_instance;
primary->state.last_num_instances = secondary->state.last_num_instances;
primary->state.last_vertex_offset = secondary->state.last_vertex_offset;
+ primary->state.last_sx_ps_downconvert = secondary->state.last_sx_ps_downconvert;
+ primary->state.last_sx_blend_opt_epsilon = secondary->state.last_sx_blend_opt_epsilon;
+ primary->state.last_sx_blend_opt_control = secondary->state.last_sx_blend_opt_control;
if (secondary->state.last_index_type != -1) {
primary->state.last_index_type =
RADV_FROM_HANDLE(radv_device, device, _device);
struct radv_cmd_pool *pool;
- pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
+ pool = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*pool), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pool == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+ vk_object_base_init(&device->vk, &pool->base,
+ VK_OBJECT_TYPE_COMMAND_POOL);
+
if (pAllocator)
pool->alloc = *pAllocator;
else
- pool->alloc = device->alloc;
+ pool->alloc = device->vk.alloc;
list_inithead(&pool->cmd_buffers);
list_inithead(&pool->free_cmd_buffers);
radv_cmd_buffer_destroy(cmd_buffer);
}
- vk_free2(&device->alloc, pAllocator, pool);
+ vk_object_base_finish(&pool->base);
+ vk_free2(&device->vk.alloc, pAllocator, pool);
}
VkResult radv_ResetCommandPool(
radv_cmd_buffer_set_subpass(cmd_buffer, subpass);
+ radv_describe_barrier_start(cmd_buffer, RGP_BARRIER_EXTERNAL_RENDER_PASS_SYNC);
+
for (uint32_t i = 0; i < subpass->attachment_count; ++i) {
const uint32_t a = subpass->attachments[i].attachment;
if (a == VK_ATTACHMENT_UNUSED)
true);
}
+ radv_describe_barrier_end(cmd_buffer);
+
radv_cmd_buffer_clear_subpass(cmd_buffer);
assert(cmd_buffer->cs->cdw <= cdw_max);
radv_cmd_buffer_resolve_subpass(cmd_buffer);
+ radv_describe_barrier_start(cmd_buffer, RGP_BARRIER_EXTERNAL_RENDER_PASS_SYNC);
+
for (uint32_t i = 0; i < subpass->attachment_count; ++i) {
const uint32_t a = subpass->attachments[i].attachment;
if (a == VK_ATTACHMENT_UNUSED)
continue;
VkImageLayout layout = state->pass->attachments[a].final_layout;
- struct radv_subpass_attachment att = { a, layout };
+ VkImageLayout stencil_layout = state->pass->attachments[a].stencil_final_layout;
+ struct radv_subpass_attachment att = { a, layout, stencil_layout };
radv_handle_subpass_image_transition(cmd_buffer, att, false);
}
+
+ radv_describe_barrier_end(cmd_buffer);
}
-void radv_CmdBeginRenderPass(
- VkCommandBuffer commandBuffer,
- const VkRenderPassBeginInfo* pRenderPassBegin,
- VkSubpassContents contents)
+void
+radv_cmd_buffer_begin_render_pass(struct radv_cmd_buffer *cmd_buffer,
+ const VkRenderPassBeginInfo *pRenderPassBegin)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
RADV_FROM_HANDLE(radv_render_pass, pass, pRenderPassBegin->renderPass);
RADV_FROM_HANDLE(radv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
VkResult result;
result = radv_cmd_state_setup_sample_locations(cmd_buffer, pass, pRenderPassBegin);
if (result != VK_SUCCESS)
return;
+}
+
+void radv_CmdBeginRenderPass(
+ VkCommandBuffer commandBuffer,
+ const VkRenderPassBeginInfo* pRenderPassBegin,
+ VkSubpassContents contents)
+{
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+
+ radv_cmd_buffer_begin_render_pass(cmd_buffer, pRenderPassBegin);
radv_cmd_buffer_begin_subpass(cmd_buffer, 0);
}
-void radv_CmdBeginRenderPass2KHR(
+void radv_CmdBeginRenderPass2(
VkCommandBuffer commandBuffer,
const VkRenderPassBeginInfo* pRenderPassBeginInfo,
- const VkSubpassBeginInfoKHR* pSubpassBeginInfo)
+ const VkSubpassBeginInfo* pSubpassBeginInfo)
{
radv_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
pSubpassBeginInfo->contents);
radv_cmd_buffer_begin_subpass(cmd_buffer, prev_subpass + 1);
}
-void radv_CmdNextSubpass2KHR(
+void radv_CmdNextSubpass2(
VkCommandBuffer commandBuffer,
- const VkSubpassBeginInfoKHR* pSubpassBeginInfo,
- const VkSubpassEndInfoKHR* pSubpassEndInfo)
+ const VkSubpassBeginInfo* pSubpassBeginInfo,
+ const VkSubpassEndInfo* pSubpassEndInfo)
{
radv_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
}
if (info->indexed) {
if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_INDEX_BUFFER)
- radv_emit_index_buffer(cmd_buffer);
+ radv_emit_index_buffer(cmd_buffer, info->indirect);
} else {
/* On GFX7 and later, non-indexed draws overwrite VGT_INDEX_TYPE,
* so the state must be re-emitted before the next indexed
return;
}
+ radv_describe_draw(cmd_buffer);
+
/* Use optimal packet order based on whether we need to sync the
* pipeline.
*/
radv_draw(cmd_buffer, &info);
}
-void radv_CmdDrawIndirectCountKHR(
+void radv_CmdDrawIndirectCount(
VkCommandBuffer commandBuffer,
VkBuffer _buffer,
VkDeviceSize offset,
radv_draw(cmd_buffer, &info);
}
-void radv_CmdDrawIndexedIndirectCountKHR(
+void radv_CmdDrawIndexedIndirectCount(
VkCommandBuffer commandBuffer,
VkBuffer _buffer,
VkDeviceSize offset,
bool pipeline_is_dirty = pipeline &&
pipeline != cmd_buffer->state.emitted_compute_pipeline;
+ radv_describe_dispatch(cmd_buffer, 8, 8, 8);
+
if (cmd_buffer->state.flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
RADV_CMD_FLAG_FLUSH_AND_INV_DB |
RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
radv_dispatch(cmd_buffer, &info);
}
-void radv_CmdEndRenderPass(
- VkCommandBuffer commandBuffer)
+void
+radv_cmd_buffer_end_render_pass(struct radv_cmd_buffer *cmd_buffer)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
-
- radv_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier);
-
- radv_cmd_buffer_end_subpass(cmd_buffer);
-
vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.subpass_sample_locs);
cmd_buffer->state.subpass_sample_locs = NULL;
}
-void radv_CmdEndRenderPass2KHR(
+void radv_CmdEndRenderPass(
+ VkCommandBuffer commandBuffer)
+{
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+
+ radv_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier);
+
+ radv_cmd_buffer_end_subpass(cmd_buffer);
+
+ radv_cmd_buffer_end_render_pass(cmd_buffer);
+}
+
+void radv_CmdEndRenderPass2(
VkCommandBuffer commandBuffer,
- const VkSubpassEndInfoKHR* pSubpassEndInfo)
+ const VkSubpassEndInfo* pSubpassEndInfo)
{
radv_CmdEndRenderPass(commandBuffer);
}
*/
static void radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer,
struct radv_image *image,
- const VkImageSubresourceRange *range,
- uint32_t clear_word)
+ const VkImageSubresourceRange *range)
{
assert(range->baseMipLevel == 0);
assert(range->levelCount == 1 || range->levelCount == VK_REMAINING_ARRAY_LAYERS);
VkImageAspectFlags aspects = VK_IMAGE_ASPECT_DEPTH_BIT;
struct radv_cmd_state *state = &cmd_buffer->state;
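+ /* Initial value for a fully expanded HTILE; it differs when the format
+ * also has stencil. */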
+ uint32_t htile_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f;
VkClearDepthStencilValue value = {};
+ struct radv_barrier_data barrier = {};
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
- state->flush_bits |= radv_clear_htile(cmd_buffer, image, range, clear_word);
+ barrier.layout_transitions.init_mask_ram = 1;
+ radv_describe_layout_transition(cmd_buffer, &barrier);
+
+ state->flush_bits |= radv_clear_htile(cmd_buffer, image, range, htile_value);
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
return;
if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
- uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f;
-
- if (radv_layout_is_htile_compressed(image, dst_layout, dst_render_loop,
- dst_queue_mask)) {
- clear_value = 0;
- }
-
- radv_initialize_htile(cmd_buffer, image, range, clear_value);
+ radv_initialize_htile(cmd_buffer, image, range);
} else if (!radv_layout_is_htile_compressed(image, src_layout, src_render_loop, src_queue_mask) &&
radv_layout_is_htile_compressed(image, dst_layout, dst_render_loop, dst_queue_mask)) {
- uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f;
- radv_initialize_htile(cmd_buffer, image, range, clear_value);
+ radv_initialize_htile(cmd_buffer, image, range);
} else if (radv_layout_is_htile_compressed(image, src_layout, src_render_loop, src_queue_mask) &&
!radv_layout_is_htile_compressed(image, dst_layout, dst_render_loop, dst_queue_mask)) {
cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
- radv_decompress_depth_image_inplace(cmd_buffer, image, range,
- sample_locs);
+ radv_decompress_depth_stencil(cmd_buffer, image, range,
+ sample_locs);
cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
uint32_t value)
{
struct radv_cmd_state *state = &cmd_buffer->state;
+ struct radv_barrier_data barrier = {};
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
+ barrier.layout_transitions.init_mask_ram = 1;
+ radv_describe_layout_transition(cmd_buffer, &barrier);
+
state->flush_bits |= radv_clear_cmask(cmd_buffer, image, range, value);
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
};
uint32_t log2_samples = util_logbase2(image->info.samples);
uint32_t value = fmask_clear_values[log2_samples];
+ struct radv_barrier_data barrier = {};
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
+ barrier.layout_transitions.init_mask_ram = 1;
+ radv_describe_layout_transition(cmd_buffer, &barrier);
+
state->flush_bits |= radv_clear_fmask(cmd_buffer, image, range, value);
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
const VkImageSubresourceRange *range, uint32_t value)
{
struct radv_cmd_state *state = &cmd_buffer->state;
+ struct radv_barrier_data barrier = {};
unsigned size = 0;
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
+ barrier.layout_transitions.init_mask_ram = 1;
+ radv_describe_layout_transition(cmd_buffer, &barrier);
+
state->flush_bits |= radv_clear_dcc(cmd_buffer, image, range, value);
if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX8) {
if (size != image->planes[0].surface.dcc_size) {
state->flush_bits |=
radv_fill_buffer(cmd_buffer, image->bo,
- image->offset + image->dcc_offset + size,
+ image->offset + image->planes[0].surface.dcc_offset + size,
image->planes[0].surface.dcc_size - size,
0xffffffff);
}
if (fce_eliminate || fmask_expand)
radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
- if (fmask_expand)
+ if (fmask_expand) {
+ struct radv_barrier_data barrier = {};
+ barrier.layout_transitions.fmask_color_expand = 1;
+ radv_describe_layout_transition(cmd_buffer, &barrier);
+
radv_expand_fmask_image_inplace(cmd_buffer, image, range);
+ }
}
}
}
struct radv_barrier_info {
+ enum rgp_barrier_reason reason;
uint32_t eventCount;
const VkEvent *pEvents;
VkPipelineStageFlags srcStageMask;
enum radv_cmd_flush_bits src_flush_bits = 0;
enum radv_cmd_flush_bits dst_flush_bits = 0;
+ radv_describe_barrier_start(cmd_buffer, info->reason);
+
for (unsigned i = 0; i < info->eventCount; ++i) {
RADV_FROM_HANDLE(radv_event, event, info->pEvents[i]);
uint64_t va = radv_buffer_get_va(event->bo);
si_cp_dma_wait_for_idle(cmd_buffer);
cmd_buffer->state.flush_bits |= dst_flush_bits;
+
+ radv_describe_barrier_end(cmd_buffer);
}
void radv_CmdPipelineBarrier(
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_barrier_info info;
+ info.reason = RGP_BARRIER_EXTERNAL_CMD_PIPELINE_BARRIER;
info.eventCount = 0;
info.pEvents = NULL;
info.srcStageMask = srcStageMask;
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_barrier_info info;
+ info.reason = RGP_BARRIER_EXTERNAL_CMD_WAIT_EVENTS;
info.eventCount = eventCount;
info.pEvents = pEvents;
info.srcStageMask = 0;
sb[idx].buffer = radv_buffer_from_handle(pBuffers[i]);
sb[idx].offset = pOffsets[i];
- sb[idx].size = pSizes[i];
+
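+ /* pSizes is optional: NULL or VK_WHOLE_SIZE binds the range from the
+ * offset to the end of the buffer. */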
+ if (!pSizes || pSizes[i] == VK_WHOLE_SIZE) {
+ sb[idx].size = sb[idx].buffer->size - sb[idx].offset;
+ } else {
+ sb[idx].size = pSizes[i];
+ }
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
sb[idx].buffer->bo);
(old_hw_enabled_mask != so->hw_enabled_mask)))
radv_emit_streamout_enable(cmd_buffer);
- if (cmd_buffer->device->physical_device->use_ngg_streamout)
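+ /* NGG streamout keeps its counters in GDS and also needs the GDS OA
+ * (ordered append) resource. */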
+ if (cmd_buffer->device->physical_device->use_ngg_streamout) {
cmd_buffer->gds_needed = true;
+ cmd_buffer->gds_oa_needed = true;
+ }
}
static void radv_flush_vgt_streamout(struct radv_cmd_buffer *cmd_buffer)