}
cmd_buffer->push_constant_stages = 0;
- cmd_buffer->scratch_size_needed = 0;
- cmd_buffer->compute_scratch_size_needed = 0;
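+ /* Scratch requirements are now tracked as a per-wave size and a wanted
+  * wave count instead of a single pre-multiplied size, so both fields
+  * have to be reset here. */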
+ cmd_buffer->scratch_size_per_wave_needed = 0;
+ cmd_buffer->scratch_waves_wanted = 0;
+ cmd_buffer->compute_scratch_size_per_wave_needed = 0;
+ cmd_buffer->compute_scratch_waves_wanted = 0;
cmd_buffer->esgs_ring_size_needed = 0;
cmd_buffer->gsvs_ring_size_needed = 0;
cmd_buffer->tess_rings_needed = false;
struct radv_pipeline *pipeline)
{
int num_samples = pipeline->graphics.ms.num_samples;
- struct radv_multisample_state *ms = &pipeline->graphics.ms;
struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline;
if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.needs_sample_positions)
cmd_buffer->sample_positions_needed = true;
if (old_pipeline && num_samples == old_pipeline->graphics.ms.num_samples)
return;
- radeon_set_context_reg_seq(cmd_buffer->cs, R_028BDC_PA_SC_LINE_CNTL, 2);
- radeon_emit(cmd_buffer->cs, ms->pa_sc_line_cntl);
- radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_config);
-
- radeon_set_context_reg(cmd_buffer->cs, R_028A48_PA_SC_MODE_CNTL_0, ms->pa_sc_mode_cntl_0);
-
radv_emit_default_sample_locations(cmd_buffer->cs, num_samples);
- /* GFX9: Flush DFSM when the AA mode changes. */
- if (cmd_buffer->device->dfsm_allowed) {
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
- radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
- }
-
cmd_buffer->state.context_roll_without_scissor_emitted = true;
}
unsigned sx_blend_opt_epsilon = 0;
unsigned sx_blend_opt_control = 0;
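+ /* The attachment state may not exist at this point (e.g. in a secondary
+  * command buffer that inherits no framebuffer); skip in that case. */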
+ if (!cmd_buffer->state.attachments || !subpass)
+ return;
+
for (unsigned i = 0; i < subpass->color_count; ++i) {
if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) {
sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
radv_update_multisample_state(cmd_buffer, pipeline);
radv_update_binning_state(cmd_buffer, pipeline);
- cmd_buffer->scratch_size_needed =
- MAX2(cmd_buffer->scratch_size_needed,
- pipeline->max_waves * pipeline->scratch_bytes_per_wave);
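+ /* Track the largest per-wave scratch size and the largest wave count
+  * independently; both values are needed to size and program the scratch
+  * buffer later. */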
+ cmd_buffer->scratch_size_per_wave_needed = MAX2(cmd_buffer->scratch_size_per_wave_needed,
+ pipeline->scratch_bytes_per_wave);
+ cmd_buffer->scratch_waves_wanted = MAX2(cmd_buffer->scratch_waves_wanted,
+ pipeline->max_waves);
if (!cmd_buffer->state.emitted_pipeline ||
cmd_buffer->state.emitted_pipeline->graphics.can_use_guardband !=
if (cmd_buffer->state.attachments[att_idx].iview->image != image)
return;
- radeon_set_context_reg_seq(cs, R_028028_DB_STENCIL_CLEAR, 2);
- radeon_emit(cs, ds_clear_value.stencil);
- radeon_emit(cs, fui(ds_clear_value.depth));
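+ /* DB_STENCIL_CLEAR (0x028028) and DB_DEPTH_CLEAR (0x02802c) are
+  * consecutive registers, so both aspects can be set with a single
+  * two-register sequence. For a single aspect, only write its own
+  * register to leave the other clear value intact. */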
+ if (aspects == (VK_IMAGE_ASPECT_DEPTH_BIT |
+ VK_IMAGE_ASPECT_STENCIL_BIT)) {
+ radeon_set_context_reg_seq(cs, R_028028_DB_STENCIL_CLEAR, 2);
+ radeon_emit(cs, ds_clear_value.stencil);
+ radeon_emit(cs, fui(ds_clear_value.depth));
+ } else if (aspects == VK_IMAGE_ASPECT_DEPTH_BIT) {
+ radeon_set_context_reg_seq(cs, R_02802C_DB_DEPTH_CLEAR, 1);
+ radeon_emit(cs, fui(ds_clear_value.depth));
+ } else {
+ assert(aspects == VK_IMAGE_ASPECT_STENCIL_BIT);
+ radeon_set_context_reg_seq(cs, R_028028_DB_STENCIL_CLEAR, 1);
+ radeon_emit(cs, ds_clear_value.stencil);
+ }
/* Update the ZRANGE_PRECISION value for the TC-compat bug. This is
* only needed when clearing Z to 0.0.
return;
radv_foreach_stage(stage, stages) {
- if (!pipeline->shaders[stage])
+ shader = radv_get_shader(pipeline, stage);
+ if (!shader)
continue;
- need_push_constants |= pipeline->shaders[stage]->info.loads_push_constants;
- need_push_constants |= pipeline->shaders[stage]->info.loads_dynamic_offsets;
+ need_push_constants |= shader->info.loads_push_constants;
+ need_push_constants |= shader->info.loads_dynamic_offsets;
- uint8_t base = pipeline->shaders[stage]->info.base_inline_push_consts;
- uint8_t count = pipeline->shaders[stage]->info.num_inline_push_consts;
+ uint8_t base = shader->info.base_inline_push_consts;
+ uint8_t count = shader->info.num_inline_push_consts;
radv_emit_inline_push_consts(cmd_buffer, pipeline, stage,
AC_UD_INLINE_PUSH_CONSTANTS,
(cmd_buffer->state.dirty & RADV_CMD_DIRTY_VERTEX_BUFFER)) &&
cmd_buffer->state.pipeline->num_vertex_bindings &&
radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.vs.has_vertex_buffers) {
- struct radv_vertex_elements_info *velems = &cmd_buffer->state.pipeline->vertex_elements;
unsigned vb_offset;
void *vb_ptr;
uint32_t i = 0;
uint32_t offset;
struct radv_buffer *buffer = cmd_buffer->vertex_bindings[i].buffer;
uint32_t stride = cmd_buffer->state.pipeline->binding_stride[i];
+ unsigned num_records;
if (!buffer)
continue;
offset = cmd_buffer->vertex_bindings[i].offset;
va += offset + buffer->offset;
+
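+ /* NUM_RECORDS is in units of the vertex stride on all generations
+  * except GFX8, where it is in bytes. */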
+ num_records = buffer->size - offset;
+ if (cmd_buffer->device->physical_device->rad_info.chip_class != GFX8 && stride)
+ num_records /= stride;
+
desc[0] = va;
desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride);
- if (cmd_buffer->device->physical_device->rad_info.chip_class <= GFX7 && stride)
- desc[2] = (buffer->size - offset - velems->format_size[i]) / stride + 1;
- else
- desc[2] = buffer->size - offset;
+ desc[2] = num_records;
desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
flush_bits |= RADV_CMD_FLAG_INV_VCACHE;
/* Unlike LLVM, ACO uses SMEM for SSBOs and we have to
* invalidate the scalar cache. */
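+ /* This is limited to GFX8+, presumably because ACO doesn't select SMEM
+  * for SSBO loads on older generations. */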
- if (cmd_buffer->device->physical_device->use_aco)
+ if (cmd_buffer->device->physical_device->use_aco &&
+ cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8)
flush_bits |= RADV_CMD_FLAG_INV_SCACHE;
if (!image_is_coherent)
struct radv_image_view *view = cmd_buffer->state.attachments[idx].iview;
struct radv_sample_locations_state *sample_locs;
VkImageSubresourceRange range;
- range.aspectMask = 0;
+ range.aspectMask = view->aspect_mask;
range.baseMipLevel = view->base_mip;
range.levelCount = 1;
range.baseArrayLayer = view->base_layer;
sample_locs = radv_get_attachment_sample_locations(cmd_buffer, idx,
begin_subpass);
- radv_handle_image_transition(cmd_buffer,
- view->image,
- cmd_buffer->state.attachments[idx].current_layout,
- cmd_buffer->state.attachments[idx].current_in_render_loop,
- att.layout, att.in_render_loop,
- 0, 0, &range, sample_locs);
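+ /* With VK_KHR_separate_depth_stencil_layouts the depth and stencil
+  * aspects of an attachment can be in different layouts, so the
+  * transition may have to be performed separately for each aspect. */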
+ /* Determine if the subpass uses separate depth/stencil layouts. */
+ bool uses_separate_depth_stencil_layouts = false;
+ if ((cmd_buffer->state.attachments[idx].current_layout !=
+ cmd_buffer->state.attachments[idx].current_stencil_layout) ||
+ (att.layout != att.stencil_layout)) {
+ uses_separate_depth_stencil_layouts = true;
+ }
+
+ /* For separate layouts, perform depth and stencil transitions
+ * separately.
+ */
+ if (uses_separate_depth_stencil_layouts &&
+ (range.aspectMask == (VK_IMAGE_ASPECT_DEPTH_BIT |
+ VK_IMAGE_ASPECT_STENCIL_BIT))) {
+ /* Depth-only transitions. */
+ range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
+ radv_handle_image_transition(cmd_buffer,
+ view->image,
+ cmd_buffer->state.attachments[idx].current_layout,
+ cmd_buffer->state.attachments[idx].current_in_render_loop,
+ att.layout, att.in_render_loop,
+ 0, 0, &range, sample_locs);
+
+ /* Stencil-only transitions. */
+ range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
+ radv_handle_image_transition(cmd_buffer,
+ view->image,
+ cmd_buffer->state.attachments[idx].current_stencil_layout,
+ cmd_buffer->state.attachments[idx].current_in_render_loop,
+ att.stencil_layout, att.in_render_loop,
+ 0, 0, &range, sample_locs);
+ } else {
+ radv_handle_image_transition(cmd_buffer,
+ view->image,
+ cmd_buffer->state.attachments[idx].current_layout,
+ cmd_buffer->state.attachments[idx].current_in_render_loop,
+ att.layout, att.in_render_loop,
+ 0, 0, &range, sample_locs);
+ }
cmd_buffer->state.attachments[idx].current_layout = att.layout;
+ cmd_buffer->state.attachments[idx].current_stencil_layout = att.stencil_layout;
cmd_buffer->state.attachments[idx].current_in_render_loop = att.in_render_loop;
}
state->attachments[i].current_layout = att->initial_layout;
+ state->attachments[i].current_stencil_layout = att->stencil_initial_layout;
state->attachments[i].sample_location.count = 0;
struct radv_image_view *iview;
for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
- if (!list_empty(&pool->free_cmd_buffers)) {
+ if (!list_is_empty(&pool->free_cmd_buffers)) {
struct radv_cmd_buffer *cmd_buffer = list_first_entry(&pool->free_cmd_buffers, struct radv_cmd_buffer, pool_link);
list_del(&cmd_buffer->pool_link);
radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, pipeline->cs.cdw);
radeon_emit_array(cmd_buffer->cs, pipeline->cs.buf, pipeline->cs.cdw);
- cmd_buffer->compute_scratch_size_needed =
- MAX2(cmd_buffer->compute_scratch_size_needed,
- pipeline->max_waves * pipeline->scratch_bytes_per_wave);
+ cmd_buffer->compute_scratch_size_per_wave_needed = MAX2(cmd_buffer->compute_scratch_size_per_wave_needed,
+ pipeline->scratch_bytes_per_wave);
+ cmd_buffer->compute_scratch_waves_wanted = MAX2(cmd_buffer->compute_scratch_waves_wanted,
+ pipeline->max_waves);
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
pipeline->shaders[MESA_SHADER_COMPUTE]->bo);
for (uint32_t i = 0; i < commandBufferCount; i++) {
RADV_FROM_HANDLE(radv_cmd_buffer, secondary, pCmdBuffers[i]);
- primary->scratch_size_needed = MAX2(primary->scratch_size_needed,
- secondary->scratch_size_needed);
- primary->compute_scratch_size_needed = MAX2(primary->compute_scratch_size_needed,
- secondary->compute_scratch_size_needed);
+ primary->scratch_size_per_wave_needed = MAX2(primary->scratch_size_per_wave_needed,
+ secondary->scratch_size_per_wave_needed);
+ primary->scratch_waves_wanted = MAX2(primary->scratch_waves_wanted,
+ secondary->scratch_waves_wanted);
+ primary->compute_scratch_size_per_wave_needed = MAX2(primary->compute_scratch_size_per_wave_needed,
+ secondary->compute_scratch_size_per_wave_needed);
+ primary->compute_scratch_waves_wanted = MAX2(primary->compute_scratch_waves_wanted,
+ secondary->compute_scratch_waves_wanted);
if (secondary->esgs_ring_size_needed > primary->esgs_ring_size_needed)
primary->esgs_ring_size_needed = secondary->esgs_ring_size_needed;
continue;
VkImageLayout layout = state->pass->attachments[a].final_layout;
- struct radv_subpass_attachment att = { a, layout };
+ VkImageLayout stencil_layout = state->pass->attachments[a].stencil_final_layout;
+ struct radv_subpass_attachment att = { a, layout, stencil_layout };
radv_handle_subpass_image_transition(cmd_buffer, att, false);
}
}
ASSERTED unsigned cdw_max = radeon_check_space(ws, cs, 25);
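+ /* Wave32 compute shaders are only available on GFX10+; enable the
+  * CS_W32_EN bit in the dispatch initiator for them. */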
+ if (compute_shader->info.wave_size == 32) {
+ assert(cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10);
+ dispatch_initiator |= S_00B800_CS_W32_EN(1);
+ }
+
if (info->indirect) {
uint64_t va = radv_buffer_get_va(info->indirect->bo);
*/
static void radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer,
struct radv_image *image,
- const VkImageSubresourceRange *range,
- uint32_t clear_word)
+ const VkImageSubresourceRange *range)
{
assert(range->baseMipLevel == 0);
assert(range->levelCount == 1 || range->levelCount == VK_REMAINING_MIP_LEVELS);
VkImageAspectFlags aspects = VK_IMAGE_ASPECT_DEPTH_BIT;
struct radv_cmd_state *state = &cmd_buffer->state;
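+ /* Choose the initial HTILE value here (the same values the transition
+  * code used to pass in): formats with stencil use 0xfffff30f, depth-only
+  * formats use 0xfffc000f. */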
+ uint32_t htile_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f;
VkClearDepthStencilValue value = {};
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
- state->flush_bits |= radv_clear_htile(cmd_buffer, image, range, clear_word);
+ state->flush_bits |= radv_clear_htile(cmd_buffer, image, range, htile_value);
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
return;
if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
- uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f;
-
- if (radv_layout_is_htile_compressed(image, dst_layout, dst_render_loop,
- dst_queue_mask)) {
- clear_value = 0;
- }
-
- radv_initialize_htile(cmd_buffer, image, range, clear_value);
+ radv_initialize_htile(cmd_buffer, image, range);
} else if (!radv_layout_is_htile_compressed(image, src_layout, src_render_loop, src_queue_mask) &&
radv_layout_is_htile_compressed(image, dst_layout, dst_render_loop, dst_queue_mask)) {
- uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f;
- radv_initialize_htile(cmd_buffer, image, range, clear_value);
+ radv_initialize_htile(cmd_buffer, image, range);
} else if (radv_layout_is_htile_compressed(image, src_layout, src_render_loop, src_queue_mask) &&
!radv_layout_is_htile_compressed(image, dst_layout, dst_render_loop, dst_queue_mask)) {
cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
si_emit_cache_flush(cmd_buffer);
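+ /* Reserve CS space for the marker packets emitted below and verify the
+  * estimate once they have been emitted. */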
+ ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 12);
+
if (!(pipelineStage & ~VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT)) {
radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) |
va, marker,
cmd_buffer->gfx9_eop_bug_va);
}
+
+ assert(cmd_buffer->cs->cdw <= cdw_max);
}