X-Git-Url: https://git.libre-soc.org/?p=mesa.git;a=blobdiff_plain;f=src%2Famd%2Fvulkan%2Fradv_cmd_buffer.c;h=792462ed9e2764793e0ddc4d3105e7b5e5953e6f;hp=da373d39fdd30c8c6b7ce4cfcb5cbfacdc717dc5;hb=aa675cdc91fe1d317650c279b3470c0081e85527;hpb=0e1724af61d79a241026f2714dc23723f0e3d286 diff --git a/src/amd/vulkan/radv_cmd_buffer.c b/src/amd/vulkan/radv_cmd_buffer.c index da373d39fdd..792462ed9e2 100644 --- a/src/amd/vulkan/radv_cmd_buffer.c +++ b/src/amd/vulkan/radv_cmd_buffer.c @@ -54,7 +54,9 @@ enum { static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image, VkImageLayout src_layout, + bool src_render_loop, VkImageLayout dst_layout, + bool dst_render_loop, uint32_t src_family, uint32_t dst_family, const VkImageSubresourceRange *range, @@ -90,6 +92,13 @@ const struct radv_dynamic_state default_dynamic_state = { .front = 0u, .back = 0u, }, + .line_stipple = { + .factor = 0u, + .pattern = 0u, + }, + .cull_mode = 0u, + .front_face = 0u, + .primitive_topology = 0u, }; static void @@ -100,15 +109,15 @@ radv_bind_dynamic_state(struct radv_cmd_buffer *cmd_buffer, uint32_t copy_mask = src->mask; uint32_t dest_mask = 0; - /* Make sure to copy the number of viewports/scissors because they can - * only be specified at pipeline creation time. - */ - dest->viewport.count = src->viewport.count; - dest->scissor.count = src->scissor.count; dest->discard_rectangle.count = src->discard_rectangle.count; dest->sample_location.count = src->sample_location.count; if (copy_mask & RADV_DYNAMIC_VIEWPORT) { + if (dest->viewport.count != src->viewport.count) { + dest->viewport.count = src->viewport.count; + dest_mask |= RADV_DYNAMIC_VIEWPORT; + } + if (memcmp(&dest->viewport.viewports, &src->viewport.viewports, src->viewport.count * sizeof(VkViewport))) { typed_memcpy(dest->viewport.viewports, @@ -119,6 +128,11 @@ radv_bind_dynamic_state(struct radv_cmd_buffer *cmd_buffer, } if (copy_mask & RADV_DYNAMIC_SCISSOR) { + if (dest->scissor.count != src->scissor.count) { + dest->scissor.count = src->scissor.count; + dest_mask |= RADV_DYNAMIC_SCISSOR; + } + if (memcmp(&dest->scissor.scissors, &src->scissor.scissors, src->scissor.count * sizeof(VkRect2D))) { typed_memcpy(dest->scissor.scissors, @@ -210,6 +224,78 @@ radv_bind_dynamic_state(struct radv_cmd_buffer *cmd_buffer, } } + if (copy_mask & RADV_DYNAMIC_LINE_STIPPLE) { + if (memcmp(&dest->line_stipple, &src->line_stipple, + sizeof(src->line_stipple))) { + dest->line_stipple = src->line_stipple; + dest_mask |= RADV_DYNAMIC_LINE_STIPPLE; + } + } + + if (copy_mask & RADV_DYNAMIC_CULL_MODE) { + if (dest->cull_mode != src->cull_mode) { + dest->cull_mode = src->cull_mode; + dest_mask |= RADV_DYNAMIC_CULL_MODE; + } + } + + if (copy_mask & RADV_DYNAMIC_FRONT_FACE) { + if (dest->front_face != src->front_face) { + dest->front_face = src->front_face; + dest_mask |= RADV_DYNAMIC_FRONT_FACE; + } + } + + if (copy_mask & RADV_DYNAMIC_PRIMITIVE_TOPOLOGY) { + if (dest->primitive_topology != src->primitive_topology) { + dest->primitive_topology = src->primitive_topology; + dest_mask |= RADV_DYNAMIC_PRIMITIVE_TOPOLOGY; + } + } + + if (copy_mask & RADV_DYNAMIC_DEPTH_TEST_ENABLE) { + if (dest->depth_test_enable != src->depth_test_enable) { + dest->depth_test_enable = src->depth_test_enable; + dest_mask |= RADV_DYNAMIC_DEPTH_TEST_ENABLE; + } + } + + if (copy_mask & RADV_DYNAMIC_DEPTH_WRITE_ENABLE) { + if (dest->depth_write_enable != src->depth_write_enable) { + dest->depth_write_enable = src->depth_write_enable; + dest_mask |= 
RADV_DYNAMIC_DEPTH_WRITE_ENABLE; + } + } + + if (copy_mask & RADV_DYNAMIC_DEPTH_COMPARE_OP) { + if (dest->depth_compare_op != src->depth_compare_op) { + dest->depth_compare_op = src->depth_compare_op; + dest_mask |= RADV_DYNAMIC_DEPTH_COMPARE_OP; + } + } + + if (copy_mask & RADV_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE) { + if (dest->depth_bounds_test_enable != src->depth_bounds_test_enable) { + dest->depth_bounds_test_enable = src->depth_bounds_test_enable; + dest_mask |= RADV_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE; + } + } + + if (copy_mask & RADV_DYNAMIC_STENCIL_TEST_ENABLE) { + if (dest->stencil_test_enable != src->stencil_test_enable) { + dest->stencil_test_enable = src->stencil_test_enable; + dest_mask |= RADV_DYNAMIC_STENCIL_TEST_ENABLE; + } + } + + if (copy_mask & RADV_DYNAMIC_STENCIL_OP) { + if (memcmp(&dest->stencil_op, &src->stencil_op, + sizeof(src->stencil_op))) { + dest->stencil_op = src->stencil_op; + dest_mask |= RADV_DYNAMIC_STENCIL_OP; + } + } + cmd_buffer->state.dirty |= dest_mask; } @@ -220,10 +306,11 @@ radv_bind_streamout_state(struct radv_cmd_buffer *cmd_buffer, struct radv_streamout_state *so = &cmd_buffer->state.streamout; struct radv_shader_info *info; - if (!pipeline->streamout_shader) + if (!pipeline->streamout_shader || + cmd_buffer->device->physical_device->use_ngg_streamout) return; - info = &pipeline->streamout_shader->info.info; + info = &pipeline->streamout_shader->info; for (int i = 0; i < MAX_SO_BUFFERS; i++) so->stride_in_dw[i] = info->so.strides[i]; @@ -249,6 +336,31 @@ enum ring_type radv_queue_family_to_ring(int f) { } } +static void +radv_destroy_cmd_buffer(struct radv_cmd_buffer *cmd_buffer) +{ + list_del(&cmd_buffer->pool_link); + + list_for_each_entry_safe(struct radv_cmd_buffer_upload, up, + &cmd_buffer->upload.list, list) { + cmd_buffer->device->ws->buffer_destroy(up->upload_bo); + list_del(&up->list); + free(up); + } + + if (cmd_buffer->upload.upload_bo) + cmd_buffer->device->ws->buffer_destroy(cmd_buffer->upload.upload_bo); + + if (cmd_buffer->cs) + cmd_buffer->device->ws->cs_destroy(cmd_buffer->cs); + + for (unsigned i = 0; i < MAX_BIND_POINTS; i++) + free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr); + + vk_object_base_finish(&cmd_buffer->base); + vk_free(&cmd_buffer->pool->alloc, cmd_buffer); +} + static VkResult radv_create_cmd_buffer( struct radv_device * device, struct radv_cmd_pool * pool, @@ -262,28 +374,21 @@ static VkResult radv_create_cmd_buffer( if (cmd_buffer == NULL) return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); - cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC; + vk_object_base_init(&device->vk, &cmd_buffer->base, + VK_OBJECT_TYPE_COMMAND_BUFFER); + cmd_buffer->device = device; cmd_buffer->pool = pool; cmd_buffer->level = level; - if (pool) { - list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers); - cmd_buffer->queue_family_index = pool->queue_family_index; - - } else { - /* Init the pool_link so we can safely call list_del when we destroy - * the command buffer - */ - list_inithead(&cmd_buffer->pool_link); - cmd_buffer->queue_family_index = RADV_QUEUE_GENERAL; - } + list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers); + cmd_buffer->queue_family_index = pool->queue_family_index; ring = radv_queue_family_to_ring(cmd_buffer->queue_family_index); cmd_buffer->cs = device->ws->cs_create(device->ws, ring); if (!cmd_buffer->cs) { - vk_free(&cmd_buffer->pool->alloc, cmd_buffer); + radv_destroy_cmd_buffer(cmd_buffer); return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); } @@ -294,28 +399,6 @@ 
static VkResult radv_create_cmd_buffer( return VK_SUCCESS; } -static void -radv_cmd_buffer_destroy(struct radv_cmd_buffer *cmd_buffer) -{ - list_del(&cmd_buffer->pool_link); - - list_for_each_entry_safe(struct radv_cmd_buffer_upload, up, - &cmd_buffer->upload.list, list) { - cmd_buffer->device->ws->buffer_destroy(up->upload_bo); - list_del(&up->list); - free(up); - } - - if (cmd_buffer->upload.upload_bo) - cmd_buffer->device->ws->buffer_destroy(cmd_buffer->upload.upload_bo); - cmd_buffer->device->ws->cs_destroy(cmd_buffer->cs); - - for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) - free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr); - - vk_free(&cmd_buffer->pool->alloc, cmd_buffer); -} - static VkResult radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer) { @@ -329,11 +412,15 @@ radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer) } cmd_buffer->push_constant_stages = 0; - cmd_buffer->scratch_size_needed = 0; - cmd_buffer->compute_scratch_size_needed = 0; + cmd_buffer->scratch_size_per_wave_needed = 0; + cmd_buffer->scratch_waves_wanted = 0; + cmd_buffer->compute_scratch_size_per_wave_needed = 0; + cmd_buffer->compute_scratch_waves_wanted = 0; cmd_buffer->esgs_ring_size_needed = 0; cmd_buffer->gsvs_ring_size_needed = 0; cmd_buffer->tess_rings_needed = false; + cmd_buffer->gds_needed = false; + cmd_buffer->gds_oa_needed = false; cmd_buffer->sample_positions_needed = false; if (cmd_buffer->upload.upload_bo) @@ -345,7 +432,7 @@ radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer) memset(cmd_buffer->vertex_bindings, 0, sizeof(cmd_buffer->vertex_bindings)); - for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) { + for (unsigned i = 0; i < MAX_BIND_POINTS; i++) { cmd_buffer->descriptors[i].dirty = 0; cmd_buffer->descriptors[i].valid = 0; cmd_buffer->descriptors[i].push_dirty = false; @@ -396,7 +483,8 @@ radv_cmd_buffer_resize_upload_buf(struct radv_cmd_buffer *cmd_buffer, RADEON_DOMAIN_GTT, RADEON_FLAG_CPU_ACCESS| RADEON_FLAG_NO_INTERPROCESS_SHARING | - RADEON_FLAG_32BIT, + RADEON_FLAG_32BIT | + RADEON_FLAG_GTT_WC, RADV_BO_PRIORITY_UPLOAD_BUFFER); if (!bo) { @@ -512,6 +600,11 @@ static void radv_cmd_buffer_after_draw(struct radv_cmd_buffer *cmd_buffer, enum radv_cmd_flush_bits flags) { + if (unlikely(cmd_buffer->device->thread_trace_bo)) { + radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); + radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_THREAD_TRACE_MARKER) | EVENT_INDEX(0)); + } + if (cmd_buffer->device->instance->debug_flags & RADV_DEBUG_SYNC_SHADERS) { assert(flags & (RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_CS_PARTIAL_FLUSH)); @@ -533,14 +626,17 @@ radv_cmd_buffer_after_draw(struct radv_cmd_buffer *cmd_buffer, static void radv_save_pipeline(struct radv_cmd_buffer *cmd_buffer, - struct radv_pipeline *pipeline, enum ring_type ring) + struct radv_pipeline *pipeline) { struct radv_device *device = cmd_buffer->device; + enum ring_type ring; uint32_t data[2]; uint64_t va; va = radv_buffer_get_va(device->trace_bo); + ring = radv_queue_family_to_ring(cmd_buffer->queue_family_index); + switch (ring) { case RING_GFX: va += 8; @@ -552,8 +648,9 @@ radv_save_pipeline(struct radv_cmd_buffer *cmd_buffer, assert(!"invalid ring type"); } - data[0] = (uintptr_t)pipeline; - data[1] = (uintptr_t)pipeline >> 32; + uint64_t pipeline_address = (uintptr_t)pipeline; + data[0] = pipeline_address; + data[1] = pipeline_address >> 32; radv_emit_write_data_packet(cmd_buffer, va, 2, data); } @@ -675,8 +772,8 @@ radv_convert_user_sample_locs(struct 
radv_sample_locations_state *state, float shifted_pos_x = user_locs[i].x - 0.5; float shifted_pos_y = user_locs[i].y - 0.5; - int32_t scaled_pos_x = floor(shifted_pos_x * 16); - int32_t scaled_pos_y = floor(shifted_pos_y * 16); + int32_t scaled_pos_x = floorf(shifted_pos_x * 16); + int32_t scaled_pos_y = floorf(shifted_pos_y * 16); sample_locs[i].x = CLAMP(scaled_pos_x, -8, 7); sample_locs[i].y = CLAMP(scaled_pos_y, -8, 7); @@ -753,8 +850,6 @@ radv_compute_centroid_priority(struct radv_cmd_buffer *cmd_buffer, static void radv_emit_sample_locations(struct radv_cmd_buffer *cmd_buffer) { - struct radv_pipeline *pipeline = cmd_buffer->state.pipeline; - struct radv_multisample_state *ms = &pipeline->graphics.ms; struct radv_sample_locations_state *sample_location = &cmd_buffer->state.dynamic.sample_location; uint32_t num_samples = (uint32_t)sample_location->per_pixel; @@ -785,10 +880,12 @@ radv_emit_sample_locations(struct radv_cmd_buffer *cmd_buffer) num_samples); /* Compute the maximum sample distance from the specified locations. */ - for (uint32_t i = 0; i < num_samples; i++) { - VkOffset2D offset = sample_locs[0][i]; - max_sample_dist = MAX2(max_sample_dist, - MAX2(abs(offset.x), abs(offset.y))); + for (unsigned i = 0; i < 4; ++i) { + for (uint32_t j = 0; j < num_samples; j++) { + VkOffset2D offset = sample_locs[i][j]; + max_sample_dist = MAX2(max_sample_dist, + MAX2(abs(offset.x), abs(offset.y))); + } } /* Emit the specified user sample locations. */ @@ -815,13 +912,9 @@ radv_emit_sample_locations(struct radv_cmd_buffer *cmd_buffer) } /* Emit the maximum sample distance and the centroid priority. */ - uint32_t pa_sc_aa_config = ms->pa_sc_aa_config; - - pa_sc_aa_config &= C_028BE0_MAX_SAMPLE_DIST; - pa_sc_aa_config |= S_028BE0_MAX_SAMPLE_DIST(max_sample_dist); - - radeon_set_context_reg_seq(cs, R_028BE0_PA_SC_AA_CONFIG, 1); - radeon_emit(cs, pa_sc_aa_config); + radeon_set_context_reg_rmw(cs, R_028BE0_PA_SC_AA_CONFIG, + S_028BE0_MAX_SAMPLE_DIST(max_sample_dist), + ~C_028BE0_MAX_SAMPLE_DIST); radeon_set_context_reg_seq(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2); radeon_emit(cs, centroid_priority); @@ -858,29 +951,16 @@ radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer, struct radv_pipeline *pipeline) { int num_samples = pipeline->graphics.ms.num_samples; - struct radv_multisample_state *ms = &pipeline->graphics.ms; struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline; - if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.needs_sample_positions) + if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.needs_sample_positions) cmd_buffer->sample_positions_needed = true; if (old_pipeline && num_samples == old_pipeline->graphics.ms.num_samples) return; - radeon_set_context_reg_seq(cmd_buffer->cs, R_028BDC_PA_SC_LINE_CNTL, 2); - radeon_emit(cmd_buffer->cs, ms->pa_sc_line_cntl); - radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_config); - - radeon_set_context_reg(cmd_buffer->cs, R_028A48_PA_SC_MODE_CNTL_0, ms->pa_sc_mode_cntl_0); - radv_emit_default_sample_locations(cmd_buffer->cs, num_samples); - /* GFX9: Flush DFSM when the AA mode changes. 
*/ - if (cmd_buffer->device->dfsm_allowed) { - radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); - radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0)); - } - cmd_buffer->state.context_roll_without_scissor_emitted = true; } @@ -986,26 +1066,29 @@ radv_emit_prefetch_L2(struct radv_cmd_buffer *cmd_buffer, static void radv_emit_rbplus_state(struct radv_cmd_buffer *cmd_buffer) { - if (!cmd_buffer->device->physical_device->rbplus_allowed) + if (!cmd_buffer->device->physical_device->rad_info.rbplus_allowed) return; struct radv_pipeline *pipeline = cmd_buffer->state.pipeline; - struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer; const struct radv_subpass *subpass = cmd_buffer->state.subpass; unsigned sx_ps_downconvert = 0; unsigned sx_blend_opt_epsilon = 0; unsigned sx_blend_opt_control = 0; + if (!cmd_buffer->state.attachments || !subpass) + return; + for (unsigned i = 0; i < subpass->color_count; ++i) { if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) { - sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4); - sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4); + /* We don't set the DISABLE bits, because the HW can't have holes, + * so the SPI color format is set to 32-bit 1-component. */ + sx_ps_downconvert |= V_028754_SX_RT_EXPORT_32_R << (i * 4); continue; } int idx = subpass->color_attachments[i].attachment; - struct radv_color_buffer_info *cb = &framebuffer->attachments[idx].cb; + struct radv_color_buffer_info *cb = &cmd_buffer->state.attachments[idx].cb; unsigned format = G_028C70_FORMAT(cb->cb_color_info); unsigned swap = G_028C70_COMP_SWAP(cb->cb_color_info); @@ -1116,17 +1199,52 @@ radv_emit_rbplus_state(struct radv_cmd_buffer *cmd_buffer) } } - for (unsigned i = subpass->color_count; i < 8; ++i) { - sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4); - sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4); - } - /* TODO: avoid redundantly setting context registers */ + /* Do not set the DISABLE bits for the unused attachments, as that + * breaks dual source blending in SkQP and does not seem to improve + * performance. 
*/ + + if (sx_ps_downconvert == cmd_buffer->state.last_sx_ps_downconvert && + sx_blend_opt_epsilon == cmd_buffer->state.last_sx_blend_opt_epsilon && + sx_blend_opt_control == cmd_buffer->state.last_sx_blend_opt_control) + return; + radeon_set_context_reg_seq(cmd_buffer->cs, R_028754_SX_PS_DOWNCONVERT, 3); radeon_emit(cmd_buffer->cs, sx_ps_downconvert); radeon_emit(cmd_buffer->cs, sx_blend_opt_epsilon); radeon_emit(cmd_buffer->cs, sx_blend_opt_control); cmd_buffer->state.context_roll_without_scissor_emitted = true; + + cmd_buffer->state.last_sx_ps_downconvert = sx_ps_downconvert; + cmd_buffer->state.last_sx_blend_opt_epsilon = sx_blend_opt_epsilon; + cmd_buffer->state.last_sx_blend_opt_control = sx_blend_opt_control; +} + +static void +radv_emit_batch_break_on_new_ps(struct radv_cmd_buffer *cmd_buffer) +{ + if (!cmd_buffer->device->pbb_allowed) + return; + + struct radv_binning_settings settings = + radv_get_binning_settings(cmd_buffer->device->physical_device); + bool break_for_new_ps = + (!cmd_buffer->state.emitted_pipeline || + cmd_buffer->state.emitted_pipeline->shaders[MESA_SHADER_FRAGMENT] != + cmd_buffer->state.pipeline->shaders[MESA_SHADER_FRAGMENT]) && + (settings.context_states_per_bin > 1 || + settings.persistent_states_per_bin > 1); + bool break_for_new_cb_target_mask = + (!cmd_buffer->state.emitted_pipeline || + cmd_buffer->state.emitted_pipeline->graphics.cb_target_mask != + cmd_buffer->state.pipeline->graphics.cb_target_mask) && + settings.context_states_per_bin > 1; + + if (!break_for_new_ps && !break_for_new_cb_target_mask) + return; + + radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); + radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0)); } static void @@ -1140,15 +1258,38 @@ radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer) radv_update_multisample_state(cmd_buffer, pipeline); radv_update_binning_state(cmd_buffer, pipeline); - cmd_buffer->scratch_size_needed = - MAX2(cmd_buffer->scratch_size_needed, - pipeline->max_waves * pipeline->scratch_bytes_per_wave); + cmd_buffer->scratch_size_per_wave_needed = MAX2(cmd_buffer->scratch_size_per_wave_needed, + pipeline->scratch_bytes_per_wave); + cmd_buffer->scratch_waves_wanted = MAX2(cmd_buffer->scratch_waves_wanted, + pipeline->max_waves); if (!cmd_buffer->state.emitted_pipeline || cmd_buffer->state.emitted_pipeline->graphics.can_use_guardband != pipeline->graphics.can_use_guardband) cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR; + if (!cmd_buffer->state.emitted_pipeline || + cmd_buffer->state.emitted_pipeline->graphics.pa_su_sc_mode_cntl != + pipeline->graphics.pa_su_sc_mode_cntl) + cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_CULL_MODE | + RADV_CMD_DIRTY_DYNAMIC_FRONT_FACE; + + if (!cmd_buffer->state.emitted_pipeline) + cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_PRIMITIVE_TOPOLOGY; + + if (!cmd_buffer->state.emitted_pipeline || + cmd_buffer->state.emitted_pipeline->graphics.db_depth_control != + pipeline->graphics.db_depth_control) + cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_TEST_ENABLE | + RADV_CMD_DIRTY_DYNAMIC_DEPTH_WRITE_ENABLE | + RADV_CMD_DIRTY_DYNAMIC_DEPTH_COMPARE_OP | + RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE | + RADV_CMD_DIRTY_DYNAMIC_STENCIL_TEST_ENABLE | + RADV_CMD_DIRTY_DYNAMIC_STENCIL_OP; + + if (!cmd_buffer->state.emitted_pipeline) + cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_OP; + radeon_emit_array(cmd_buffer->cs, pipeline->cs.buf, pipeline->cs.cdw); if (!cmd_buffer->state.emitted_pipeline || @@ -1160,6 
+1301,8 @@ radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer) cmd_buffer->state.context_roll_without_scissor_emitted = true; } + radv_emit_batch_break_on_new_ps(cmd_buffer); + for (unsigned i = 0; i < MESA_SHADER_COMPUTE; i++) { if (!pipeline->shaders[i]) continue; @@ -1173,7 +1316,7 @@ radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer) pipeline->gs_copy_shader->bo); if (unlikely(cmd_buffer->device->trace_bo)) - radv_save_pipeline(cmd_buffer, pipeline, RING_GFX); + radv_save_pipeline(cmd_buffer, pipeline); cmd_buffer->state.emitted_pipeline = pipeline; @@ -1222,7 +1365,7 @@ radv_emit_line_width(struct radv_cmd_buffer *cmd_buffer) unsigned width = cmd_buffer->state.dynamic.line_width * 8; radeon_set_context_reg(cmd_buffer->cs, R_028A08_PA_SU_LINE_CNTL, - S_028A08_WIDTH(CLAMP(width, 0, 0xFFF))); + S_028A08_WIDTH(CLAMP(width, 0, 0xFFFF))); } static void @@ -1281,25 +1424,147 @@ radv_emit_depth_bias(struct radv_cmd_buffer *cmd_buffer) radeon_emit(cmd_buffer->cs, bias); /* BACK OFFSET */ } +static void +radv_emit_line_stipple(struct radv_cmd_buffer *cmd_buffer) +{ + struct radv_dynamic_state *d = &cmd_buffer->state.dynamic; + uint32_t auto_reset_cntl = 1; + + if (d->primitive_topology == V_008958_DI_PT_LINESTRIP) + auto_reset_cntl = 2; + + radeon_set_context_reg(cmd_buffer->cs, R_028A0C_PA_SC_LINE_STIPPLE, + S_028A0C_LINE_PATTERN(d->line_stipple.pattern) | + S_028A0C_REPEAT_COUNT(d->line_stipple.factor - 1) | + S_028A0C_AUTO_RESET_CNTL(auto_reset_cntl)); +} + +static void +radv_emit_culling(struct radv_cmd_buffer *cmd_buffer, uint32_t states) +{ + unsigned pa_su_sc_mode_cntl = cmd_buffer->state.pipeline->graphics.pa_su_sc_mode_cntl; + struct radv_dynamic_state *d = &cmd_buffer->state.dynamic; + + if (states & RADV_CMD_DIRTY_DYNAMIC_CULL_MODE) { + pa_su_sc_mode_cntl &= C_028814_CULL_FRONT; + pa_su_sc_mode_cntl |= S_028814_CULL_FRONT(!!(d->cull_mode & VK_CULL_MODE_FRONT_BIT)); + + pa_su_sc_mode_cntl &= C_028814_CULL_BACK; + pa_su_sc_mode_cntl |= S_028814_CULL_BACK(!!(d->cull_mode & VK_CULL_MODE_BACK_BIT)); + } + + if (states & RADV_CMD_DIRTY_DYNAMIC_FRONT_FACE) { + pa_su_sc_mode_cntl &= C_028814_FACE; + pa_su_sc_mode_cntl |= S_028814_FACE(d->front_face); + } + + radeon_set_context_reg(cmd_buffer->cs, R_028814_PA_SU_SC_MODE_CNTL, + pa_su_sc_mode_cntl); +} + +static void +radv_emit_primitive_topology(struct radv_cmd_buffer *cmd_buffer) +{ + struct radv_dynamic_state *d = &cmd_buffer->state.dynamic; + + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) { + radeon_set_uconfig_reg_idx(cmd_buffer->device->physical_device, + cmd_buffer->cs, + R_030908_VGT_PRIMITIVE_TYPE, 1, + d->primitive_topology); + } else { + radeon_set_config_reg(cmd_buffer->cs, + R_008958_VGT_PRIMITIVE_TYPE, + d->primitive_topology); + } +} + +static void +radv_emit_depth_control(struct radv_cmd_buffer *cmd_buffer, uint32_t states) +{ + unsigned db_depth_control = cmd_buffer->state.pipeline->graphics.db_depth_control; + struct radv_dynamic_state *d = &cmd_buffer->state.dynamic; + + if (states & RADV_CMD_DIRTY_DYNAMIC_DEPTH_TEST_ENABLE) { + db_depth_control &= C_028800_Z_ENABLE; + db_depth_control |= S_028800_Z_ENABLE(d->depth_test_enable ? 1 : 0); + } + + if (states & RADV_CMD_DIRTY_DYNAMIC_DEPTH_WRITE_ENABLE) { + db_depth_control &= C_028800_Z_WRITE_ENABLE; + db_depth_control |= S_028800_Z_WRITE_ENABLE(d->depth_write_enable ? 
1 : 0); + } + + if (states & RADV_CMD_DIRTY_DYNAMIC_DEPTH_COMPARE_OP) { + db_depth_control &= C_028800_ZFUNC; + db_depth_control |= S_028800_ZFUNC(d->depth_compare_op); + } + + if (states & RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE) { + db_depth_control &= C_028800_DEPTH_BOUNDS_ENABLE; + db_depth_control |= S_028800_DEPTH_BOUNDS_ENABLE(d->depth_bounds_test_enable ? 1 : 0); + } + + if (states & RADV_CMD_DIRTY_DYNAMIC_STENCIL_TEST_ENABLE) { + db_depth_control &= C_028800_STENCIL_ENABLE; + db_depth_control |= S_028800_STENCIL_ENABLE(d->stencil_test_enable ? 1 : 0); + + db_depth_control &= C_028800_BACKFACE_ENABLE; + db_depth_control |= S_028800_BACKFACE_ENABLE(d->stencil_test_enable ? 1 : 0); + } + + if (states & RADV_CMD_DIRTY_DYNAMIC_STENCIL_OP) { + db_depth_control &= C_028800_STENCILFUNC; + db_depth_control |= S_028800_STENCILFUNC(d->stencil_op.front.compare_op); + + db_depth_control &= C_028800_STENCILFUNC_BF; + db_depth_control |= S_028800_STENCILFUNC_BF(d->stencil_op.back.compare_op); + } + + radeon_set_context_reg(cmd_buffer->cs, R_028800_DB_DEPTH_CONTROL, + db_depth_control); +} + +static void +radv_emit_stencil_control(struct radv_cmd_buffer *cmd_buffer) +{ + struct radv_dynamic_state *d = &cmd_buffer->state.dynamic; + + radeon_set_context_reg(cmd_buffer->cs, R_02842C_DB_STENCIL_CONTROL, + S_02842C_STENCILFAIL(si_translate_stencil_op(d->stencil_op.front.fail_op)) | + S_02842C_STENCILZPASS(si_translate_stencil_op(d->stencil_op.front.pass_op)) | + S_02842C_STENCILZFAIL(si_translate_stencil_op(d->stencil_op.front.depth_fail_op)) | + S_02842C_STENCILFAIL_BF(si_translate_stencil_op(d->stencil_op.back.fail_op)) | + S_02842C_STENCILZPASS_BF(si_translate_stencil_op(d->stencil_op.back.pass_op)) | + S_02842C_STENCILZFAIL_BF(si_translate_stencil_op(d->stencil_op.back.depth_fail_op))); +} + static void radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer, int index, - struct radv_attachment_info *att, + struct radv_color_buffer_info *cb, struct radv_image_view *iview, - VkImageLayout layout) + VkImageLayout layout, + bool in_render_loop) { bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8; - struct radv_color_buffer_info *cb = &att->cb; uint32_t cb_color_info = cb->cb_color_info; struct radv_image *image = iview->image; - if (!radv_layout_dcc_compressed(image, layout, + if (!radv_layout_dcc_compressed(cmd_buffer->device, image, layout, in_render_loop, radv_image_queue_family_mask(image, cmd_buffer->queue_family_index, cmd_buffer->queue_family_index))) { cb_color_info &= C_028C70_DCC_ENABLE; } + if (!radv_layout_can_fast_clear(image, layout, in_render_loop, + radv_image_queue_family_mask(image, + cmd_buffer->queue_family_index, + cmd_buffer->queue_family_index))) { + cb_color_info &= C_028C70_COMPRESSION; + } + if (radv_image_is_tc_compat_cmask(image) && (radv_is_fmask_decompress_pipeline(cmd_buffer) || radv_is_dcc_decompress_pipeline(cmd_buffer))) { @@ -1309,6 +1574,19 @@ radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer, cb_color_info &= C_028C70_FMASK_COMPRESS_1FRAG_ONLY; } + if (radv_image_has_fmask(image) && + (radv_is_fmask_decompress_pipeline(cmd_buffer) || + radv_is_hw_resolve_pipeline(cmd_buffer))) { + /* Make sure FMASK is enabled if it has been cleared because: + * + * 1) it's required for FMASK_DECOMPRESS operations to avoid + * GPU hangs + * 2) it's necessary for CB_RESOLVE which can read compressed + * FMASK data anyways. 
+ */ + cb_color_info |= S_028C70_COMPRESSION(1); + } + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) { radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11); radeon_emit(cmd_buffer->cs, cb->cb_color_base); @@ -1394,20 +1672,22 @@ radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer, static void radv_update_zrange_precision(struct radv_cmd_buffer *cmd_buffer, struct radv_ds_buffer_info *ds, - struct radv_image *image, VkImageLayout layout, - bool requires_cond_exec) + const struct radv_image_view *iview, + VkImageLayout layout, + bool in_render_loop, bool requires_cond_exec) { + const struct radv_image *image = iview->image; uint32_t db_z_info = ds->db_z_info; uint32_t db_z_info_reg; - if (!cmd_buffer->device->physical_device->has_tc_compat_zrange_bug || + if (!cmd_buffer->device->physical_device->rad_info.has_tc_compat_zrange_bug || !radv_image_is_tc_compat_htile(image)) return; - if (!radv_layout_has_htile(image, layout, - radv_image_queue_family_mask(image, - cmd_buffer->queue_family_index, - cmd_buffer->queue_family_index))) { + if (!radv_layout_is_htile_compressed(image, layout, in_render_loop, + radv_image_queue_family_mask(image, + cmd_buffer->queue_family_index, + cmd_buffer->queue_family_index))) { db_z_info &= C_028040_TILE_SURFACE_ENABLE; } @@ -1424,8 +1704,7 @@ radv_update_zrange_precision(struct radv_cmd_buffer *cmd_buffer, * SET_CONTEXT_REG packet. */ if (requires_cond_exec) { - uint64_t va = radv_buffer_get_va(image->bo); - va += image->offset + image->tc_compat_zrange_offset; + uint64_t va = radv_get_tc_compat_zrange_va(image, iview->base_mip); radeon_emit(cmd_buffer->cs, PKT3(PKT3_COND_EXEC, 3, 0)); radeon_emit(cmd_buffer->cs, va); @@ -1440,16 +1719,18 @@ radv_update_zrange_precision(struct radv_cmd_buffer *cmd_buffer, static void radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer, struct radv_ds_buffer_info *ds, - struct radv_image *image, - VkImageLayout layout) + struct radv_image_view *iview, + VkImageLayout layout, + bool in_render_loop) { + const struct radv_image *image = iview->image; uint32_t db_z_info = ds->db_z_info; uint32_t db_stencil_info = ds->db_stencil_info; - if (!radv_layout_has_htile(image, layout, - radv_image_queue_family_mask(image, - cmd_buffer->queue_family_index, - cmd_buffer->queue_family_index))) { + if (!radv_layout_is_htile_compressed(image, layout, in_render_loop, + radv_image_queue_family_mask(image, + cmd_buffer->queue_family_index, + cmd_buffer->queue_family_index))) { db_z_info &= C_028040_TILE_SURFACE_ENABLE; db_stencil_info |= S_028044_TILE_STENCIL_DISABLE(1); } @@ -1458,6 +1739,20 @@ radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer, radeon_set_context_reg(cmd_buffer->cs, R_028ABC_DB_HTILE_SURFACE, ds->db_htile_surface); if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) { + /* Enable HTILE caching in L2 for small chips. 
*/ + unsigned meta_write_policy, meta_read_policy; + /* TODO: investigate whether LRU improves performance on other chips too */ + if (cmd_buffer->device->physical_device->rad_info.num_render_backends <= 4) { + meta_write_policy = V_02807C_CACHE_LRU_WR; /* cache writes */ + meta_read_policy = V_02807C_CACHE_LRU_RD; /* cache reads */ + } else { + meta_write_policy = V_02807C_CACHE_STREAM_WR; /* write combine */ + meta_read_policy = V_02807C_CACHE_NOA_RD; /* don't cache reads */ + } + + bool zs_big_page = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10_3 && + (image->alignment % (64 * 1024) == 0); + radeon_set_context_reg(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, ds->db_htile_data_base); radeon_set_context_reg(cmd_buffer->cs, R_02801C_DB_DEPTH_SIZE_XY, ds->db_depth_size); @@ -1470,12 +1765,22 @@ radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer, radeon_emit(cmd_buffer->cs, ds->db_z_read_base); radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base); - radeon_set_context_reg_seq(cmd_buffer->cs, R_028068_DB_Z_READ_BASE_HI, 5); + radeon_set_context_reg_seq(cmd_buffer->cs, R_028068_DB_Z_READ_BASE_HI, 6); radeon_emit(cmd_buffer->cs, ds->db_z_read_base >> 32); radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base >> 32); radeon_emit(cmd_buffer->cs, ds->db_z_read_base >> 32); radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base >> 32); radeon_emit(cmd_buffer->cs, ds->db_htile_data_base >> 32); + radeon_emit(cmd_buffer->cs, + S_02807C_Z_WR_POLICY(V_02807C_CACHE_STREAM_WR) | + S_02807C_S_WR_POLICY(V_02807C_CACHE_STREAM_WR) | + S_02807C_HTILE_WR_POLICY(meta_write_policy) | + S_02807C_ZPCPSD_WR_POLICY(V_02807C_CACHE_STREAM_WR) | + S_02807C_Z_RD_POLICY(V_02807C_CACHE_NOA_RD) | + S_02807C_S_RD_POLICY(V_02807C_CACHE_NOA_RD) | + S_02807C_HTILE_RD_POLICY(meta_read_policy) | + S_02807C_Z_BIG_PAGE(zs_big_page) | + S_02807C_S_BIG_PAGE(zs_big_page)); } else if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) { radeon_set_context_reg_seq(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, 3); radeon_emit(cmd_buffer->cs, ds->db_htile_data_base); @@ -1514,7 +1819,8 @@ radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer, } /* Update the ZRANGE_PRECISION value for the TC-compat bug. 
*/ - radv_update_zrange_precision(cmd_buffer, ds, image, layout, true); + radv_update_zrange_precision(cmd_buffer, ds, iview, layout, + in_render_loop, true); radeon_set_context_reg(cmd_buffer->cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL, ds->pa_su_poly_offset_db_fmt_cntl); @@ -1526,30 +1832,38 @@ radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer, */ static void radv_update_bound_fast_clear_ds(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image, + const struct radv_image_view *iview, VkClearDepthStencilValue ds_clear_value, VkImageAspectFlags aspects) { - struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer; const struct radv_subpass *subpass = cmd_buffer->state.subpass; + const struct radv_image *image = iview->image; struct radeon_cmdbuf *cs = cmd_buffer->cs; - struct radv_attachment_info *att; uint32_t att_idx; - if (!framebuffer || !subpass) + if (!cmd_buffer->state.attachments || !subpass) return; if (!subpass->depth_stencil_attachment) return; att_idx = subpass->depth_stencil_attachment->attachment; - att = &framebuffer->attachments[att_idx]; - if (att->attachment->image != image) + if (cmd_buffer->state.attachments[att_idx].iview->image != image) return; - radeon_set_context_reg_seq(cs, R_028028_DB_STENCIL_CLEAR, 2); - radeon_emit(cs, ds_clear_value.stencil); - radeon_emit(cs, fui(ds_clear_value.depth)); + if (aspects == (VK_IMAGE_ASPECT_DEPTH_BIT | + VK_IMAGE_ASPECT_STENCIL_BIT)) { + radeon_set_context_reg_seq(cs, R_028028_DB_STENCIL_CLEAR, 2); + radeon_emit(cs, ds_clear_value.stencil); + radeon_emit(cs, fui(ds_clear_value.depth)); + } else if (aspects == VK_IMAGE_ASPECT_DEPTH_BIT) { + radeon_set_context_reg_seq(cs, R_02802C_DB_DEPTH_CLEAR, 1); + radeon_emit(cs, fui(ds_clear_value.depth)); + } else { + assert(aspects == VK_IMAGE_ASPECT_STENCIL_BIT); + radeon_set_context_reg_seq(cs, R_028028_DB_STENCIL_CLEAR, 1); + radeon_emit(cs, ds_clear_value.stencil); + } /* Update the ZRANGE_PRECISION value for the TC-compat bug. This is * only needed when clearing Z to 0.0. @@ -1557,9 +1871,10 @@ radv_update_bound_fast_clear_ds(struct radv_cmd_buffer *cmd_buffer, if ((aspects & VK_IMAGE_ASPECT_DEPTH_BIT) && ds_clear_value.depth == 0.0) { VkImageLayout layout = subpass->depth_stencil_attachment->layout; + bool in_render_loop = subpass->depth_stencil_attachment->in_render_loop; - radv_update_zrange_precision(cmd_buffer, &att->ds, image, - layout, false); + radv_update_zrange_precision(cmd_buffer, &cmd_buffer->state.attachments[att_idx].ds, + iview, layout, in_render_loop, false); } cmd_buffer->state.context_roll_without_scissor_emitted = true; @@ -1571,34 +1886,51 @@ radv_update_bound_fast_clear_ds(struct radv_cmd_buffer *cmd_buffer, static void radv_set_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image, + const VkImageSubresourceRange *range, VkClearDepthStencilValue ds_clear_value, VkImageAspectFlags aspects) { struct radeon_cmdbuf *cs = cmd_buffer->cs; - uint64_t va = radv_buffer_get_va(image->bo); - unsigned reg_offset = 0, reg_count = 0; + uint64_t va = radv_get_ds_clear_value_va(image, range->baseMipLevel); + uint32_t level_count = radv_get_levelCount(image, range); - va += image->offset + image->clear_value_offset; + if (aspects == (VK_IMAGE_ASPECT_DEPTH_BIT | + VK_IMAGE_ASPECT_STENCIL_BIT)) { + /* Use the fastest way when both aspects are used. 
*/ + radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + 2 * level_count, cmd_buffer->state.predicating)); + radeon_emit(cs, S_370_DST_SEL(V_370_MEM) | + S_370_WR_CONFIRM(1) | + S_370_ENGINE_SEL(V_370_PFP)); + radeon_emit(cs, va); + radeon_emit(cs, va >> 32); - if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) { - ++reg_count; + for (uint32_t l = 0; l < level_count; l++) { + radeon_emit(cs, ds_clear_value.stencil); + radeon_emit(cs, fui(ds_clear_value.depth)); + } } else { - ++reg_offset; - va += 4; - } - if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT) - ++reg_count; + /* Otherwise we need one WRITE_DATA packet per level. */ + for (uint32_t l = 0; l < level_count; l++) { + uint64_t va = radv_get_ds_clear_value_va(image, range->baseMipLevel + l); + unsigned value; + + if (aspects == VK_IMAGE_ASPECT_DEPTH_BIT) { + value = fui(ds_clear_value.depth); + va += 4; + } else { + assert(aspects == VK_IMAGE_ASPECT_STENCIL_BIT); + value = ds_clear_value.stencil; + } - radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, cmd_buffer->state.predicating)); - radeon_emit(cs, S_370_DST_SEL(V_370_MEM) | - S_370_WR_CONFIRM(1) | - S_370_ENGINE_SEL(V_370_PFP)); - radeon_emit(cs, va); - radeon_emit(cs, va >> 32); - if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) - radeon_emit(cs, ds_clear_value.stencil); - if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT) - radeon_emit(cs, fui(ds_clear_value.depth)); + radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, cmd_buffer->state.predicating)); + radeon_emit(cs, S_370_DST_SEL(V_370_MEM) | + S_370_WR_CONFIRM(1) | + S_370_ENGINE_SEL(V_370_PFP)); + radeon_emit(cs, va); + radeon_emit(cs, va >> 32); + radeon_emit(cs, value); + } + } } /** @@ -1607,30 +1939,40 @@ radv_set_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer, static void radv_set_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image, + const VkImageSubresourceRange *range, uint32_t value) { struct radeon_cmdbuf *cs = cmd_buffer->cs; - uint64_t va = radv_buffer_get_va(image->bo); - if (!cmd_buffer->device->physical_device->has_tc_compat_zrange_bug) + if (!cmd_buffer->device->physical_device->rad_info.has_tc_compat_zrange_bug) return; - va += image->offset + image->tc_compat_zrange_offset; + uint64_t va = radv_get_tc_compat_zrange_va(image, range->baseMipLevel); + uint32_t level_count = radv_get_levelCount(image, range); - radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, cmd_buffer->state.predicating)); + radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + level_count, cmd_buffer->state.predicating)); radeon_emit(cs, S_370_DST_SEL(V_370_MEM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_PFP)); radeon_emit(cs, va); radeon_emit(cs, va >> 32); - radeon_emit(cs, value); + + for (uint32_t l = 0; l < level_count; l++) + radeon_emit(cs, value); } static void radv_update_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image, + const struct radv_image_view *iview, VkClearDepthStencilValue ds_clear_value) { + VkImageSubresourceRange range = { + .aspectMask = iview->aspect_mask, + .baseMipLevel = iview->base_mip, + .levelCount = iview->level_count, + .baseArrayLayer = iview->base_layer, + .layerCount = iview->layer_count, + }; uint32_t cond_val; /* Conditionally set DB_Z_INFO.ZRANGE_PRECISION to 0 when the last @@ -1638,7 +1980,8 @@ radv_update_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer, */ cond_val = ds_clear_value.depth == 0.0f ? 
UINT_MAX : 0; - radv_set_tc_compat_zrange_metadata(cmd_buffer, image, cond_val); + radv_set_tc_compat_zrange_metadata(cmd_buffer, iview->image, &range, + cond_val); } /** @@ -1646,22 +1989,32 @@ radv_update_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer, */ void radv_update_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image, + const struct radv_image_view *iview, VkClearDepthStencilValue ds_clear_value, VkImageAspectFlags aspects) { + VkImageSubresourceRange range = { + .aspectMask = iview->aspect_mask, + .baseMipLevel = iview->base_mip, + .levelCount = iview->level_count, + .baseArrayLayer = iview->base_layer, + .layerCount = iview->layer_count, + }; + struct radv_image *image = iview->image; + assert(radv_image_has_htile(image)); - radv_set_ds_clear_metadata(cmd_buffer, image, ds_clear_value, aspects); + radv_set_ds_clear_metadata(cmd_buffer, iview->image, &range, + ds_clear_value, aspects); if (radv_image_is_tc_compat_htile(image) && (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)) { - radv_update_tc_compat_zrange_metadata(cmd_buffer, image, + radv_update_tc_compat_zrange_metadata(cmd_buffer, iview, ds_clear_value); } - radv_update_bound_fast_clear_ds(cmd_buffer, image, ds_clear_value, - aspects); + radv_update_bound_fast_clear_ds(cmd_buffer, iview, ds_clear_value, + aspects); } /** @@ -1669,15 +2022,14 @@ radv_update_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer, */ static void radv_load_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer, - struct radv_image *image) + const struct radv_image_view *iview) { struct radeon_cmdbuf *cs = cmd_buffer->cs; + const struct radv_image *image = iview->image; VkImageAspectFlags aspects = vk_format_aspects(image->vk_format); - uint64_t va = radv_buffer_get_va(image->bo); + uint64_t va = radv_get_ds_clear_value_va(image, iview->base_mip); unsigned reg_offset = 0, reg_count = 0; - va += image->offset + image->clear_value_offset; - if (!radv_image_has_htile(image)) return; @@ -1692,8 +2044,8 @@ radv_load_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer, uint32_t reg = R_028028_DB_STENCIL_CLEAR + 4 * reg_offset; - if (cmd_buffer->device->physical_device->has_load_ctx_reg_pkt) { - radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG, 3, 0)); + if (cmd_buffer->device->physical_device->rad_info.has_load_ctx_reg_pkt) { + radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG_INDEX, 3, 0)); radeon_emit(cs, va); radeon_emit(cs, va >> 32); radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2); @@ -1780,21 +2132,18 @@ radv_update_bound_fast_clear_color(struct radv_cmd_buffer *cmd_buffer, int cb_idx, uint32_t color_values[2]) { - struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer; const struct radv_subpass *subpass = cmd_buffer->state.subpass; struct radeon_cmdbuf *cs = cmd_buffer->cs; - struct radv_attachment_info *att; uint32_t att_idx; - if (!framebuffer || !subpass) + if (!cmd_buffer->state.attachments || !subpass) return; att_idx = subpass->color_attachments[cb_idx].attachment; if (att_idx == VK_ATTACHMENT_UNUSED) return; - att = &framebuffer->attachments[att_idx]; - if (att->attachment->image != image) + if (cmd_buffer->state.attachments[att_idx].iview->image != image) return; radeon_set_context_reg_seq(cs, R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c, 2); @@ -1879,8 +2228,8 @@ radv_load_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer, uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c; - if (cmd_buffer->device->physical_device->has_load_ctx_reg_pkt) { - radeon_emit(cs, 
PKT3(PKT3_LOAD_CONTEXT_REG, 3, cmd_buffer->state.predicating)); + if (cmd_buffer->device->physical_device->rad_info.has_load_ctx_reg_pkt) { + radeon_emit(cs, PKT3(PKT3_LOAD_CONTEXT_REG_INDEX, 3, cmd_buffer->state.predicating)); radeon_emit(cs, va); radeon_emit(cs, va >> 32); radeon_emit(cs, (reg - SI_CONTEXT_REG_OFFSET) >> 2); @@ -1906,6 +2255,7 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer) int i; struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer; const struct radv_subpass *subpass = cmd_buffer->state.subpass; + bool color_big_page = true; /* this may happen for inherited secondary recording */ if (!framebuffer) @@ -1919,39 +2269,39 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer) } int idx = subpass->color_attachments[i].attachment; - struct radv_attachment_info *att = &framebuffer->attachments[idx]; - struct radv_image_view *iview = att->attachment; + struct radv_image_view *iview = cmd_buffer->state.attachments[idx].iview; VkImageLayout layout = subpass->color_attachments[i].layout; + bool in_render_loop = subpass->color_attachments[i].in_render_loop; - radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo); + radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, iview->bo); - assert(att->attachment->aspect_mask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT | + assert(iview->aspect_mask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT)); - radv_emit_fb_color_state(cmd_buffer, i, att, iview, layout); + radv_emit_fb_color_state(cmd_buffer, i, &cmd_buffer->state.attachments[idx].cb, iview, layout, in_render_loop); radv_load_color_clear_metadata(cmd_buffer, iview, i); + + /* BIG_PAGE is an optimization that can only be enabled if all + * color targets are compatible. 
+ */ + color_big_page &= cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10_3 && + (iview->image->alignment % (64 * 1024) == 0); } if (subpass->depth_stencil_attachment) { int idx = subpass->depth_stencil_attachment->attachment; VkImageLayout layout = subpass->depth_stencil_attachment->layout; - struct radv_attachment_info *att = &framebuffer->attachments[idx]; - struct radv_image *image = att->attachment->image; - radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo); - ASSERTED uint32_t queue_mask = radv_image_queue_family_mask(image, - cmd_buffer->queue_family_index, - cmd_buffer->queue_family_index); - /* We currently don't support writing decompressed HTILE */ - assert(radv_layout_has_htile(image, layout, queue_mask) == - radv_layout_is_htile_compressed(image, layout, queue_mask)); - - radv_emit_fb_ds_state(cmd_buffer, &att->ds, image, layout); - - if (att->ds.offset_scale != cmd_buffer->state.offset_scale) { + bool in_render_loop = subpass->depth_stencil_attachment->in_render_loop; + struct radv_image_view *iview = cmd_buffer->state.attachments[idx].iview; + radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, cmd_buffer->state.attachments[idx].iview->bo); + + radv_emit_fb_ds_state(cmd_buffer, &cmd_buffer->state.attachments[idx].ds, iview, layout, in_render_loop); + + if (cmd_buffer->state.attachments[idx].ds.offset_scale != cmd_buffer->state.offset_scale) { cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS; - cmd_buffer->state.offset_scale = att->ds.offset_scale; + cmd_buffer->state.offset_scale = cmd_buffer->state.attachments[idx].ds.offset_scale; } - radv_load_ds_clear_metadata(cmd_buffer, image); + radv_load_ds_clear_metadata(cmd_buffer, iview); } else { if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 2); @@ -1967,7 +2317,7 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer) if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX8) { bool disable_constant_encode = - cmd_buffer->device->physical_device->has_dcc_constant_encode; + cmd_buffer->device->physical_device->rad_info.has_dcc_constant_encode; enum chip_class chip_class = cmd_buffer->device->physical_device->rad_info.chip_class; uint8_t watermark = chip_class >= GFX10 ? 6 : 4; @@ -1978,7 +2328,32 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer) S_028424_DISABLE_CONSTANT_ENCODE_REG(disable_constant_encode)); } - if (cmd_buffer->device->pbb_allowed) { + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) { + /* Enable CMASK/FMASK/DCC caching in L2 for small chips. 
*/ + unsigned meta_write_policy, meta_read_policy; + /* TODO: investigate whether LRU improves performance on other chips too */ + if (cmd_buffer->device->physical_device->rad_info.num_render_backends <= 4) { + meta_write_policy = V_02807C_CACHE_LRU_WR; /* cache writes */ + meta_read_policy = V_02807C_CACHE_LRU_RD; /* cache reads */ + } else { + meta_write_policy = V_02807C_CACHE_STREAM_WR; /* write combine */ + meta_read_policy = V_02807C_CACHE_NOA_RD; /* don't cache reads */ + } + + radeon_set_context_reg(cmd_buffer->cs, R_028410_CB_RMI_GL2_CACHE_CONTROL, + S_028410_CMASK_WR_POLICY(meta_write_policy) | + S_028410_FMASK_WR_POLICY(meta_write_policy) | + S_028410_DCC_WR_POLICY(meta_write_policy) | + S_028410_COLOR_WR_POLICY(V_028410_CACHE_STREAM_WR) | + S_028410_CMASK_RD_POLICY(meta_read_policy) | + S_028410_FMASK_RD_POLICY(meta_read_policy) | + S_028410_DCC_RD_POLICY(meta_read_policy) | + S_028410_COLOR_RD_POLICY(V_028410_CACHE_NOA_RD) | + S_028410_FMASK_BIG_PAGE(color_big_page) | + S_028410_COLOR_BIG_PAGE(color_big_page)); + } + + if (cmd_buffer->device->dfsm_allowed) { radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0)); } @@ -1987,7 +2362,7 @@ radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer) } static void -radv_emit_index_buffer(struct radv_cmd_buffer *cmd_buffer) +radv_emit_index_buffer(struct radv_cmd_buffer *cmd_buffer, bool indirect) { struct radeon_cmdbuf *cs = cmd_buffer->cs; struct radv_cmd_state *state = &cmd_buffer->state; @@ -2005,6 +2380,11 @@ radv_emit_index_buffer(struct radv_cmd_buffer *cmd_buffer) state->last_index_type = state->index_type; } + /* For the direct indexed draws we use DRAW_INDEX_2, which includes + * the index_va and max_index_count already. */ + if (!indirect) + return; + radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0)); radeon_emit(cs, state->index_va); radeon_emit(cs, state->index_va >> 32); @@ -2045,8 +2425,11 @@ void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer) bool gfx10_perfect = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10 && has_perfect_queries; if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) { + /* Always enable PERFECT_ZPASS_COUNTS due to issues with partially + * covered tiles, discards, and early depth testing. 
For more details, + * see https://gitlab.freedesktop.org/mesa/mesa/-/issues/3218 */ db_count_control = - S_028004_PERFECT_ZPASS_COUNTS(has_perfect_queries) | + S_028004_PERFECT_ZPASS_COUNTS(1) | S_028004_DISABLE_CONSERVATIVE_ZPASS_COUNTS(gfx10_perfect) | S_028004_SAMPLE_RATE(sample_rate) | S_028004_ZPASS_ENABLE(1) | @@ -2087,7 +2470,7 @@ radv_cmd_buffer_flush_dynamic_state(struct radv_cmd_buffer *cmd_buffer) radv_emit_viewport(cmd_buffer); if (states & (RADV_CMD_DIRTY_DYNAMIC_SCISSOR | RADV_CMD_DIRTY_DYNAMIC_VIEWPORT) && - !cmd_buffer->device->physical_device->has_scissor_bug) + !cmd_buffer->device->physical_device->rad_info.has_gfx9_scissor_bug) radv_emit_scissor(cmd_buffer); if (states & RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH) @@ -2113,6 +2496,27 @@ radv_cmd_buffer_flush_dynamic_state(struct radv_cmd_buffer *cmd_buffer) if (states & RADV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS) radv_emit_sample_locations(cmd_buffer); + if (states & RADV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE) + radv_emit_line_stipple(cmd_buffer); + + if (states & (RADV_CMD_DIRTY_DYNAMIC_CULL_MODE | + RADV_CMD_DIRTY_DYNAMIC_FRONT_FACE)) + radv_emit_culling(cmd_buffer, states); + + if (states & RADV_CMD_DIRTY_DYNAMIC_PRIMITIVE_TOPOLOGY) + radv_emit_primitive_topology(cmd_buffer); + + if (states & (RADV_CMD_DIRTY_DYNAMIC_DEPTH_TEST_ENABLE | + RADV_CMD_DIRTY_DYNAMIC_DEPTH_WRITE_ENABLE | + RADV_CMD_DIRTY_DYNAMIC_DEPTH_COMPARE_OP | + RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE | + RADV_CMD_DIRTY_DYNAMIC_STENCIL_TEST_ENABLE | + RADV_CMD_DIRTY_DYNAMIC_STENCIL_OP)) + radv_emit_depth_control(cmd_buffer, states); + + if (states & RADV_CMD_DIRTY_DYNAMIC_STENCIL_OP) + radv_emit_stencil_control(cmd_buffer); + cmd_buffer->state.dirty &= ~states; } @@ -2271,14 +2675,15 @@ radv_flush_constants(struct radv_cmd_buffer *cmd_buffer, return; radv_foreach_stage(stage, stages) { - if (!pipeline->shaders[stage]) + shader = radv_get_shader(pipeline, stage); + if (!shader) continue; - need_push_constants |= pipeline->shaders[stage]->info.info.loads_push_constants; - need_push_constants |= pipeline->shaders[stage]->info.info.loads_dynamic_offsets; + need_push_constants |= shader->info.loads_push_constants; + need_push_constants |= shader->info.loads_dynamic_offsets; - uint8_t base = pipeline->shaders[stage]->info.info.base_inline_push_consts; - uint8_t count = pipeline->shaders[stage]->info.info.num_inline_push_consts; + uint8_t base = shader->info.base_inline_push_consts; + uint8_t count = shader->info.num_inline_push_consts; radv_emit_inline_push_consts(cmd_buffer, pipeline, stage, AC_UD_INLINE_PUSH_CONSTANTS, @@ -2329,8 +2734,7 @@ radv_flush_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer, if ((pipeline_is_dirty || (cmd_buffer->state.dirty & RADV_CMD_DIRTY_VERTEX_BUFFER)) && cmd_buffer->state.pipeline->num_vertex_bindings && - radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.info.vs.has_vertex_buffers) { - struct radv_vertex_elements_info *velems = &cmd_buffer->state.pipeline->vertex_elements; + radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.vs.has_vertex_buffers) { unsigned vb_offset; void *vb_ptr; uint32_t i = 0; @@ -2346,7 +2750,8 @@ radv_flush_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer, uint32_t *desc = &((uint32_t *)vb_ptr)[i * 4]; uint32_t offset; struct radv_buffer *buffer = cmd_buffer->vertex_bindings[i].buffer; - uint32_t stride = cmd_buffer->state.pipeline->binding_stride[i]; + unsigned num_records; + unsigned stride; if (!buffer) continue; @@ -2355,25 +2760,46 @@ 
radv_flush_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer, offset = cmd_buffer->vertex_bindings[i].offset; va += offset + buffer->offset; - desc[0] = va; - desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride); - if (cmd_buffer->device->physical_device->rad_info.chip_class <= GFX7 && stride) - desc[2] = (buffer->size - offset - velems->format_size[i]) / stride + 1; - else - desc[2] = buffer->size - offset; - desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | - S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) | - S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | - S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W); + + if (cmd_buffer->vertex_bindings[i].size) { + num_records = cmd_buffer->vertex_bindings[i].size; + } else { + num_records = buffer->size - offset; + } + + if (cmd_buffer->state.pipeline->graphics.uses_dynamic_stride) { + stride = cmd_buffer->vertex_bindings[i].stride; + } else { + stride = cmd_buffer->state.pipeline->binding_stride[i]; + } + + if (cmd_buffer->device->physical_device->rad_info.chip_class != GFX8 && stride) + num_records /= stride; + + uint32_t rsrc_word3 = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | + S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) | + S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | + S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W); if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) { - desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_UINT) | - S_008F0C_OOB_SELECT(1) | - S_008F0C_RESOURCE_LEVEL(1); + /* OOB_SELECT chooses the out-of-bounds check: + * - 1: index >= NUM_RECORDS (Structured) + * - 3: offset >= NUM_RECORDS (Raw) + */ + int oob_select = stride ? V_008F0C_OOB_SELECT_STRUCTURED : V_008F0C_OOB_SELECT_RAW; + + rsrc_word3 |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_UINT) | + S_008F0C_OOB_SELECT(oob_select) | + S_008F0C_RESOURCE_LEVEL(1); } else { - desc[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_UINT) | - S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32); + rsrc_word3 |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_UINT) | + S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32); } + + desc[0] = va; + desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride); + desc[2] = num_records; + desc[3] = rsrc_word3; } va = radv_buffer_get_va(cmd_buffer->upload.upload_bo); @@ -2455,21 +2881,32 @@ radv_flush_streamout_descriptors(struct radv_cmd_buffer *cmd_buffer) * the buffer will be considered not bound and store * instructions will be no-ops. */ - desc[0] = va; - desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32); - desc[2] = 0xffffffff; - desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | - S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) | - S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | - S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W); + uint32_t size = 0xffffffff; + + /* Compute the correct buffer size for NGG streamout + * because it's used to determine the max emit per + * buffer. 
+ */ + if (cmd_buffer->device->physical_device->use_ngg_streamout) + size = buffer->size - sb[i].offset; + + uint32_t rsrc_word3 = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | + S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) | + S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | + S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W); if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) { - desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) | - S_008F0C_OOB_SELECT(3) | - S_008F0C_RESOURCE_LEVEL(1); + rsrc_word3 |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) | + S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) | + S_008F0C_RESOURCE_LEVEL(1); } else { - desc[3] |= S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32); + rsrc_word3 |= S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32); } + + desc[0] = va; + desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32); + desc[2] = size; + desc[3] = rsrc_word3; } va = radv_buffer_get_va(cmd_buffer->upload.upload_bo); @@ -2481,6 +2918,35 @@ radv_flush_streamout_descriptors(struct radv_cmd_buffer *cmd_buffer) cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_STREAMOUT_BUFFER; } +static void +radv_flush_ngg_gs_state(struct radv_cmd_buffer *cmd_buffer) +{ + struct radv_pipeline *pipeline = cmd_buffer->state.pipeline; + struct radv_userdata_info *loc; + uint32_t ngg_gs_state = 0; + uint32_t base_reg; + + if (!radv_pipeline_has_gs(pipeline) || + !radv_pipeline_has_ngg(pipeline)) + return; + + /* By default NGG GS queries are disabled but they are enabled if the + * command buffer has active GDS queries or if it's a secondary command + * buffer that inherits the number of generated primitives. + */ + if (cmd_buffer->state.active_pipeline_gds_queries || + (cmd_buffer->state.inherited_pipeline_statistics & VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT)) + ngg_gs_state = 1; + + loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_GEOMETRY, + AC_UD_NGG_GS_STATE); + base_reg = pipeline->user_data_0[MESA_SHADER_GEOMETRY]; + assert(loc->sgpr_idx != -1); + + radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, + ngg_gs_state); +} + static void radv_upload_graphics_shader_descriptors(struct radv_cmd_buffer *cmd_buffer, bool pipeline_is_dirty) { @@ -2488,6 +2954,7 @@ radv_upload_graphics_shader_descriptors(struct radv_cmd_buffer *cmd_buffer, bool radv_flush_streamout_descriptors(cmd_buffer); radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS); radv_flush_constants(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS); + radv_flush_ngg_gs_state(cmd_buffer); } struct radv_draw_info { @@ -2564,6 +3031,7 @@ si_emit_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer, { struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info; struct radv_cmd_state *state = &cmd_buffer->state; + unsigned topology = state->dynamic.primitive_topology; struct radeon_cmdbuf *cs = cmd_buffer->cs; unsigned ia_multi_vgt_param; @@ -2571,7 +3039,8 @@ si_emit_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer, si_get_ia_multi_vgt_param(cmd_buffer, instanced_draw, indirect_draw, count_from_stream_output, - draw_vertex_count); + draw_vertex_count, + topology); if (state->last_ia_multi_vgt_param != ia_multi_vgt_param) { if (info->chip_class == GFX9) { @@ -2727,6 +3196,17 @@ radv_src_access_flush(struct radv_cmd_buffer *cmd_buffer, RADV_CMD_FLAG_FLUSH_AND_INV_DB | RADV_CMD_FLAG_INV_L2; + if (flush_CB_meta) + flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; + if (flush_DB_meta) + flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META; + break; + case VK_ACCESS_MEMORY_WRITE_BIT: + flush_bits |= 
RADV_CMD_FLAG_INV_L2 | + RADV_CMD_FLAG_WB_L2 | + RADV_CMD_FLAG_FLUSH_AND_INV_CB | + RADV_CMD_FLAG_FLUSH_AND_INV_DB; + if (flush_CB_meta) flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; if (flush_DB_meta) @@ -2794,6 +3274,10 @@ radv_dst_access_flush(struct radv_cmd_buffer *cmd_buffer, break; case VK_ACCESS_SHADER_READ_BIT: flush_bits |= RADV_CMD_FLAG_INV_VCACHE; + /* Unlike LLVM, ACO uses SMEM for SSBOs and we have to + * invalidate the scalar cache. */ + if (!cmd_buffer->device->physical_device->use_llvm) + flush_bits |= RADV_CMD_FLAG_INV_SCACHE; if (!image_is_coherent) flush_bits |= RADV_CMD_FLAG_INV_L2; @@ -2810,6 +3294,19 @@ radv_dst_access_flush(struct radv_cmd_buffer *cmd_buffer, if (flush_DB_meta) flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META; break; + case VK_ACCESS_MEMORY_READ_BIT: + flush_bits |= RADV_CMD_FLAG_INV_VCACHE | + RADV_CMD_FLAG_INV_SCACHE | + RADV_CMD_FLAG_INV_L2; + if (flush_CB) + flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB; + if (flush_CB_meta) + flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; + if (flush_DB) + flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB; + if (flush_DB_meta) + flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META; + break; default: break; } @@ -2847,7 +3344,7 @@ radv_get_attachment_sample_locations(struct radv_cmd_buffer *cmd_buffer, { struct radv_cmd_state *state = &cmd_buffer->state; uint32_t subpass_id = radv_get_subpass_id(cmd_buffer); - struct radv_image_view *view = state->framebuffer->attachments[att_idx].attachment; + struct radv_image_view *view = state->attachments[att_idx].iview; if (view->image->info.samples == 1) return NULL; @@ -2886,10 +3383,10 @@ static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buf bool begin_subpass) { unsigned idx = att.attachment; - struct radv_image_view *view = cmd_buffer->state.framebuffer->attachments[idx].attachment; + struct radv_image_view *view = cmd_buffer->state.attachments[idx].iview; struct radv_sample_locations_state *sample_locs; VkImageSubresourceRange range; - range.aspectMask = 0; + range.aspectMask = view->aspect_mask; range.baseMipLevel = view->base_mip; range.levelCount = 1; range.baseArrayLayer = view->base_layer; @@ -2912,12 +3409,49 @@ static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buf sample_locs = radv_get_attachment_sample_locations(cmd_buffer, idx, begin_subpass); - radv_handle_image_transition(cmd_buffer, - view->image, - cmd_buffer->state.attachments[idx].current_layout, - att.layout, 0, 0, &range, sample_locs); + /* Determine if the subpass uses separate depth/stencil layouts. */ + bool uses_separate_depth_stencil_layouts = false; + if ((cmd_buffer->state.attachments[idx].current_layout != + cmd_buffer->state.attachments[idx].current_stencil_layout) || + (att.layout != att.stencil_layout)) { + uses_separate_depth_stencil_layouts = true; + } + + /* For separate layouts, perform depth and stencil transitions + * separately. + */ + if (uses_separate_depth_stencil_layouts && + (range.aspectMask == (VK_IMAGE_ASPECT_DEPTH_BIT | + VK_IMAGE_ASPECT_STENCIL_BIT))) { + /* Depth-only transitions. */ + range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; + radv_handle_image_transition(cmd_buffer, + view->image, + cmd_buffer->state.attachments[idx].current_layout, + cmd_buffer->state.attachments[idx].current_in_render_loop, + att.layout, att.in_render_loop, + 0, 0, &range, sample_locs); + + /* Stencil-only transitions. 
*/ + range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT; + radv_handle_image_transition(cmd_buffer, + view->image, + cmd_buffer->state.attachments[idx].current_stencil_layout, + cmd_buffer->state.attachments[idx].current_in_render_loop, + att.stencil_layout, att.in_render_loop, + 0, 0, &range, sample_locs); + } else { + radv_handle_image_transition(cmd_buffer, + view->image, + cmd_buffer->state.attachments[idx].current_layout, + cmd_buffer->state.attachments[idx].current_in_render_loop, + att.layout, att.in_render_loop, + 0, 0, &range, sample_locs); + } cmd_buffer->state.attachments[idx].current_layout = att.layout; + cmd_buffer->state.attachments[idx].current_stencil_layout = att.stencil_layout; + cmd_buffer->state.attachments[idx].current_in_render_loop = att.in_render_loop; } @@ -2940,7 +3474,6 @@ radv_cmd_state_setup_sample_locations(struct radv_cmd_buffer *cmd_buffer, vk_find_struct_const(info->pNext, RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT); struct radv_cmd_state *state = &cmd_buffer->state; - struct radv_framebuffer *framebuffer = state->framebuffer; if (!sample_locs) { state->subpass_sample_locs = NULL; @@ -2951,8 +3484,7 @@ radv_cmd_state_setup_sample_locations(struct radv_cmd_buffer *cmd_buffer, const VkAttachmentSampleLocationsEXT *att_sample_locs = &sample_locs->pAttachmentInitialSampleLocations[i]; uint32_t att_idx = att_sample_locs->attachmentIndex; - struct radv_attachment_info *att = &framebuffer->attachments[att_idx]; - struct radv_image *image = att->attachment->image; + struct radv_image *image = cmd_buffer->state.attachments[att_idx].iview->image; assert(vk_format_is_depth_or_stencil(image->vk_format)); @@ -3020,6 +3552,13 @@ radv_cmd_state_setup_attachments(struct radv_cmd_buffer *cmd_buffer, const VkRenderPassBeginInfo *info) { struct radv_cmd_state *state = &cmd_buffer->state; + const struct VkRenderPassAttachmentBeginInfo *attachment_info = NULL; + + if (info) { + attachment_info = vk_find_struct_const(info->pNext, + RENDER_PASS_ATTACHMENT_BEGIN_INFO); + } + if (pass->attachment_count == 0) { state->attachments = NULL; @@ -3068,7 +3607,23 @@ radv_cmd_state_setup_attachments(struct radv_cmd_buffer *cmd_buffer, } state->attachments[i].current_layout = att->initial_layout; + state->attachments[i].current_in_render_loop = false; + state->attachments[i].current_stencil_layout = att->stencil_initial_layout; state->attachments[i].sample_location.count = 0; + + struct radv_image_view *iview; + if (attachment_info && attachment_info->attachmentCount > i) { + iview = radv_image_view_from_handle(attachment_info->pAttachments[i]); + } else { + iview = state->framebuffer->attachments[i]; + } + + state->attachments[i].iview = iview; + if (iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) { + radv_initialise_ds_surface(cmd_buffer->device, &state->attachments[i].ds, iview); + } else { + radv_initialise_color_surface(cmd_buffer->device, &state->attachments[i].cb, iview); + } } return VK_SUCCESS; @@ -3087,14 +3642,13 @@ VkResult radv_AllocateCommandBuffers( for (i = 0; i < pAllocateInfo->commandBufferCount; i++) { - if (!list_empty(&pool->free_cmd_buffers)) { + if (!list_is_empty(&pool->free_cmd_buffers)) { struct radv_cmd_buffer *cmd_buffer = list_first_entry(&pool->free_cmd_buffers, struct radv_cmd_buffer, pool_link); list_del(&cmd_buffer->pool_link); list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers); result = radv_reset_cmd_buffer(cmd_buffer); - cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC; cmd_buffer->level = pAllocateInfo->level; 
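
Illustrative aside (not part of the patch): the radv_cmd_state_setup_attachments hunk above now resolves each attachment view either from VkRenderPassAttachmentBeginInfo (imageless framebuffers) or from the framebuffer object itself. A minimal sketch of that selection, assuming radv's internal types exactly as they appear in the hunk; the helper name pick_attachment_view() is made up for illustration:

static struct radv_image_view *
pick_attachment_view(struct radv_cmd_state *state,
                     const struct VkRenderPassAttachmentBeginInfo *att_info,
                     uint32_t i)
{
        /* Imageless framebuffer: the views arrive at vkCmdBeginRenderPass
         * time through VkRenderPassAttachmentBeginInfo. */
        if (att_info && att_info->attachmentCount > i)
                return radv_image_view_from_handle(att_info->pAttachments[i]);

        /* Classic path: the views were baked into the framebuffer object. */
        return state->framebuffer->attachments[i];
}

Either way the chosen view is cached in state->attachments[i].iview, which is why the other hunks in this diff can drop their framebuffer->attachments[...].attachment indirections.
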
pCommandBuffers[i] = radv_cmd_buffer_to_handle(cmd_buffer); @@ -3140,7 +3694,7 @@ void radv_FreeCommandBuffers( list_del(&cmd_buffer->pool_link); list_addtail(&cmd_buffer->pool_link, &cmd_buffer->pool->free_cmd_buffers); } else - radv_cmd_buffer_destroy(cmd_buffer); + radv_destroy_cmd_buffer(cmd_buffer); } } @@ -3177,6 +3731,9 @@ VkResult radv_BeginCommandBuffer( cmd_buffer->state.last_vertex_offset = -1; cmd_buffer->state.last_first_instance = -1; cmd_buffer->state.predication_type = -1; + cmd_buffer->state.last_sx_ps_downconvert = -1; + cmd_buffer->state.last_sx_blend_opt_epsilon = -1; + cmd_buffer->state.last_sx_blend_opt_control = -1; cmd_buffer->usage_flags = pBeginInfo->flags; if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY && @@ -3188,33 +3745,48 @@ VkResult radv_BeginCommandBuffer( struct radv_subpass *subpass = &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass]; - result = radv_cmd_state_setup_attachments(cmd_buffer, cmd_buffer->state.pass, NULL); - if (result != VK_SUCCESS) - return result; + if (cmd_buffer->state.framebuffer) { + result = radv_cmd_state_setup_attachments(cmd_buffer, cmd_buffer->state.pass, NULL); + if (result != VK_SUCCESS) + return result; + } + + cmd_buffer->state.inherited_pipeline_statistics = + pBeginInfo->pInheritanceInfo->pipelineStatistics; radv_cmd_buffer_set_subpass(cmd_buffer, subpass); } - if (unlikely(cmd_buffer->device->trace_bo)) { - struct radv_device *device = cmd_buffer->device; - - radv_cs_add_buffer(device->ws, cmd_buffer->cs, - device->trace_bo); - + if (unlikely(cmd_buffer->device->trace_bo)) radv_cmd_buffer_trace_emit(cmd_buffer); - } + + radv_describe_begin_cmd_buffer(cmd_buffer); cmd_buffer->status = RADV_CMD_BUFFER_STATUS_RECORDING; return result; } -void radv_CmdBindVertexBuffers( +void radv_CmdBindVertexBuffers( + VkCommandBuffer commandBuffer, + uint32_t firstBinding, + uint32_t bindingCount, + const VkBuffer* pBuffers, + const VkDeviceSize* pOffsets) +{ + radv_CmdBindVertexBuffers2EXT(commandBuffer, firstBinding, + bindingCount, pBuffers, pOffsets, + NULL, NULL); +} + +void radv_CmdBindVertexBuffers2EXT( VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, - const VkDeviceSize* pOffsets) + const VkDeviceSize* pOffsets, + const VkDeviceSize* pSizes, + const VkDeviceSize* pStrides) { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); struct radv_vertex_binding *vb = cmd_buffer->vertex_bindings; @@ -3225,19 +3797,29 @@ void radv_CmdBindVertexBuffers( assert(firstBinding + bindingCount <= MAX_VBS); for (uint32_t i = 0; i < bindingCount; i++) { + RADV_FROM_HANDLE(radv_buffer, buffer, pBuffers[i]); uint32_t idx = firstBinding + i; + VkDeviceSize size = pSizes ? pSizes[i] : 0; + VkDeviceSize stride = pStrides ? pStrides[i] : 0; + /* pSizes and pStrides are optional. 
*/ if (!changed && - (vb[idx].buffer != radv_buffer_from_handle(pBuffers[i]) || - vb[idx].offset != pOffsets[i])) { + (vb[idx].buffer != buffer || + vb[idx].offset != pOffsets[i] || + vb[idx].size != size || + vb[idx].stride != stride)) { changed = true; } - vb[idx].buffer = radv_buffer_from_handle(pBuffers[i]); + vb[idx].buffer = buffer; vb[idx].offset = pOffsets[i]; + vb[idx].size = size; + vb[idx].stride = stride; - radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, - vb[idx].buffer->bo); + if (buffer) { + radv_cs_add_buffer(cmd_buffer->device->ws, + cmd_buffer->cs, vb[idx].buffer->bo); + } } if (!changed) { @@ -3300,7 +3882,7 @@ void radv_CmdBindIndexBuffer( cmd_buffer->state.index_va = radv_buffer_get_va(index_buffer->bo); cmd_buffer->state.index_va += index_buffer->offset + offset; - int index_size = radv_get_vgt_index_size(indexType); + int index_size = radv_get_vgt_index_size(vk_to_index_type(indexType)); cmd_buffer->state.max_index_count = (index_buffer->size - offset) / index_size; cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER; radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, index_buffer->bo); @@ -3320,7 +3902,7 @@ radv_bind_descriptor_set(struct radv_cmd_buffer *cmd_buffer, assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR)); if (!cmd_buffer->device->use_global_bo_list) { - for (unsigned j = 0; j < set->layout->buffer_count; ++j) + for (unsigned j = 0; j < set->buffer_count; ++j) if (set->descriptors[j]) radv_cs_add_buffer(ws, cmd_buffer->cs, set->descriptors[j]); } @@ -3350,7 +3932,13 @@ void radv_CmdBindDescriptorSets( for (unsigned i = 0; i < descriptorSetCount; ++i) { unsigned idx = i + firstSet; RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]); - radv_bind_descriptor_set(cmd_buffer, pipelineBindPoint, set, idx); + + /* If the set is already bound we only need to update the + * (potentially changed) dynamic offsets. */ + if (descriptors_state->sets[idx] != set || + !(descriptors_state->valid & (1u << idx))) { + radv_bind_descriptor_set(cmd_buffer, pipelineBindPoint, set, idx); + } for(unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) { unsigned idx = j + layout->set[i + firstSet].dynamic_offset_start; @@ -3369,7 +3957,7 @@ void radv_CmdBindDescriptorSets( if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) { dst[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) | - S_008F0C_OOB_SELECT(3) | + S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) | S_008F0C_RESOURCE_LEVEL(1); } else { dst[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) | @@ -3536,6 +4124,13 @@ VkResult radv_EndCommandBuffer( */ cmd_buffer->state.flush_bits |= cmd_buffer->active_query_flush_bits; + /* Since NGG streamout uses GDS, we need to make GDS idle when + * we leave the IB, otherwise another process might overwrite + * it while our shaders are busy. 
+ */ + if (cmd_buffer->gds_needed) + cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH; + si_emit_cache_flush(cmd_buffer); } @@ -3544,11 +4139,14 @@ VkResult radv_EndCommandBuffer( */ si_cp_dma_wait_for_idle(cmd_buffer); + radv_describe_end_cmd_buffer(cmd_buffer); + vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments); vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.subpass_sample_locs); - if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs)) - return vk_error(cmd_buffer->device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY); + VkResult result = cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs); + if (result != VK_SUCCESS) + return vk_error(cmd_buffer->device->instance, result); cmd_buffer->status = RADV_CMD_BUFFER_STATUS_EXECUTABLE; @@ -3570,15 +4168,16 @@ radv_emit_compute_pipeline(struct radv_cmd_buffer *cmd_buffer) radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, pipeline->cs.cdw); radeon_emit_array(cmd_buffer->cs, pipeline->cs.buf, pipeline->cs.cdw); - cmd_buffer->compute_scratch_size_needed = - MAX2(cmd_buffer->compute_scratch_size_needed, - pipeline->max_waves * pipeline->scratch_bytes_per_wave); + cmd_buffer->compute_scratch_size_per_wave_needed = MAX2(cmd_buffer->compute_scratch_size_per_wave_needed, + pipeline->scratch_bytes_per_wave); + cmd_buffer->compute_scratch_waves_wanted = MAX2(cmd_buffer->compute_scratch_waves_wanted, + pipeline->max_waves); radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, pipeline->shaders[MESA_SHADER_COMPUTE]->bo); if (unlikely(cmd_buffer->device->trace_bo)) - radv_save_pipeline(cmd_buffer, pipeline, RING_COMPUTE); + radv_save_pipeline(cmd_buffer, pipeline); } static void radv_mark_descriptor_sets_dirty(struct radv_cmd_buffer *cmd_buffer, @@ -3626,9 +4225,7 @@ void radv_CmdBindPipeline( /* Prefetch all pipeline shaders at first draw time. 
*/ cmd_buffer->state.prefetch_L2_mask |= RADV_PREFETCH_SHADERS; - if ((cmd_buffer->device->physical_device->rad_info.family == CHIP_NAVI10 || - cmd_buffer->device->physical_device->rad_info.family == CHIP_NAVI12 || - cmd_buffer->device->physical_device->rad_info.family == CHIP_NAVI14) && + if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX10 && cmd_buffer->state.emitted_pipeline && radv_pipeline_has_ngg(cmd_buffer->state.emitted_pipeline) && !radv_pipeline_has_ngg(cmd_buffer->state.pipeline)) { @@ -3670,11 +4267,15 @@ void radv_CmdSetViewport( assert(firstViewport < MAX_VIEWPORTS); assert(total_count >= 1 && total_count <= MAX_VIEWPORTS); - if (!memcmp(state->dynamic.viewport.viewports + firstViewport, + if (total_count <= state->dynamic.viewport.count && + !memcmp(state->dynamic.viewport.viewports + firstViewport, pViewports, viewportCount * sizeof(*pViewports))) { return; } + if (state->dynamic.viewport.count < total_count) + state->dynamic.viewport.count = total_count; + memcpy(state->dynamic.viewport.viewports + firstViewport, pViewports, viewportCount * sizeof(*pViewports)); @@ -3694,11 +4295,15 @@ void radv_CmdSetScissor( assert(firstScissor < MAX_SCISSORS); assert(total_count >= 1 && total_count <= MAX_SCISSORS); - if (!memcmp(state->dynamic.scissor.scissors + firstScissor, pScissors, + if (total_count <= state->dynamic.scissor.count && + !memcmp(state->dynamic.scissor.scissors + firstScissor, pScissors, scissorCount * sizeof(*pScissors))) { return; } + if (state->dynamic.scissor.count < total_count) + state->dynamic.scissor.count = total_count; + memcpy(state->dynamic.scissor.scissors + firstScissor, pScissors, scissorCount * sizeof(*pScissors)); @@ -3886,6 +4491,200 @@ void radv_CmdSetSampleLocationsEXT( state->dirty |= RADV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS; } +void radv_CmdSetLineStippleEXT( + VkCommandBuffer commandBuffer, + uint32_t lineStippleFactor, + uint16_t lineStipplePattern) +{ + RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + struct radv_cmd_state *state = &cmd_buffer->state; + + state->dynamic.line_stipple.factor = lineStippleFactor; + state->dynamic.line_stipple.pattern = lineStipplePattern; + + state->dirty |= RADV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE; +} + +void radv_CmdSetCullModeEXT( + VkCommandBuffer commandBuffer, + VkCullModeFlags cullMode) +{ + RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + struct radv_cmd_state *state = &cmd_buffer->state; + + if (state->dynamic.cull_mode == cullMode) + return; + + state->dynamic.cull_mode = cullMode; + + state->dirty |= RADV_CMD_DIRTY_DYNAMIC_CULL_MODE; +} + +void radv_CmdSetFrontFaceEXT( + VkCommandBuffer commandBuffer, + VkFrontFace frontFace) +{ + RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + struct radv_cmd_state *state = &cmd_buffer->state; + + if (state->dynamic.front_face == frontFace) + return; + + state->dynamic.front_face = frontFace; + + state->dirty |= RADV_CMD_DIRTY_DYNAMIC_FRONT_FACE; +} + +void radv_CmdSetPrimitiveTopologyEXT( + VkCommandBuffer commandBuffer, + VkPrimitiveTopology primitiveTopology) +{ + RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + struct radv_cmd_state *state = &cmd_buffer->state; + unsigned primitive_topology = si_translate_prim(primitiveTopology); + + if (state->dynamic.primitive_topology == primitive_topology) + return; + + state->dynamic.primitive_topology = primitive_topology; + + state->dirty |= RADV_CMD_DIRTY_DYNAMIC_PRIMITIVE_TOPOLOGY; +} + +void radv_CmdSetViewportWithCountEXT( + VkCommandBuffer 
commandBuffer, + uint32_t viewportCount, + const VkViewport* pViewports) +{ + radv_CmdSetViewport(commandBuffer, 0, viewportCount, pViewports); +} + +void radv_CmdSetScissorWithCountEXT( + VkCommandBuffer commandBuffer, + uint32_t scissorCount, + const VkRect2D* pScissors) +{ + radv_CmdSetScissor(commandBuffer, 0, scissorCount, pScissors); +} + +void radv_CmdSetDepthTestEnableEXT( + VkCommandBuffer commandBuffer, + VkBool32 depthTestEnable) + +{ + RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + struct radv_cmd_state *state = &cmd_buffer->state; + + if (state->dynamic.depth_test_enable == depthTestEnable) + return; + + state->dynamic.depth_test_enable = depthTestEnable; + + state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_TEST_ENABLE; +} + +void radv_CmdSetDepthWriteEnableEXT( + VkCommandBuffer commandBuffer, + VkBool32 depthWriteEnable) +{ + RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + struct radv_cmd_state *state = &cmd_buffer->state; + + if (state->dynamic.depth_write_enable == depthWriteEnable) + return; + + state->dynamic.depth_write_enable = depthWriteEnable; + + state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_WRITE_ENABLE; +} + +void radv_CmdSetDepthCompareOpEXT( + VkCommandBuffer commandBuffer, + VkCompareOp depthCompareOp) +{ + RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + struct radv_cmd_state *state = &cmd_buffer->state; + + if (state->dynamic.depth_compare_op == depthCompareOp) + return; + + state->dynamic.depth_compare_op = depthCompareOp; + + state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_COMPARE_OP; +} + +void radv_CmdSetDepthBoundsTestEnableEXT( + VkCommandBuffer commandBuffer, + VkBool32 depthBoundsTestEnable) +{ + RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + struct radv_cmd_state *state = &cmd_buffer->state; + + if (state->dynamic.depth_bounds_test_enable == depthBoundsTestEnable) + return; + + state->dynamic.depth_bounds_test_enable = depthBoundsTestEnable; + + state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE; +} + +void radv_CmdSetStencilTestEnableEXT( + VkCommandBuffer commandBuffer, + VkBool32 stencilTestEnable) +{ + RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + struct radv_cmd_state *state = &cmd_buffer->state; + + if (state->dynamic.stencil_test_enable == stencilTestEnable) + return; + + state->dynamic.stencil_test_enable = stencilTestEnable; + + state->dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_TEST_ENABLE; +} + +void radv_CmdSetStencilOpEXT( + VkCommandBuffer commandBuffer, + VkStencilFaceFlags faceMask, + VkStencilOp failOp, + VkStencilOp passOp, + VkStencilOp depthFailOp, + VkCompareOp compareOp) +{ + RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + struct radv_cmd_state *state = &cmd_buffer->state; + bool front_same = + state->dynamic.stencil_op.front.fail_op == failOp && + state->dynamic.stencil_op.front.pass_op == passOp && + state->dynamic.stencil_op.front.depth_fail_op == depthFailOp && + state->dynamic.stencil_op.front.compare_op == compareOp; + bool back_same = + state->dynamic.stencil_op.back.fail_op == failOp && + state->dynamic.stencil_op.back.pass_op == passOp && + state->dynamic.stencil_op.back.depth_fail_op == depthFailOp && + state->dynamic.stencil_op.back.compare_op == compareOp; + + if ((!(faceMask & VK_STENCIL_FACE_FRONT_BIT) || front_same) && + (!(faceMask & VK_STENCIL_FACE_BACK_BIT) || back_same)) + return; + + if (faceMask & VK_STENCIL_FACE_FRONT_BIT) { + state->dynamic.stencil_op.front.fail_op = failOp; + 
state->dynamic.stencil_op.front.pass_op = passOp; + state->dynamic.stencil_op.front.depth_fail_op = depthFailOp; + state->dynamic.stencil_op.front.compare_op = compareOp; + } + + if (faceMask & VK_STENCIL_FACE_BACK_BIT) { + state->dynamic.stencil_op.back.fail_op = failOp; + state->dynamic.stencil_op.back.pass_op = passOp; + state->dynamic.stencil_op.back.depth_fail_op = depthFailOp; + state->dynamic.stencil_op.back.compare_op = compareOp; + } + + state->dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_OP; +} + void radv_CmdExecuteCommands( VkCommandBuffer commandBuffer, uint32_t commandBufferCount, @@ -3901,10 +4700,14 @@ void radv_CmdExecuteCommands( for (uint32_t i = 0; i < commandBufferCount; i++) { RADV_FROM_HANDLE(radv_cmd_buffer, secondary, pCmdBuffers[i]); - primary->scratch_size_needed = MAX2(primary->scratch_size_needed, - secondary->scratch_size_needed); - primary->compute_scratch_size_needed = MAX2(primary->compute_scratch_size_needed, - secondary->compute_scratch_size_needed); + primary->scratch_size_per_wave_needed = MAX2(primary->scratch_size_per_wave_needed, + secondary->scratch_size_per_wave_needed); + primary->scratch_waves_wanted = MAX2(primary->scratch_waves_wanted, + secondary->scratch_waves_wanted); + primary->compute_scratch_size_per_wave_needed = MAX2(primary->compute_scratch_size_per_wave_needed, + secondary->compute_scratch_size_per_wave_needed); + primary->compute_scratch_waves_wanted = MAX2(primary->compute_scratch_waves_wanted, + secondary->compute_scratch_waves_wanted); if (secondary->esgs_ring_size_needed > primary->esgs_ring_size_needed) primary->esgs_ring_size_needed = secondary->esgs_ring_size_needed; @@ -3914,6 +4717,8 @@ void radv_CmdExecuteCommands( primary->tess_rings_needed = true; if (secondary->sample_positions_needed) primary->sample_positions_needed = true; + if (secondary->gds_needed) + primary->gds_needed = true; if (!secondary->state.framebuffer && (primary->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER)) { @@ -3962,6 +4767,9 @@ void radv_CmdExecuteCommands( primary->state.last_first_instance = secondary->state.last_first_instance; primary->state.last_num_instances = secondary->state.last_num_instances; primary->state.last_vertex_offset = secondary->state.last_vertex_offset; + primary->state.last_sx_ps_downconvert = secondary->state.last_sx_ps_downconvert; + primary->state.last_sx_blend_opt_epsilon = secondary->state.last_sx_blend_opt_epsilon; + primary->state.last_sx_blend_opt_control = secondary->state.last_sx_blend_opt_control; if (secondary->state.last_index_type != -1) { primary->state.last_index_type = @@ -3988,15 +4796,18 @@ VkResult radv_CreateCommandPool( RADV_FROM_HANDLE(radv_device, device, _device); struct radv_cmd_pool *pool; - pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8, + pool = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*pool), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); if (pool == NULL) return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); + vk_object_base_init(&device->vk, &pool->base, + VK_OBJECT_TYPE_COMMAND_POOL); + if (pAllocator) pool->alloc = *pAllocator; else - pool->alloc = device->alloc; + pool->alloc = device->vk.alloc; list_inithead(&pool->cmd_buffers); list_inithead(&pool->free_cmd_buffers); @@ -4022,15 +4833,16 @@ void radv_DestroyCommandPool( list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer, &pool->cmd_buffers, pool_link) { - radv_cmd_buffer_destroy(cmd_buffer); + radv_destroy_cmd_buffer(cmd_buffer); } list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer, &pool->free_cmd_buffers, 
pool_link) { - radv_cmd_buffer_destroy(cmd_buffer); + radv_destroy_cmd_buffer(cmd_buffer); } - vk_free2(&device->alloc, pAllocator, pool); + vk_object_base_finish(&pool->base); + vk_free2(&device->vk.alloc, pAllocator, pool); } VkResult radv_ResetCommandPool( @@ -4063,7 +4875,7 @@ void radv_TrimCommandPool( list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer, &pool->free_cmd_buffers, pool_link) { - radv_cmd_buffer_destroy(cmd_buffer); + radv_destroy_cmd_buffer(cmd_buffer); } } @@ -4081,6 +4893,8 @@ radv_cmd_buffer_begin_subpass(struct radv_cmd_buffer *cmd_buffer, radv_cmd_buffer_set_subpass(cmd_buffer, subpass); + radv_describe_barrier_start(cmd_buffer, RGP_BARRIER_EXTERNAL_RENDER_PASS_SYNC); + for (uint32_t i = 0; i < subpass->attachment_count; ++i) { const uint32_t a = subpass->attachments[i].attachment; if (a == VK_ATTACHMENT_UNUSED) @@ -4091,6 +4905,8 @@ radv_cmd_buffer_begin_subpass(struct radv_cmd_buffer *cmd_buffer, true); } + radv_describe_barrier_end(cmd_buffer); + radv_cmd_buffer_clear_subpass(cmd_buffer); assert(cmd_buffer->cs->cdw <= cdw_max); @@ -4105,6 +4921,8 @@ radv_cmd_buffer_end_subpass(struct radv_cmd_buffer *cmd_buffer) radv_cmd_buffer_resolve_subpass(cmd_buffer); + radv_describe_barrier_start(cmd_buffer, RGP_BARRIER_EXTERNAL_RENDER_PASS_SYNC); + for (uint32_t i = 0; i < subpass->attachment_count; ++i) { const uint32_t a = subpass->attachments[i].attachment; if (a == VK_ATTACHMENT_UNUSED) @@ -4114,17 +4932,18 @@ radv_cmd_buffer_end_subpass(struct radv_cmd_buffer *cmd_buffer) continue; VkImageLayout layout = state->pass->attachments[a].final_layout; - struct radv_subpass_attachment att = { a, layout }; + VkImageLayout stencil_layout = state->pass->attachments[a].stencil_final_layout; + struct radv_subpass_attachment att = { a, layout, stencil_layout }; radv_handle_subpass_image_transition(cmd_buffer, att, false); } + + radv_describe_barrier_end(cmd_buffer); } -void radv_CmdBeginRenderPass( - VkCommandBuffer commandBuffer, - const VkRenderPassBeginInfo* pRenderPassBegin, - VkSubpassContents contents) +void +radv_cmd_buffer_begin_render_pass(struct radv_cmd_buffer *cmd_buffer, + const VkRenderPassBeginInfo *pRenderPassBegin) { - RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); RADV_FROM_HANDLE(radv_render_pass, pass, pRenderPassBegin->renderPass); RADV_FROM_HANDLE(radv_framebuffer, framebuffer, pRenderPassBegin->framebuffer); VkResult result; @@ -4140,14 +4959,24 @@ void radv_CmdBeginRenderPass( result = radv_cmd_state_setup_sample_locations(cmd_buffer, pass, pRenderPassBegin); if (result != VK_SUCCESS) return; +} + +void radv_CmdBeginRenderPass( + VkCommandBuffer commandBuffer, + const VkRenderPassBeginInfo* pRenderPassBegin, + VkSubpassContents contents) +{ + RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + + radv_cmd_buffer_begin_render_pass(cmd_buffer, pRenderPassBegin); radv_cmd_buffer_begin_subpass(cmd_buffer, 0); } -void radv_CmdBeginRenderPass2KHR( +void radv_CmdBeginRenderPass2( VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBeginInfo, - const VkSubpassBeginInfoKHR* pSubpassBeginInfo) + const VkSubpassBeginInfo* pSubpassBeginInfo) { radv_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo, pSubpassBeginInfo->contents); @@ -4164,10 +4993,10 @@ void radv_CmdNextSubpass( radv_cmd_buffer_begin_subpass(cmd_buffer, prev_subpass + 1); } -void radv_CmdNextSubpass2KHR( +void radv_CmdNextSubpass2( VkCommandBuffer commandBuffer, - const VkSubpassBeginInfoKHR* pSubpassBeginInfo, - const VkSubpassEndInfoKHR* 
pSubpassEndInfo) + const VkSubpassBeginInfo* pSubpassBeginInfo, + const VkSubpassEndInfo* pSubpassEndInfo) { radv_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents); } @@ -4229,7 +5058,7 @@ radv_cs_emit_indirect_draw_packet(struct radv_cmd_buffer *cmd_buffer, struct radeon_cmdbuf *cs = cmd_buffer->cs; unsigned di_src_sel = indexed ? V_0287F0_DI_SRC_SEL_DMA : V_0287F0_DI_SRC_SEL_AUTO_INDEX; - bool draw_id_enable = radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.info.vs.needs_draw_id; + bool draw_id_enable = radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.vs.needs_draw_id; uint32_t base_reg = cmd_buffer->state.pipeline->graphics.vtx_base_sgpr; bool predicating = cmd_buffer->state.predicating; assert(base_reg); @@ -4400,7 +5229,7 @@ static bool radv_need_late_scissor_emission(struct radv_cmd_buffer *cmd_buffer, { struct radv_cmd_state *state = &cmd_buffer->state; - if (!cmd_buffer->device->physical_device->has_scissor_bug) + if (!cmd_buffer->device->physical_device->rad_info.has_gfx9_scissor_bug) return false; if (cmd_buffer->state.context_roll_without_scissor_emitted || info->strmout_buffer) @@ -4453,7 +5282,7 @@ radv_emit_all_graphics_states(struct radv_cmd_buffer *cmd_buffer, if (info->indexed) { if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_INDEX_BUFFER) - radv_emit_index_buffer(cmd_buffer); + radv_emit_index_buffer(cmd_buffer, info->indirect); } else { /* On GFX7 and later, non-indexed draws overwrite VGT_INDEX_TYPE, * so the state must be re-emitted before the next indexed @@ -4502,6 +5331,8 @@ radv_draw(struct radv_cmd_buffer *cmd_buffer, return; } + radv_describe_draw(cmd_buffer); + /* Use optimal packet order based on whether we need to sync the * pipeline. */ @@ -4653,7 +5484,7 @@ void radv_CmdDrawIndexedIndirect( radv_draw(cmd_buffer, &info); } -void radv_CmdDrawIndirectCountKHR( +void radv_CmdDrawIndirectCount( VkCommandBuffer commandBuffer, VkBuffer _buffer, VkDeviceSize offset, @@ -4677,7 +5508,7 @@ void radv_CmdDrawIndirectCountKHR( radv_draw(cmd_buffer, &info); } -void radv_CmdDrawIndexedIndirectCountKHR( +void radv_CmdDrawIndexedIndirectCount( VkCommandBuffer commandBuffer, VkBuffer _buffer, VkDeviceSize offset, @@ -4742,6 +5573,11 @@ radv_emit_dispatch_packets(struct radv_cmd_buffer *cmd_buffer, ASSERTED unsigned cdw_max = radeon_check_space(ws, cs, 25); + if (compute_shader->info.wave_size == 32) { + assert(cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10); + dispatch_initiator |= S_00B800_CS_W32_EN(1); + } + if (info->indirect) { uint64_t va = radv_buffer_get_va(info->indirect->bo); @@ -4872,6 +5708,8 @@ radv_dispatch(struct radv_cmd_buffer *cmd_buffer, bool pipeline_is_dirty = pipeline && pipeline != cmd_buffer->state.emitted_compute_pipeline; + radv_describe_dispatch(cmd_buffer, 8, 8, 8); + if (cmd_buffer->state.flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_DB | RADV_CMD_FLAG_PS_PARTIAL_FLUSH | @@ -4982,15 +5820,9 @@ void radv_unaligned_dispatch( radv_dispatch(cmd_buffer, &info); } -void radv_CmdEndRenderPass( - VkCommandBuffer commandBuffer) +void +radv_cmd_buffer_end_render_pass(struct radv_cmd_buffer *cmd_buffer) { - RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); - - radv_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier); - - radv_cmd_buffer_end_subpass(cmd_buffer); - vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments); vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.subpass_sample_locs); @@ -5001,9 +5833,21 @@ void 
radv_CmdEndRenderPass( cmd_buffer->state.subpass_sample_locs = NULL; } -void radv_CmdEndRenderPass2KHR( +void radv_CmdEndRenderPass( + VkCommandBuffer commandBuffer) +{ + RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); + + radv_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier); + + radv_cmd_buffer_end_subpass(cmd_buffer); + + radv_cmd_buffer_end_render_pass(cmd_buffer); +} + +void radv_CmdEndRenderPass2( VkCommandBuffer commandBuffer, - const VkSubpassEndInfoKHR* pSubpassEndInfo) + const VkSubpassEndInfo* pSubpassEndInfo) { radv_CmdEndRenderPass(commandBuffer); } @@ -5017,26 +5861,30 @@ void radv_CmdEndRenderPass2KHR( */ static void radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image, - const VkImageSubresourceRange *range, - uint32_t clear_word) + const VkImageSubresourceRange *range) { assert(range->baseMipLevel == 0); assert(range->levelCount == 1 || range->levelCount == VK_REMAINING_ARRAY_LAYERS); VkImageAspectFlags aspects = VK_IMAGE_ASPECT_DEPTH_BIT; struct radv_cmd_state *state = &cmd_buffer->state; + uint32_t htile_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f; VkClearDepthStencilValue value = {}; + struct radv_barrier_data barrier = {}; state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB | RADV_CMD_FLAG_FLUSH_AND_INV_DB_META; - state->flush_bits |= radv_clear_htile(cmd_buffer, image, range, clear_word); + barrier.layout_transitions.init_mask_ram = 1; + radv_describe_layout_transition(cmd_buffer, &barrier); + + state->flush_bits |= radv_clear_htile(cmd_buffer, image, range, htile_value); state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META; if (vk_format_is_stencil(image->vk_format)) aspects |= VK_IMAGE_ASPECT_STENCIL_BIT; - radv_set_ds_clear_metadata(cmd_buffer, image, value, aspects); + radv_set_ds_clear_metadata(cmd_buffer, image, range, value, aspects); if (radv_image_is_tc_compat_htile(image)) { /* Initialize the TC-compat metada value to 0 because by @@ -5044,14 +5892,16 @@ static void radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer, * need have to conditionally update its value when performing * a fast depth clear. */ - radv_set_tc_compat_zrange_metadata(cmd_buffer, image, 0); + radv_set_tc_compat_zrange_metadata(cmd_buffer, image, range, 0); } } static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image, VkImageLayout src_layout, + bool src_render_loop, VkImageLayout dst_layout, + bool dst_render_loop, unsigned src_queue_mask, unsigned dst_queue_mask, const VkImageSubresourceRange *range, @@ -5061,30 +5911,17 @@ static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffe return; if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) { - uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f; - - if (radv_layout_is_htile_compressed(image, dst_layout, - dst_queue_mask)) { - clear_value = 0; - } - - radv_initialize_htile(cmd_buffer, image, range, clear_value); - } else if (!radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) && - radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) { - uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 
0xfffff30f : 0xfffc000f; - radv_initialize_htile(cmd_buffer, image, range, clear_value); - } else if (radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) && - !radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) { - VkImageSubresourceRange local_range = *range; - local_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; - local_range.baseMipLevel = 0; - local_range.levelCount = 1; - + radv_initialize_htile(cmd_buffer, image, range); + } else if (!radv_layout_is_htile_compressed(image, src_layout, src_render_loop, src_queue_mask) && + radv_layout_is_htile_compressed(image, dst_layout, dst_render_loop, dst_queue_mask)) { + radv_initialize_htile(cmd_buffer, image, range); + } else if (radv_layout_is_htile_compressed(image, src_layout, src_render_loop, src_queue_mask) && + !radv_layout_is_htile_compressed(image, dst_layout, dst_render_loop, dst_queue_mask)) { cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB | RADV_CMD_FLAG_FLUSH_AND_INV_DB_META; - radv_decompress_depth_image_inplace(cmd_buffer, image, - &local_range, sample_locs); + radv_decompress_depth_stencil(cmd_buffer, image, range, + sample_locs); cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB | RADV_CMD_FLAG_FLUSH_AND_INV_DB_META; @@ -5097,10 +5934,14 @@ static void radv_initialise_cmask(struct radv_cmd_buffer *cmd_buffer, uint32_t value) { struct radv_cmd_state *state = &cmd_buffer->state; + struct radv_barrier_data barrier = {}; state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; + barrier.layout_transitions.init_mask_ram = 1; + radv_describe_layout_transition(cmd_buffer, &barrier); + state->flush_bits |= radv_clear_cmask(cmd_buffer, image, range, value); state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; @@ -5119,10 +5960,14 @@ void radv_initialize_fmask(struct radv_cmd_buffer *cmd_buffer, }; uint32_t log2_samples = util_logbase2(image->info.samples); uint32_t value = fmask_clear_values[log2_samples]; + struct radv_barrier_data barrier = {}; state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; + barrier.layout_transitions.init_mask_ram = 1; + radv_describe_layout_transition(cmd_buffer, &barrier); + state->flush_bits |= radv_clear_fmask(cmd_buffer, image, range, value); state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; @@ -5133,11 +5978,15 @@ void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer, const VkImageSubresourceRange *range, uint32_t value) { struct radv_cmd_state *state = &cmd_buffer->state; + struct radv_barrier_data barrier = {}; unsigned size = 0; state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; + barrier.layout_transitions.init_mask_ram = 1; + radv_describe_layout_transition(cmd_buffer, &barrier); + state->flush_bits |= radv_clear_dcc(cmd_buffer, image, range, value); if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX8) { @@ -5162,7 +6011,7 @@ void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer, if (size != image->planes[0].surface.dcc_size) { state->flush_bits |= radv_fill_buffer(cmd_buffer, image->bo, - image->offset + image->dcc_offset + size, + image->offset + image->planes[0].surface.dcc_offset + size, image->planes[0].surface.dcc_size - size, 0xffffffff); } @@ -5178,7 +6027,9 @@ void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer, static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image, VkImageLayout src_layout, + bool src_render_loop, 
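
Aside (not part of the patch): the metadata-initialization hunks nearby boil down to a couple of magic words. A self-contained sketch of those values, taken from the surrounding hunks; the helper names are made up:

#include <stdbool.h>
#include <stdint.h>

/* HTILE clear word used by radv_initialize_htile(): the value differs
 * depending on whether the format carries stencil. */
static uint32_t htile_init_value(bool has_stencil)
{
        return has_stencil ? 0xfffff30f : 0xfffc000f;
}

/* DCC init word used by radv_init_color_image_metadata(): leave the surface
 * fully expanded unless the destination layout keeps DCC compressed, in
 * which case the driver also schedules a decompress pass. */
static uint32_t dcc_init_value(bool dst_layout_is_dcc_compressed)
{
        return dst_layout_is_dcc_compressed ? 0x20202020u : 0xffffffffu;
}
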
VkImageLayout dst_layout, + bool dst_render_loop, unsigned src_queue_mask, unsigned dst_queue_mask, const VkImageSubresourceRange *range) @@ -5202,7 +6053,8 @@ static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer, uint32_t value = 0xffffffffu; /* Fully expanded mode. */ bool need_decompress_pass = false; - if (radv_layout_dcc_compressed(image, dst_layout, + if (radv_layout_dcc_compressed(cmd_buffer->device, image, dst_layout, + dst_render_loop, dst_queue_mask)) { value = 0x20202020u; need_decompress_pass = true; @@ -5228,14 +6080,17 @@ static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer, static void radv_handle_color_image_transition(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image, VkImageLayout src_layout, + bool src_render_loop, VkImageLayout dst_layout, + bool dst_render_loop, unsigned src_queue_mask, unsigned dst_queue_mask, const VkImageSubresourceRange *range) { if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) { radv_init_color_image_metadata(cmd_buffer, image, - src_layout, dst_layout, + src_layout, src_render_loop, + dst_layout, dst_render_loop, src_queue_mask, dst_queue_mask, range); return; @@ -5244,18 +6099,18 @@ static void radv_handle_color_image_transition(struct radv_cmd_buffer *cmd_buffe if (radv_dcc_enabled(image, range->baseMipLevel)) { if (src_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) { radv_initialize_dcc(cmd_buffer, image, range, 0xffffffffu); - } else if (radv_layout_dcc_compressed(image, src_layout, src_queue_mask) && - !radv_layout_dcc_compressed(image, dst_layout, dst_queue_mask)) { + } else if (radv_layout_dcc_compressed(cmd_buffer->device, image, src_layout, src_render_loop, src_queue_mask) && + !radv_layout_dcc_compressed(cmd_buffer->device, image, dst_layout, dst_render_loop, dst_queue_mask)) { radv_decompress_dcc(cmd_buffer, image, range); - } else if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) && - !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) { + } else if (radv_layout_can_fast_clear(image, src_layout, src_render_loop, src_queue_mask) && + !radv_layout_can_fast_clear(image, dst_layout, dst_render_loop, dst_queue_mask)) { radv_fast_clear_flush_image_inplace(cmd_buffer, image, range); } } else if (radv_image_has_cmask(image) || radv_image_has_fmask(image)) { bool fce_eliminate = false, fmask_expand = false; - if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) && - !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) { + if (radv_layout_can_fast_clear(image, src_layout, src_render_loop, src_queue_mask) && + !radv_layout_can_fast_clear(image, dst_layout, dst_render_loop, dst_queue_mask)) { fce_eliminate = true; } @@ -5272,15 +6127,22 @@ static void radv_handle_color_image_transition(struct radv_cmd_buffer *cmd_buffe if (fce_eliminate || fmask_expand) radv_fast_clear_flush_image_inplace(cmd_buffer, image, range); - if (fmask_expand) + if (fmask_expand) { + struct radv_barrier_data barrier = {}; + barrier.layout_transitions.fmask_color_expand = 1; + radv_describe_layout_transition(cmd_buffer, &barrier); + radv_expand_fmask_image_inplace(cmd_buffer, image, range); + } } } static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image, VkImageLayout src_layout, + bool src_render_loop, VkImageLayout dst_layout, + bool dst_render_loop, uint32_t src_family, uint32_t dst_family, const VkImageSubresourceRange *range, @@ -5319,18 +6181,21 @@ static void radv_handle_image_transition(struct radv_cmd_buffer 
*cmd_buffer, if (vk_format_is_depth(image->vk_format)) { radv_handle_depth_image_transition(cmd_buffer, image, - src_layout, dst_layout, + src_layout, src_render_loop, + dst_layout, dst_render_loop, src_queue_mask, dst_queue_mask, range, sample_locs); } else { radv_handle_color_image_transition(cmd_buffer, image, - src_layout, dst_layout, + src_layout, src_render_loop, + dst_layout, dst_render_loop, src_queue_mask, dst_queue_mask, range); } } struct radv_barrier_info { + enum rgp_barrier_reason reason; uint32_t eventCount; const VkEvent *pEvents; VkPipelineStageFlags srcStageMask; @@ -5351,6 +6216,8 @@ radv_barrier(struct radv_cmd_buffer *cmd_buffer, enum radv_cmd_flush_bits src_flush_bits = 0; enum radv_cmd_flush_bits dst_flush_bits = 0; + radv_describe_barrier_start(cmd_buffer, info->reason); + for (unsigned i = 0; i < info->eventCount; ++i) { RADV_FROM_HANDLE(radv_event, event, info->pEvents[i]); uint64_t va = radv_buffer_get_va(event->bo); @@ -5421,7 +6288,9 @@ radv_barrier(struct radv_cmd_buffer *cmd_buffer, radv_handle_image_transition(cmd_buffer, image, pImageMemoryBarriers[i].oldLayout, + false, /* Outside of a renderpass we are never in a renderloop */ pImageMemoryBarriers[i].newLayout, + false, /* Outside of a renderpass we are never in a renderloop */ pImageMemoryBarriers[i].srcQueueFamilyIndex, pImageMemoryBarriers[i].dstQueueFamilyIndex, &pImageMemoryBarriers[i].subresourceRange, @@ -5436,6 +6305,8 @@ radv_barrier(struct radv_cmd_buffer *cmd_buffer, si_cp_dma_wait_for_idle(cmd_buffer); cmd_buffer->state.flush_bits |= dst_flush_bits; + + radv_describe_barrier_end(cmd_buffer); } void radv_CmdPipelineBarrier( @@ -5453,6 +6324,7 @@ void radv_CmdPipelineBarrier( RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); struct radv_barrier_info info; + info.reason = RGP_BARRIER_EXTERNAL_CMD_PIPELINE_BARRIER; info.eventCount = 0; info.pEvents = NULL; info.srcStageMask = srcStageMask; @@ -5564,6 +6436,7 @@ void radv_CmdWaitEvents(VkCommandBuffer commandBuffer, RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); struct radv_barrier_info info; + info.reason = RGP_BARRIER_EXTERNAL_CMD_WAIT_EVENTS; info.eventCount = eventCount; info.pEvents = pEvents; info.srcStageMask = 0; @@ -5687,7 +6560,12 @@ void radv_CmdBindTransformFeedbackBuffersEXT( sb[idx].buffer = radv_buffer_from_handle(pBuffers[i]); sb[idx].offset = pOffsets[i]; - sb[idx].size = pSizes[i]; + + if (!pSizes || pSizes[i] == VK_WHOLE_SIZE) { + sb[idx].size = sb[idx].buffer->size - sb[idx].offset; + } else { + sb[idx].size = pSizes[i]; + } radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, sb[idx].buffer->bo); @@ -5733,9 +6611,15 @@ radv_set_streamout_enable(struct radv_cmd_buffer *cmd_buffer, bool enable) (so->enabled_mask << 8) | (so->enabled_mask << 12); - if ((old_streamout_enabled != so->streamout_enabled) || - (old_hw_enabled_mask != so->hw_enabled_mask)) + if (!cmd_buffer->device->physical_device->use_ngg_streamout && + ((old_streamout_enabled != so->streamout_enabled) || + (old_hw_enabled_mask != so->hw_enabled_mask))) radv_emit_streamout_enable(cmd_buffer); + + if (cmd_buffer->device->physical_device->use_ngg_streamout) { + cmd_buffer->gds_needed = true; + cmd_buffer->gds_oa_needed = true; + } } static void radv_flush_vgt_streamout(struct radv_cmd_buffer *cmd_buffer) @@ -5829,6 +6713,62 @@ radv_emit_streamout_begin(struct radv_cmd_buffer *cmd_buffer, radv_set_streamout_enable(cmd_buffer, true); } +static void +gfx10_emit_streamout_begin(struct radv_cmd_buffer *cmd_buffer, + uint32_t 
firstCounterBuffer, + uint32_t counterBufferCount, + const VkBuffer *pCounterBuffers, + const VkDeviceSize *pCounterBufferOffsets) +{ + struct radv_streamout_state *so = &cmd_buffer->state.streamout; + unsigned last_target = util_last_bit(so->enabled_mask) - 1; + struct radeon_cmdbuf *cs = cmd_buffer->cs; + uint32_t i; + + assert(cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10); + assert(firstCounterBuffer + counterBufferCount <= MAX_SO_BUFFERS); + + /* Sync because the next streamout operation will overwrite GDS and we + * have to make sure it's idle. + * TODO: Improve by tracking if there is a streamout operation in + * flight. + */ + cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VS_PARTIAL_FLUSH; + si_emit_cache_flush(cmd_buffer); + + for_each_bit(i, so->enabled_mask) { + int32_t counter_buffer_idx = i - firstCounterBuffer; + if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount) + counter_buffer_idx = -1; + + bool append = counter_buffer_idx >= 0 && + pCounterBuffers && pCounterBuffers[counter_buffer_idx]; + uint64_t va = 0; + + if (append) { + RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]); + + va += radv_buffer_get_va(buffer->bo); + va += buffer->offset + pCounterBufferOffsets[counter_buffer_idx]; + + radv_cs_add_buffer(cmd_buffer->device->ws, cs, buffer->bo); + } + + radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0)); + radeon_emit(cs, S_411_SRC_SEL(append ? V_411_SRC_ADDR_TC_L2 : V_411_DATA) | + S_411_DST_SEL(V_411_GDS) | + S_411_CP_SYNC(i == last_target)); + radeon_emit(cs, va); + radeon_emit(cs, va >> 32); + radeon_emit(cs, 4 * i); /* destination in GDS */ + radeon_emit(cs, 0); + radeon_emit(cs, S_414_BYTE_COUNT_GFX9(4) | + S_414_DISABLE_WR_CONFIRM_GFX9(i != last_target)); + } + + radv_set_streamout_enable(cmd_buffer, true); +} + void radv_CmdBeginTransformFeedbackEXT( VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, @@ -5838,9 +6778,15 @@ void radv_CmdBeginTransformFeedbackEXT( { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); - radv_emit_streamout_begin(cmd_buffer, - firstCounterBuffer, counterBufferCount, - pCounterBuffers, pCounterBufferOffsets); + if (cmd_buffer->device->physical_device->use_ngg_streamout) { + gfx10_emit_streamout_begin(cmd_buffer, + firstCounterBuffer, counterBufferCount, + pCounterBuffers, pCounterBufferOffsets); + } else { + radv_emit_streamout_begin(cmd_buffer, + firstCounterBuffer, counterBufferCount, + pCounterBuffers, pCounterBufferOffsets); + } } static void @@ -5895,6 +6841,47 @@ radv_emit_streamout_end(struct radv_cmd_buffer *cmd_buffer, radv_set_streamout_enable(cmd_buffer, false); } +static void +gfx10_emit_streamout_end(struct radv_cmd_buffer *cmd_buffer, + uint32_t firstCounterBuffer, + uint32_t counterBufferCount, + const VkBuffer *pCounterBuffers, + const VkDeviceSize *pCounterBufferOffsets) +{ + struct radv_streamout_state *so = &cmd_buffer->state.streamout; + struct radeon_cmdbuf *cs = cmd_buffer->cs; + uint32_t i; + + assert(cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10); + assert(firstCounterBuffer + counterBufferCount <= MAX_SO_BUFFERS); + + for_each_bit(i, so->enabled_mask) { + int32_t counter_buffer_idx = i - firstCounterBuffer; + if (counter_buffer_idx >= 0 && counter_buffer_idx >= counterBufferCount) + counter_buffer_idx = -1; + + if (counter_buffer_idx >= 0 && pCounterBuffers && pCounterBuffers[counter_buffer_idx]) { + /* The array of counters buffer is optional. 
*/ + RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]); + uint64_t va = radv_buffer_get_va(buffer->bo); + + va += buffer->offset + pCounterBufferOffsets[counter_buffer_idx]; + + si_cs_emit_write_event_eop(cs, + cmd_buffer->device->physical_device->rad_info.chip_class, + radv_cmd_buffer_uses_mec(cmd_buffer), + V_028A90_PS_DONE, 0, + EOP_DST_SEL_TC_L2, + EOP_DATA_SEL_GDS, + va, EOP_DATA_GDS(i, 1), 0); + + radv_cs_add_buffer(cmd_buffer->device->ws, cs, buffer->bo); + } + } + + radv_set_streamout_enable(cmd_buffer, false); +} + void radv_CmdEndTransformFeedbackEXT( VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, @@ -5904,9 +6891,15 @@ void radv_CmdEndTransformFeedbackEXT( { RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); - radv_emit_streamout_end(cmd_buffer, - firstCounterBuffer, counterBufferCount, - pCounterBuffers, pCounterBufferOffsets); + if (cmd_buffer->device->physical_device->use_ngg_streamout) { + gfx10_emit_streamout_end(cmd_buffer, + firstCounterBuffer, counterBufferCount, + pCounterBuffers, pCounterBufferOffsets); + } else { + radv_emit_streamout_end(cmd_buffer, + firstCounterBuffer, counterBufferCount, + pCounterBuffers, pCounterBufferOffsets); + } } void radv_CmdDrawIndirectByteCountEXT( @@ -5946,6 +6939,8 @@ void radv_CmdWriteBufferMarkerAMD( si_emit_cache_flush(cmd_buffer); + ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 12); + if (!(pipelineStage & ~VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT)) { radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0)); radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) | @@ -5965,4 +6960,6 @@ void radv_CmdWriteBufferMarkerAMD( va, marker, cmd_buffer->gfx9_eop_bug_va); } + + assert(cmd_buffer->cs->cdw <= cdw_max); }
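
Aside (not part of the patch): the vertex-descriptor hunk at the top of this section changes how NUM_RECORDS is derived once sizes and strides can come from vkCmdBindVertexBuffers2EXT instead of the pipeline. A self-contained sketch of that computation, with made-up helper and parameter names:

#include <stdbool.h>
#include <stdint.h>

static uint32_t
vbo_num_records(uint64_t bound_size,   /* vertex_bindings[i].size, 0 when unset */
                uint64_t buffer_size,  /* size of the bound VkBuffer */
                uint64_t offset,       /* bind offset into the buffer */
                uint32_t stride,       /* dynamic or pipeline-provided stride */
                bool is_gfx8)
{
        uint64_t num_records = bound_size ? bound_size
                                          : buffer_size - offset;

        /* Every generation except GFX8 counts records (elements of `stride`
         * bytes) rather than bytes. */
        if (!is_gfx8 && stride)
                num_records /= stride;

        return (uint32_t)num_records;
}

On GFX10 the same hunk also picks the out-of-bounds check accordingly: OOB_SELECT_STRUCTURED (index >= NUM_RECORDS) when a stride is set, OOB_SELECT_RAW (offset >= NUM_RECORDS) for zero-stride bindings.
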
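
Closing illustration (not part of the patch): how an application would exercise the VK_EXT_extended_dynamic_state and VK_EXT_line_rasterization entry points this change implements in radv. It assumes the bound pipeline was created with the matching VK_DYNAMIC_STATE_*_EXT states and that both extensions are enabled; the prototypes are called directly for brevity, though they would normally be fetched through vkGetDeviceProcAddr:

#include <vulkan/vulkan.h>

static void
record_dynamic_state(VkCommandBuffer cmd, VkBuffer vbo, VkDeviceSize vbo_size)
{
        const VkDeviceSize offset = 0;
        const VkDeviceSize stride = 16; /* placeholder: 16-byte vertices */

        vkCmdSetCullModeEXT(cmd, VK_CULL_MODE_BACK_BIT);
        vkCmdSetFrontFaceEXT(cmd, VK_FRONT_FACE_COUNTER_CLOCKWISE);
        vkCmdSetPrimitiveTopologyEXT(cmd, VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST);
        vkCmdSetDepthTestEnableEXT(cmd, VK_TRUE);
        vkCmdSetDepthWriteEnableEXT(cmd, VK_TRUE);
        vkCmdSetDepthCompareOpEXT(cmd, VK_COMPARE_OP_LESS_OR_EQUAL);
        vkCmdSetStencilTestEnableEXT(cmd, VK_FALSE);
        vkCmdSetLineStippleEXT(cmd, 1, 0xffff);

        /* Size and stride now come from the command rather than the pipeline. */
        vkCmdBindVertexBuffers2EXT(cmd, 0, 1, &vbo, &offset, &vbo_size, &stride);
}

Redundant calls are cheap on the radv side: as the setters in this diff show, a value that does not actually change never sets its RADV_CMD_DIRTY_DYNAMIC_* bit and therefore never forces a re-emit at draw time.
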